@@ -42,7 +42,9 @@ extern struct target_ops gdbstub_ops;
 #define IF_imm(i, v) (i->imm == v)
 
 #if RV32_HAS(SYSTEM)
+#if !RV32_HAS(JIT)
 static bool need_clear_block_map = false;
+#endif
 static uint32_t reloc_enable_mmu_jalr_addr;
 static bool reloc_enable_mmu = false;
 bool need_retranslate = false;
@@ -704,6 +706,7 @@ static inline void remove_next_nth_ir(const riscv_t *rv,
  * Strategies are being devised to increase the number of instructions that
  * match the pattern, including possible instruction reordering.
  */
+#if RV32_HAS(MOP_FUSION)
 static void match_pattern(riscv_t *rv, block_t *block)
 {
     uint32_t i;
@@ -795,7 +798,7 @@ static void match_pattern(riscv_t *rv, block_t *block)
         }
     }
 }
-
+#endif
 typedef struct {
     bool is_constant[N_RV_REGS];
     uint32_t const_val[N_RV_REGS];
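
Note: the hunk above only shows the constant-tracking state. As a rough illustration of how such state might be updated while walking a block's IR (the `constopt_step` helper and its parameters are hypothetical, not from this file):

    #include <stdbool.h>
    #include <stdint.h>

    #define N_RV_REGS 32 /* RV32 has 32 integer registers */

    /* Per-register constant-tracking state, mirroring the struct above. */
    typedef struct {
        bool is_constant[N_RV_REGS];
        uint32_t const_val[N_RV_REGS];
    } constopt_info_t;

    /* Illustrative update step: an instruction whose result is statically
     * known (e.g. LUI) marks rd as a constant; any other write to rd
     * invalidates it. The real optimize_constant() handles many opcodes. */
    static void constopt_step(constopt_info_t *info, bool writes_known_const,
                              uint8_t rd, uint32_t value)
    {
        if (rd == 0)
            return; /* x0 is hardwired to zero; writes are discarded */
        info->is_constant[rd] = writes_known_const;
        if (writes_known_const)
            info->const_val[rd] = value;
    }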
@@ -838,12 +841,11 @@ static block_t *block_find_or_translate(riscv_t *rv)
     block_t *next_blk = block_find(map, rv->PC);
 #else
     /* lookup the next block in the block cache */
-    /*
-     * The function "cache_get()" gets the cached block by the given "key (PC)".
-     * In system simulation, the returned block might be dropped because it is
-     * not the one from the current process (by checking SATP CSR register).
-     */
     block_t *next_blk = (block_t *) cache_get(rv->block_cache, rv->PC, true);
+#if RV32_HAS(SYSTEM)
+    if (next_blk && next_blk->satp != rv->csr_satp)
+        next_blk = NULL;
+#endif
 #endif
 
     if (next_blk)
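
The deleted comment's invariant now lives in code: a cached block is only reusable if it was translated under the current SATP, since the same virtual PC can hold different code in different processes. A consolidated sketch, reusing the `cache_get()` call and `satp` tag shown in this hunk:

    /* Sketch only: lookup plus address-space validation as performed above. */
    static block_t *lookup_block_for_current_as(riscv_t *rv)
    {
        block_t *blk = (block_t *) cache_get(rv->block_cache, rv->PC, true);
    #if RV32_HAS(SYSTEM)
        /* A hit translated under another page-table root is a miss here. */
        if (blk && blk->satp != rv->csr_satp)
            blk = NULL;
    #endif
        return blk;
    }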
@@ -861,12 +863,20 @@ static block_t *block_find_or_translate(riscv_t *rv)
 
     block_translate(rv, next_blk);
 
+#if RV32_HAS(JIT) && RV32_HAS(SYSTEM)
+    /*
+     * An instruction fetch fault during translation may change satp, so
+     * record the tag here rather than in block_alloc().
+     */
+    next_blk->satp = rv->csr_satp;
+#endif
+
     optimize_constant(rv, next_blk);
+
 #if RV32_HAS(GDBSTUB)
     if (likely(!rv->debug_mode))
 #endif
-#if RV32_HAS(MOP_FUSION)
-        /* macro operation fusion */
+#if RV32_HAS(GDBSTUB) || RV32_HAS(MOP_FUSION)
         match_pattern(rv, next_blk);
 #endif
 
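
An abridged sketch of the block metadata this diff keeps referring to (the field set is illustrative; the emulator's real `block_t` has many more members, such as the IR list, the compiled-function pointer, and the hot/hot2 flags):

    /* Abridged sketch of the assumed block metadata. */
    typedef struct block {
        uint32_t pc_start;  /* guest PC of the first instruction */
        bool has_loops;     /* consulted by runtime_profiler() */
    #if RV32_HAS(SYSTEM)
        uint32_t satp;      /* address-space tag, written right after
                             * block_translate() succeeds */
    #endif
    } block_t;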
@@ -890,8 +900,6 @@ static block_t *block_find_or_translate(riscv_t *rv)
         return next_blk;
     }
 
-    list_del_init(&replaced_blk->list);
-
     if (prev == replaced_blk)
         prev = NULL;
 
@@ -910,6 +918,32 @@ static block_t *block_find_or_translate(riscv_t *rv)
         if (untaken == replaced_blk_entry) {
             entry->ir_tail->branch_untaken = NULL;
         }
+
+        /* update JALR LUT */
+        if (!entry->ir_tail->branch_table)
+            continue;
+
+#if 0
+        /*
+         * This branch-table update is unused for now: we only take the PC
+         * from the table and use cache_get() to achieve the branch
+         * prediction of T1C. However, if branch_table_t is ever changed to
+         * reference blocks directly, this update becomes necessary to avoid
+         * using freed blocks.
+         */
+        for (int i = 0; i < HISTORY_SIZE; i++) {
+            if (entry->ir_tail->branch_table->PC[i] == replaced_blk->pc_start) {
+                IIF (RV32_HAS(SYSTEM))
+                (if (entry->ir_tail->branch_table->satp[i] ==
+                     replaced_blk->satp), )
+                {
+                    entry->ir_tail->branch_table->PC[i] =
+                        entry->ir_tail->branch_table->satp[i] =
+                        entry->ir_tail->branch_table->times[i] = 0;
+                }
+            }
+        }
+#endif
     }
 
     /* free IRs in replaced block */
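
The disabled loop relies on the `IIF` token-pasting idiom so that the `satp` comparison only appears in SYSTEM builds. A minimal sketch of the classic form of that idiom (the project's own definition may differ in detail):

    /* IIF(c)(true_tokens, false_tokens): expands to the first argument
     * when c is 1 and to the second when c is 0. The two-level CAT is
     * needed so that c is macro-expanded before the token paste. */
    #define PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
    #define IIF(c) PRIMITIVE_CAT(IIF_, c)
    #define IIF_0(t, ...) __VA_ARGS__
    #define IIF_1(t, ...) t

    /* Example: IIF(1)(foo, bar) -> foo; IIF(0)(foo, bar) -> bar */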
@@ -923,6 +957,7 @@ static block_t *block_find_or_translate(riscv_t *rv)
         mpool_free(rv->block_ir_mp, ir);
     }
 
+    list_del_init(&replaced_blk->list);
     mpool_free(rv->block_mp, replaced_blk);
 #if RV32_HAS(T2C)
     pthread_mutex_unlock(&rv->cache_lock);
@@ -941,6 +976,10 @@ static bool runtime_profiler(riscv_t *rv, block_t *block)
      * we posit that our profiler could effectively identify hotspots using
      * three key indicators.
      */
+#if RV32_HAS(SYSTEM)
+    if (block->satp != rv->csr_satp)
+        return false;
+#endif
     uint32_t freq = cache_freq(rv->block_cache, block->pc_start);
     /* To profile a block after chaining, it must first be executed. */
     if (unlikely(freq >= 2 && block->has_loops))
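
With the new guard, the tier-up predicate becomes: same address space, executed at least twice, and contains a loop. Factored out as a sketch (`is_hot` is a hypothetical name; the threshold and flags are the ones visible in this hunk):

    /* Sketch: a block is a tier-up candidate only when it belongs to the
     * current address space, has run at least twice, and has a loop. */
    static bool is_hot(riscv_t *rv, block_t *block)
    {
    #if RV32_HAS(SYSTEM)
        if (block->satp != rv->csr_satp)
            return false; /* stale profile data from another process */
    #endif
        uint32_t freq = cache_freq(rv->block_cache, block->pc_start);
        return freq >= 2 && block->has_loops;
    }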
@@ -1022,15 +1061,21 @@ void rv_step(void *arg)
         block_t *block = block_find_or_translate(rv);
         /* by now, a block should be available */
         assert(block);
+#if RV32_HAS(JIT) && RV32_HAS(SYSTEM)
+        assert(block->satp == rv->csr_satp);
+#endif
 
         /* After emulating the previous block, it is determined whether the
          * branch is taken or not. The IR array of the current block is then
          * assigned to either the branch_taken or branch_untaken pointer of
          * the previous block.
          */
-
 #if RV32_HAS(BLOCK_CHAINING)
-        if (prev) {
+        if (prev
+#if RV32_HAS(JIT) && RV32_HAS(SYSTEM)
+            && prev->satp == rv->csr_satp
+#endif
+        ) {
             rv_insn_t *last_ir = prev->ir_tail;
             /* chain block */
             if (!insn_is_unconditional_branch(last_ir->opcode)) {
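
The widened condition confines block chaining to a single address space. The same check, expressed as a hypothetical helper (`can_chain` is not in the file):

    /* Sketch: chain only when the previous block was translated under the
     * current satp; otherwise a context switch could stitch together code
     * from two different processes. */
    static bool can_chain(const block_t *prev, const riscv_t *rv)
    {
    #if RV32_HAS(JIT) && RV32_HAS(SYSTEM)
        return prev && prev->satp == rv->csr_satp;
    #else
        return prev != NULL;
    #endif
    }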
@@ -1048,7 +1093,7 @@ void rv_step(void *arg)
 #endif
         last_pc = rv->PC;
 #if RV32_HAS(JIT)
-#if RV32_HAS(T2C)
+#if RV32_HAS(T2C) && !RV32_HAS(SYSTEM)
         /* executed through the tier-2 JIT compiler */
         if (block->hot2) {
             ((exec_t2c_func_t) block->func)(rv);