@@ -863,12 +863,12 @@ static block_t *block_find_or_translate(riscv_t *rv)
863
863
block_t * next_blk = block_find (map , rv -> PC );
864
864
#else
865
865
/* lookup the next block in the block cache */
866
- /*
867
- * The function "cache_get()" gets the cached block by the given "key (PC)".
868
- * In system simulation, the returned block might be dropped because it is
869
- * not the one from the current process (by checking SATP CSR register).
870
- */
871
866
block_t * next_blk = (block_t * ) cache_get (rv -> block_cache , rv -> PC , true);
867
+ #if RV32_HAS (SYSTEM )
868
+ /* discard cache if satp is not matched */
869
+ if (next_blk && next_blk -> satp != rv -> csr_satp )
870
+ next_blk = NULL ;
871
+ #endif
872
872
#endif
873
873
874
874
if (next_blk )
@@ -886,6 +886,14 @@ static block_t *block_find_or_translate(riscv_t *rv)
886
886
887
887
block_translate (rv , next_blk );
888
888
889
+ #if RV32_HAS (JIT ) && RV32_HAS (SYSTEM )
890
+ /*
891
+ * There may be an ifetch fault that changes satp; do not do this
892
+ * in "block_alloc()"
893
+ */
894
+ next_blk -> satp = rv -> csr_satp ;
895
+ #endif
896
+
889
897
optimize_constant (rv , next_blk );
890
898
#if RV32_HAS (MOP_FUSION )
891
899
/* macro operation fusion */
@@ -912,8 +920,6 @@ static block_t *block_find_or_translate(riscv_t *rv)
912
920
return next_blk ;
913
921
}
914
922
915
- list_del_init (& replaced_blk -> list );
916
-
917
923
if (prev == replaced_blk )
918
924
prev = NULL ;
919
925
@@ -932,6 +938,16 @@ static block_t *block_find_or_translate(riscv_t *rv)
932
938
if (untaken == replaced_blk_entry ) {
933
939
entry -> ir_tail -> branch_untaken = NULL ;
934
940
}
941
+
942
+ /* update JALR LUT */
943
+ if (!entry -> ir_tail -> branch_table ) {
944
+ continue ;
945
+ }
946
+
947
+ /**
948
+ * TODO: update all JALR instructions which reference this
949
+ * basic block as the destination.
950
+ */
935
951
}
936
952
937
953
/* free IRs in replaced block */
@@ -945,6 +961,7 @@ static block_t *block_find_or_translate(riscv_t *rv)
945
961
mpool_free (rv -> block_ir_mp , ir );
946
962
}
947
963
964
+ list_del_init (& replaced_blk -> list );
948
965
mpool_free (rv -> block_mp , replaced_blk );
949
966
#if RV32_HAS (T2C )
950
967
pthread_mutex_unlock (& rv -> cache_lock );
@@ -961,6 +978,10 @@ static block_t *block_find_or_translate(riscv_t *rv)
961
978
#if RV32_HAS (JIT ) && !RV32_HAS (ARCH_TEST )
962
979
static bool runtime_profiler (riscv_t * rv , block_t * block )
963
980
{
981
+ #if RV32_HAS (SYSTEM )
982
+ if (block -> satp != rv -> csr_satp )
983
+ return false;
984
+ #endif
964
985
/* Based on our observations, a significant number of true hotspots are
965
986
* characterized by high usage frequency and including loop. Consequently,
966
987
* we posit that our profiler could effectively identify hotspots using
@@ -1053,14 +1074,22 @@ void rv_step(void *arg)
1053
1074
/* by now, a block should be available */
1054
1075
assert (block );
1055
1076
1077
+ #if RV32_HAS (JIT ) && RV32_HAS (SYSTEM )
1078
+ assert (block -> satp == rv -> csr_satp );
1079
+ #endif
1080
+
1056
1081
/* After emulating the previous block, it is determined whether the
1057
1082
* branch is taken or not. The IR array of the current block is then
1058
1083
* assigned to either the branch_taken or branch_untaken pointer of
1059
1084
* the previous block.
1060
1085
*/
1061
1086
1062
1087
#if RV32_HAS (BLOCK_CHAINING )
1063
- if (prev ) {
1088
+ if (prev
1089
+ #if RV32_HAS (JIT ) && RV32_HAS (SYSTEM )
1090
+ && prev -> satp == rv -> csr_satp
1091
+ #endif
1092
+ ) {
1064
1093
rv_insn_t * last_ir = prev -> ir_tail ;
1065
1094
/* chain block */
1066
1095
if (!insn_is_unconditional_branch (last_ir -> opcode )) {
0 commit comments