@@ -533,8 +533,37 @@ FORCE_INLINE bool insn_is_unconditional_branch(uint8_t opcode)
     case rv_insn_cebreak:
 #endif
         return true;
+    default:
+        return false;
+    }
+}
+
+FORCE_INLINE bool insn_is_direct_branch(uint8_t opcode)
+{
+    switch (opcode) {
+    case rv_insn_jal:
+#if RV32_HAS(EXT_C)
+    case rv_insn_cjal:
+    case rv_insn_cj:
+#endif
+        return true;
+    default:
+        return false;
+    }
+}
+
+FORCE_INLINE bool insn_is_indirect_branch(uint8_t opcode)
+{
+    switch (opcode) {
+    case rv_insn_jalr:
+#if RV32_HAS(EXT_C)
+    case rv_insn_cjalr:
+    case rv_insn_cjr:
+#endif
+        return true;
+    default:
+        return false;
     }
-    return false;
 }
 
 static void block_translate(riscv_t *rv, block_t *block)
@@ -571,11 +600,7 @@ static void block_translate(riscv_t *rv, block_t *block)
 #endif
         /* stop on branch */
         if (insn_is_branch(ir->opcode)) {
-            if (ir->opcode == rv_insn_jalr
-#if RV32_HAS(EXT_C)
-                || ir->opcode == rv_insn_cjalr || ir->opcode == rv_insn_cjr
-#endif
-            ) {
+            if (insn_is_indirect_branch(ir->opcode)) {
                 ir->branch_table = calloc(1, sizeof(branch_history_table_t));
                 assert(ir->branch_table);
                 memset(ir->branch_table->PC, -1,
@@ -768,95 +793,89 @@ static block_t *block_find_or_translate(riscv_t *rv)
 #if !RV32_HAS(JIT)
     block_map_t *map = &rv->block_map;
     /* lookup the next block in the block map */
-    block_t *next = block_find(map, rv->PC);
+    block_t *next_blk = block_find(map, rv->PC);
 #else
     /* lookup the next block in the block cache */
-    block_t *next = (block_t *) cache_get(rv->block_cache, rv->PC, true);
+    /*
+     * The function "cache_get()" gets the cached block by the given "key (PC)".
+     * In system simulation, the returned block might be dropped because it is
+     * not the one from the current process (by checking SATP CSR register).
+     */
+    block_t *next_blk = (block_t *) cache_get(rv->block_cache, rv->PC, true);
 #endif

-    if (!next) {
+    if (next_blk)
+        return next_blk;
+
 #if !RV32_HAS(JIT)
-        if (map->size * 1.25 > map->block_capacity) {
-            block_map_clear(rv);
-            prev = NULL;
-        }
+    /* clear block list if it is going to be filled */
+    if (map->size * 1.25 > map->block_capacity) {
+        block_map_clear(rv);
+        prev = NULL;
+    }
 #endif
-        /* allocate a new block */
-        next = block_alloc(rv);
-        block_translate(rv, next);
+    /* allocate a new block */
+    next_blk = block_alloc(rv);
+
+    block_translate(rv, next_blk);

-        optimize_constant(rv, next);
+    optimize_constant(rv, next_blk);
 #if RV32_HAS(GDBSTUB)
-        if (likely(!rv->debug_mode))
+    if (likely(!rv->debug_mode))
 #endif
-            /* macro operation fusion */
-            match_pattern(rv, next);
+        /* macro operation fusion */
+        match_pattern(rv, next_blk);

 #if !RV32_HAS(JIT)
-        /* insert the block into block map */
-        block_insert(&rv->block_map, next);
+    /* insert the block into block map */
+    block_insert(&rv->block_map, next_blk);
 #else
-        /* insert the block into block cache */
-        block_t *delete_target = cache_put(rv->block_cache, rv->PC, &(*next));
-        if (delete_target) {
-            if (prev == delete_target)
-                prev = NULL;
-            chain_entry_t *entry, *safe;
-            /* correctly remove deleted block from its chained block */
-            rv_insn_t *taken = delete_target->ir_tail->branch_taken,
-                      *untaken = delete_target->ir_tail->branch_untaken;
-            if (taken && taken->pc != delete_target->pc_start) {
-                block_t *target = cache_get(rv->block_cache, taken->pc, false);
-                bool flag = false;
-                list_for_each_entry_safe (entry, safe, &target->list, list) {
-                    if (entry->block == delete_target) {
-                        list_del_init(&entry->list);
-                        mpool_free(rv->chain_entry_mp, entry);
-                        flag = true;
-                    }
-                }
-                assert(flag);
-            }
-            if (untaken && untaken->pc != delete_target->pc_start) {
-                block_t *target =
-                    cache_get(rv->block_cache, untaken->pc, false);
-                assert(target);
-                bool flag = false;
-                list_for_each_entry_safe (entry, safe, &target->list, list) {
-                    if (entry->block == delete_target) {
-                        list_del_init(&entry->list);
-                        mpool_free(rv->chain_entry_mp, entry);
-                        flag = true;
-                    }
-                }
-                assert(flag);
-            }
-            /* correctly remove deleted block from the block chained to it */
-            list_for_each_entry_safe (entry, safe, &delete_target->list, list) {
-                if (entry->block == delete_target)
-                    continue;
-                rv_insn_t *target = entry->block->ir_tail;
-                if (target->branch_taken == delete_target->ir_head)
-                    target->branch_taken = NULL;
-                else if (target->branch_untaken == delete_target->ir_head)
-                    target->branch_untaken = NULL;
-                mpool_free(rv->chain_entry_mp, entry);
-            }
-            /* free deleted block */
-            uint32_t idx;
-            rv_insn_t *ir, *next;
-            for (idx = 0, ir = delete_target->ir_head;
-                 idx < delete_target->n_insn; idx++, ir = next) {
-                free(ir->fuse);
-                next = ir->next;
-                mpool_free(rv->block_ir_mp, ir);
-            }
-            mpool_free(rv->block_mp, delete_target);
+    list_add(&next_blk->list, &rv->block_list);
+
+    /* insert the block into block cache */
+    block_t *replaced_blk = cache_put(rv->block_cache, rv->PC, &(*next_blk));
+
+    if (!replaced_blk)
+        return next_blk;
+
+    list_del_init(&replaced_blk->list);
+
+    if (prev == replaced_blk)
+        prev = NULL;
+
+    /* remove the connection from parents */
+    rv_insn_t *replaced_blk_entry = replaced_blk->ir_head;
+
+    /* TODO: record parents of each block to avoid traversing all blocks */
+    block_t *entry;
+    list_for_each_entry (entry, &rv->block_list, list) {
+        rv_insn_t *taken = entry->ir_tail->branch_taken,
+                  *untaken = entry->ir_tail->branch_untaken;
+
+        if (taken == replaced_blk_entry) {
+            entry->ir_tail->branch_taken = NULL;
         }
-#endif
+        if (untaken == replaced_blk_entry) {
+            entry->ir_tail->branch_untaken = NULL;
+        }
+    }
+
+    /* free IRs in replaced block */
+    for (rv_insn_t *ir = replaced_blk->ir_head, *next_ir; ir != NULL;
+         ir = next_ir) {
+        next_ir = ir->next;
+
+        if (ir->fuse)
+            free(ir->fuse);
+
+        mpool_free(rv->block_ir_mp, ir);
     }

-    return next;
+    mpool_free(rv->block_mp, replaced_blk);
+#endif
+
+    assert(next_blk);
+    return next_blk;
 }

 #if RV32_HAS(JIT)
@@ -918,31 +937,12 @@ void rv_step(void *arg)
             if (!insn_is_unconditional_branch(last_ir->opcode)) {
                 if (is_branch_taken && !last_ir->branch_taken) {
                     last_ir->branch_taken = block->ir_head;
-#if RV32_HAS(JIT)
-                    chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                    new_entry->block = prev;
-                    list_add(&new_entry->list, &block->list);
-#endif
                 } else if (!is_branch_taken && !last_ir->branch_untaken) {
                     last_ir->branch_untaken = block->ir_head;
-#if RV32_HAS(JIT)
-                    chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                    new_entry->block = prev;
-                    list_add(&new_entry->list, &block->list);
-#endif
                 }
-            } else if (IF_insn(last_ir, jal)
-#if RV32_HAS(EXT_C)
-                       || IF_insn(last_ir, cj) || IF_insn(last_ir, cjal)
-#endif
-            ) {
+            } else if (insn_is_direct_branch(last_ir->opcode)) {
                 if (!last_ir->branch_taken) {
                     last_ir->branch_taken = block->ir_head;
-#if RV32_HAS(JIT)
-                    chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                    new_entry->block = prev;
-                    list_add(&new_entry->list, &block->list);
-#endif
                 }
             }
         }
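
Note on the block_find_or_translate() hunk: the evicted cache entry is no longer untangled through per-block chain_entry lists; every translated block now sits on a single rv->block_list, and eviction scans that list to clear any branch_taken/branch_untaken pointer that still refers to the replaced block before its IRs are freed. The following is a minimal standalone sketch of that eviction pattern, not the emulator's actual code: the struct layout and the helper names (global_block_list, list_remove, unlink_evicted, evict) are illustrative stand-ins.

#include <stddef.h>

/* Hypothetical, simplified block: only the fields the sketch needs. */
typedef struct block {
    struct block *branch_taken;   /* chained successor when branch taken */
    struct block *branch_untaken; /* chained successor when not taken */
    struct block *next;           /* membership in the global block list */
} block_t;

static block_t *global_block_list; /* stand-in for rv->block_list */

/* Detach a block from the global list (singly linked for brevity). */
static void list_remove(block_t *blk)
{
    block_t **pp = &global_block_list;
    while (*pp && *pp != blk)
        pp = &(*pp)->next;
    if (*pp)
        *pp = blk->next;
}

/* Clear dangling successor pointers held by any remaining block.
 * TODO (as in the patch): record parents to avoid the full scan. */
static void unlink_evicted(const block_t *evicted)
{
    for (block_t *b = global_block_list; b; b = b->next) {
        if (b->branch_taken == evicted)
            b->branch_taken = NULL;
        if (b->branch_untaken == evicted)
            b->branch_untaken = NULL;
    }
}

/* Eviction order mirrors the patch: unlist, unlink from parents, free. */
static void evict(block_t *replaced)
{
    list_remove(replaced);
    unlink_evicted(replaced);
    /* ...free the block's IRs and return the block to its memory pool... */
}

As the TODO in the patch notes, recording each block's parents would avoid this full-list scan on every eviction.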