@@ -9,6 +9,23 @@ use crate::jit::{jit_memory_map, Cond, ShiftType};
99use crate :: { DEBUG_LOG , IS_DEBUG } ;
1010use std:: ptr;
1111
/// Cap on the accumulated cycle counter inside a JIT loop block; compared
/// against the running cycle total after `emit_count_cycles`.
/// NOTE(review): presumably hitting this bound forces the block to exit back
/// to the dispatcher — confirm at the `cmp` site that uses it.
pub const MAX_LOOP_CYCLE_COUNT: u32 = 255;
/// Abandons the guest context: restores the host stack pointer from
/// `$asm.runtime_data.host_sp`, then pops r4-r12 and pc to return into the
/// host. Control never comes back, which is asserted via
/// `unreachable_unchecked`.
///
/// SAFETY: assumes `host_sp` still points at the host frame where r4-r12 and
/// a return address were pushed on guest entry — TODO confirm at the
/// guest-entry code that performs the matching push.
macro_rules! exit_guest_context {
    ($asm:expr) => {{
        // r4-r12,pc since we need an even amount of registers for 8 byte
        // alignment, in case the compiler decides to use neon instructions
        std::arch::asm!(
            "mov sp, {}",
            "pop {{r4-r12,pc}}",
            in(reg) $asm.runtime_data.host_sp
        );
        std::hint::unreachable_unchecked();
    }};
}
25+
26+ use crate :: jit:: inst_branch_handler:: branch_reg;
27+ pub ( crate ) use exit_guest_context;
28+
1229pub struct JitAsmCommonFuns < const CPU : CpuType > {
1330 branch_return_stack : usize ,
1431 branch_reg : usize ,
@@ -34,7 +51,8 @@ impl<const CPU: CpuType> JitAsmCommonFuns<CPU> {
3451 } ;
3552 JitAsmCommonFuns {
3653 branch_return_stack : create_function ( Self :: emit_branch_return_stack, & format ! ( "{CPU:?}_branch_return_stack" ) ) ,
37- branch_reg : create_function ( Self :: emit_branch_reg, & format ! ( "{CPU:?}_branch_reg" ) ) ,
54+ // branch_reg: create_function(Self::emit_branch_reg, &format!("{CPU:?}_branch_reg")),
55+ branch_reg : 0 ,
3856 }
3957 }
4058
@@ -131,7 +149,6 @@ impl<const CPU: CpuType> JitAsmCommonFuns<CPU> {
131149 let result_accumulated_cycles_reg = block_asm. new_reg ( ) ;
132150 Self :: emit_count_cycles ( block_asm, total_cycles_reg, runtime_data_addr_reg, result_accumulated_cycles_reg) ;
133151
134- const MAX_LOOP_CYCLE_COUNT : u32 = 255 ;
135152 block_asm. cmp (
136153 result_accumulated_cycles_reg,
137154 match CPU {
@@ -336,17 +353,19 @@ impl<const CPU: CpuType> JitAsmCommonFuns<CPU> {
336353
337354 pub fn emit_call_branch_reg ( & self , block_asm : & mut BlockAsm , total_cycles : u16 , lr_reg : BlockReg , target_pc_reg : BlockReg , current_pc : u32 ) {
338355 if IS_DEBUG {
339- block_asm. call4_common ( self . branch_reg , total_cycles as u32 , lr_reg, target_pc_reg, current_pc) ;
356+ // block_asm.call4_common(self.branch_reg, total_cycles as u32, lr_reg, target_pc_reg, current_pc);
357+ block_asm. call4 ( branch_reg :: < CPU > as * const ( ) , total_cycles as u32 , lr_reg, target_pc_reg, current_pc) ;
340358 } else {
341- block_asm. call3_common ( self . branch_reg , total_cycles as u32 , lr_reg, target_pc_reg) ;
359+ // block_asm.call3_common(self.branch_reg, total_cycles as u32, lr_reg, target_pc_reg);
360+ block_asm. call3 ( branch_reg :: < CPU > as * const ( ) , total_cycles as u32 , lr_reg, target_pc_reg) ;
342361 }
343362 }
344363
345- extern "C" fn debug_push_return_stack ( current_pc : u32 , lr_pc : u32 , stack_size : u8 ) {
364+ pub extern "C" fn debug_push_return_stack ( current_pc : u32 , lr_pc : u32 , stack_size : u8 ) {
346365 println ! ( "{CPU:?} push {lr_pc:x} to return stack with size {stack_size} at {current_pc:x}" )
347366 }
348367
349- extern "C" fn debug_branch_reg ( current_pc : u32 , target_pc : u32 ) {
368+ pub extern "C" fn debug_branch_reg ( current_pc : u32 , target_pc : u32 ) {
350369 println ! ( "{CPU:?} branch reg from {current_pc:x} to {target_pc:x}" )
351370 }
352371
0 commit comments