@@ -7,16 +7,14 @@ use nix::unistd;
 use crate::discrete_alloc;
 use crate::helpers::ToU64;

-#[cfg(target_pointer_width = "64")]
-const BITS: u32 = 64;
-#[cfg(target_pointer_width = "32")]
-const BITS: u32 = 32;
-
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
 const BREAKPT_INSTR: isize = 0xCC;
 #[cfg(target_arch = "aarch64")]
 const BREAKPT_INSTR: isize = 0xD420;

+// FIXME: Make this architecture-specific
+const ARCH_MAX_ACCESS_SIZE: u64 = 256;
+
 // We do NOT ever want to block the child from accessing this!!
 static SUPERVISOR: std::sync::Mutex<Option<Supervisor>> = std::sync::Mutex::new(None);
 static mut PAGE_ADDR: *mut libc::c_void = std::ptr::null_mut();
@@ -121,7 +119,7 @@ impl Supervisor {
                 sv_loop(listener, t_event)
             });
             eprintln!("{p:?}");
-            std::process::abort();
+            std::process::exit(-1);
         }
         unistd::ForkResult::Child => {
             let mut sv = SUPERVISOR.lock().map_err(|_| ())?;
@@ -256,6 +254,9 @@ fn sv_loop(listener: ChildListener, t_event: ipc::IpcSender<MemEvents>) -> ! {
     let mut writes: Vec<Range<u64>> = vec![];
     let mut mappings: Vec<Range<u64>> = vec![];

+    // An instance of the Capstone disassembler, so we don't spawn one on every access
+    let cs = get_disasm();
+
     // Memory allocated on the MiriMachine
     let mut ch_pages = vec![];

@@ -290,7 +291,7 @@ fn sv_loop(listener: ChildListener, t_event: ipc::IpcSender<MemEvents>) -> ! {
                 match signal {
                     signal::SIGSEGV => {
                         if let Err(ret) =
-                            handle_segfault(pid, &ch_pages, &mut reads, &mut writes)
+                            handle_segfault(pid, &ch_pages, &cs, &mut reads, &mut writes)
                         {
                             retcode = ret;
                             break 'listen;
@@ -441,6 +442,40 @@ fn sv_loop(listener: ChildListener, t_event: ipc::IpcSender<MemEvents>) -> ! {
     std::process::exit(retcode);
 }

+fn get_disasm() -> capstone::Capstone {
+    use capstone::prelude::*;
+    let cs_pre = Capstone::new();
+    {
+        #[cfg(target_arch = "x86_64")]
+        {
+            cs_pre.x86().mode(arch::x86::ArchMode::Mode64)
+        }
+        #[cfg(target_arch = "x86")]
+        {
+            cs_pre.x86().mode(arch::x86::ArchMode::Mode32)
+        }
+        #[cfg(target_arch = "aarch64")]
+        {
+            cs_pre.arm64()
+        }
+        #[cfg(target_arch = "arm")]
+        {
+            cs_pre.arm()
+        }
+        #[cfg(target_arch = "riscv64")]
+        {
+            cs_pre.riscv().mode(arch::riscv::ArchMode::RiscV64)
+        }
+        #[cfg(target_arch = "riscv32")]
+        {
+            cs_pre.riscv().mode(arch::riscv::ArchMode::RiscV32)
+        }
+    }
+    .detail(true)
+    .build()
+    .unwrap()
+}
+
 /// Waits for a specific signal to be triggered.
 fn wait_for_signal(
     pid: unistd::Pid,
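
For context, a minimal usage sketch of the disassembler that get_disasm() returns, shown with the x86-64 configuration; the instruction bytes and address are illustrative, not taken from the traced child:

    use capstone::prelude::*;

    fn main() -> CsResult<()> {
        // Same configuration get_disasm() builds on x86_64, with detail mode on.
        let cs = Capstone::new().x86().mode(arch::x86::ArchMode::Mode64).detail(true).build()?;
        // 0xC3 encodes `ret`; decode exactly one instruction at a dummy address.
        let insns = cs.disasm_count(&[0xC3], 0x1000, 1)?;
        println!("{}", insns[0]); // 0x1000: ret
        Ok(())
    }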
@@ -545,9 +580,76 @@ fn handle_munmap(
 fn handle_segfault(
     pid: unistd::Pid,
     ch_pages: &[u64],
+    cs: &capstone::Capstone,
     reads: &mut Vec<Range<u64>>,
     writes: &mut Vec<Range<u64>>,
 ) -> Result<(), i32> {
+    // This is just here to not pollute the main namespace with capstone::prelude::*
+    // and so that we can get a Result instead of just unwrapping on error
+    #[inline]
+    fn capstone_disassemble(
+        instr: &[u8],
+        addr: u64,
+        cs: &capstone::Capstone,
+        reads: &mut Vec<Range<u64>>,
+        writes: &mut Vec<Range<u64>>,
+    ) -> capstone::CsResult<()> {
+        use capstone::prelude::*;
+
+        let insns = cs.disasm_count(instr, 0x1000, 1)?;
+        let ins_detail = cs.insn_detail(&insns[0])?;
+        let arch_detail = ins_detail.arch_detail();
+
+        for op in arch_detail.operands() {
+            match op {
+                arch::ArchOperand::X86Operand(x86_operand) => {
+                    let size: u64 = x86_operand.size.into();
+                    match x86_operand.op_type {
+                        arch::x86::X86OperandType::Mem(_) => {
+                            // It's called a "RegAccessType" but it also applies to memory
+                            let acc_ty = x86_operand.access.unwrap();
+                            if acc_ty.is_readable() {
+                                reads.push(addr..addr.strict_add(size));
+                            }
+                            if acc_ty.is_writable() {
+                                writes.push(addr..addr.strict_add(size));
+                            }
+                        }
+                        _ => (),
+                    }
+                }
+                arch::ArchOperand::Arm64Operand(arm64_operand) => {
+                    // Annoyingly, we don't get the size here, so just be pessimistic for now
+                    match arm64_operand.op_type {
+                        arch::arm64::Arm64OperandType::Mem(_) => {
+                            // No access info either, so conservatively record both a read and a write
+                            reads.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
+                            writes.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
+                        }
+                        _ => (),
+                    }
+                }
+                arch::ArchOperand::ArmOperand(arm_operand) =>
+                    match arm_operand.op_type {
+                        arch::arm::ArmOperandType::Mem(_) => {
+                            let acc_ty = arm_operand.access.unwrap();
+                            if acc_ty.is_readable() {
+                                reads.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
+                            }
+                            if acc_ty.is_writable() {
+                                writes.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
+                            }
+                        }
+                        _ => (),
+                    },
+                arch::ArchOperand::RiscVOperand(_risc_voperand) => todo!(),
+                _ => unimplemented!(),
+            }
+        }
+
+        Ok(())
+    }
+
     let siginfo = ptrace::getsiginfo(pid).unwrap();
     let addr = unsafe { siginfo.si_addr().addr().to_u64() };
     let page_addr = addr.strict_sub(addr.strict_rem(unsafe { PAGE_SIZE }));
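
The page rounding above is plain truncation: for a fault at addr, subtracting addr % PAGE_SIZE yields the base of the containing page, which can then be matched against the tracked ch_pages. A minimal check of the arithmetic, assuming a 4 KiB page size:

    fn page_base(addr: u64, page_size: u64) -> u64 {
        // Round down to the start of the page containing `addr`.
        addr - addr % page_size
    }

    fn main() {
        assert_eq!(page_base(0x7f12_3456, 0x1000), 0x7f12_3000);
    }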
@@ -596,51 +698,12 @@ fn handle_segfault(
         );
         ret
     });
-    let mut decoder = iced_x86::Decoder::new(BITS, instr.as_slice(), 0);
-    let mut fac = iced_x86::InstructionInfoFactory::new();
-    let instr = decoder.decode();
-    let memsize = instr.op_code().memory_size().size().to_u64();
-    let mem = fac.info(&instr).used_memory();
-
-    for acc in mem {
-        let mut r = false;
-        let mut w = false;
-        match acc.access() {
-            iced_x86::OpAccess::Read | iced_x86::OpAccess::CondRead => {
-                r = true;
-            }
-            iced_x86::OpAccess::Write | iced_x86::OpAccess::CondWrite => {
-                w = true;
-            }
-            iced_x86::OpAccess::ReadWrite | iced_x86::OpAccess::ReadCondWrite => {
-                r = true;
-                w = true;
-            }
-            _ => (),
-        }
-        let addr_end = addr.strict_add(memsize);
-        if r {
-            if let Some(idx) = reads.iter().position(|r| r.start <= addr_end && addr <= r.end) {
-                let mut rg = reads[idx].clone();
-                rg.start = std::cmp::min(rg.start, addr);
-                rg.end = std::cmp::max(rg.end, addr_end);
-                reads[idx] = rg;
-            } else {
-                reads.push(addr..addr_end);
-            }
-        }
-        if w {
-            if let Some(idx) = writes.iter().position(|r| r.start <= addr_end && addr <= r.end)
-            {
-                let mut rg = writes[idx].clone();
-                rg.start = std::cmp::min(rg.start, addr);
-                rg.end = std::cmp::max(rg.end, addr_end);
-                writes[idx] = rg;
-            } else {
-                writes.push(addr..addr_end);
-            }
-        }
+
+    if capstone_disassemble(&instr, addr, cs, reads, writes).is_err() {
+        reads.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
+        writes.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
     }
+
     #[expect(clippy::as_conversions)]
     new_regs.set_ip(mempr_on as usize);
     new_regs.set_sp(unsafe { (&raw mut CLICK_HERE_4_FREE_STACK[512]).addr() });
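
Finally, a self-contained sketch of the operand walk that capstone_disassemble performs, using only the capstone APIs that already appear in this diff; the load instruction is illustrative:

    use capstone::prelude::*;

    fn main() -> CsResult<()> {
        let cs = Capstone::new().x86().mode(arch::x86::ArchMode::Mode64).detail(true).build()?;
        // 48 8B 07 encodes `mov rax, qword ptr [rdi]`, an 8-byte memory read.
        let insns = cs.disasm_count(&[0x48, 0x8B, 0x07], 0x1000, 1)?;
        let detail = cs.insn_detail(&insns[0])?;
        for op in detail.arch_detail().operands() {
            if let arch::ArchOperand::X86Operand(op) = op {
                if let arch::x86::X86OperandType::Mem(_) = op.op_type {
                    let access = op.access.unwrap();
                    // Prints: mem operand: read=true write=false, 8 bytes
                    println!(
                        "mem operand: read={} write={}, {} bytes",
                        access.is_readable(),
                        access.is_writable(),
                        op.size
                    );
                }
            }
        }
        Ok(())
    }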