@@ -1,10 +1,9 @@
-use std::sync::atomic::{AtomicPtr, AtomicU64};
+use std::sync::atomic::{AtomicPtr, AtomicUsize};
 
 use ipc_channel::ipc;
 use nix::sys::{ptrace, signal, wait};
 use nix::unistd;
 
-use crate::helpers::ToU64;
 use crate::shims::trace::{AccessEvent, FAKE_STACK_SIZE, MemEvents, StartFfiInfo, TraceRequest};
 
 /// The flags to use when calling `waitid()`.
@@ -16,27 +15,26 @@ const WAIT_FLAGS: wait::WaitPidFlag =
 /// Arch-specific maximum size a single access might perform. x86 value is set
 /// assuming nothing bigger than AVX-512 is available.
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-const ARCH_MAX_ACCESS_SIZE: u64 = 64;
+const ARCH_MAX_ACCESS_SIZE: usize = 64;
 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
-const ARCH_MAX_ACCESS_SIZE: u64 = 16;
+const ARCH_MAX_ACCESS_SIZE: usize = 16;
 #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
-const ARCH_MAX_ACCESS_SIZE: u64 = 16;
+const ARCH_MAX_ACCESS_SIZE: usize = 16;
 
-/// The default word size on a given platform, in bytes. Only for targets where
-/// this is actually used.
-#[cfg(target_arch = "arm")]
-const ARCH_WORD_SIZE: u64 = 4;
-#[cfg(target_arch = "aarch64")]
-const ARCH_WORD_SIZE: u64 = 8;
+/// The default word size on a given platform, in bytes.
+#[cfg(any(target_arch = "x86", target_arch = "arm", target_arch = "riscv32"))]
+const ARCH_WORD_SIZE: usize = 4;
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64"))]
+const ARCH_WORD_SIZE: usize = 8;
 
 /// The address of the page set to be edited, initialised to a sentinel null
 /// pointer.
 static PAGE_ADDR: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
 /// The host pagesize, initialised to a sentinel zero value.
-pub static PAGE_SIZE: AtomicU64 = AtomicU64::new(0);
+pub static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
 /// How many consecutive pages to unprotect. 1 by default, unlikely to be set
 /// higher than 2.
-static PAGE_COUNT: AtomicU64 = AtomicU64::new(1);
+static PAGE_COUNT: AtomicUsize = AtomicUsize::new(1);
 
 /// Allows us to get common arguments from the `user_regs_t` across architectures.
 /// Normally this would land us ABI hell, but thankfully all of our usecases
@@ -231,7 +229,7 @@ pub fn sv_loop(
     listener: ChildListener,
     event_tx: ipc::IpcSender<MemEvents>,
     confirm_tx: ipc::IpcSender<()>,
-    page_size: u64,
+    page_size: usize,
 ) -> Result<!, Option<i32>> {
     // Things that we return to the child process
     let mut acc_events = Vec::new();
@@ -278,7 +276,7 @@ pub fn sv_loop(
             ExecEvent::End => {
                 // Hand over the access info we traced
                 event_tx
-                    .send(MemEvents { acc_events, alloc_cutoff: page_size.try_into().unwrap() })
+                    .send(MemEvents { acc_events, alloc_cutoff: page_size })
                     .unwrap();
                 // And reset our values
                 acc_events = Vec::new();
@@ -400,18 +398,18 @@ fn wait_for_signal(
 /// or kills the child and returns the appropriate error otherwise.
 fn handle_segfault(
     pid: unistd::Pid,
-    ch_pages: &[u64],
+    ch_pages: &[usize],
     ch_stack: usize,
-    page_size: u64,
+    page_size: usize,
     cs: &capstone::Capstone,
     acc_events: &mut Vec<AccessEvent>,
 ) -> Result<(), ExecError> {
     /// This is just here to not pollute the main namespace with `capstone::prelude::*`.
     #[inline]
     fn capstone_disassemble(
         instr: &[u8],
-        addr: u64,
-        page_size: u64,
+        addr: usize,
+        page_size: usize,
         cs: &capstone::Capstone,
         acc_events: &mut Vec<AccessEvent>,
     ) -> capstone::CsResult<()> {
@@ -425,10 +423,10 @@ fn handle_segfault(
         let arch_detail = ins_detail.arch_detail();
 
         // Take an (addr, size, cutoff_size) and split an access into multiple if needed
-        let get_ranges: fn(u64, u64, u64) -> Vec<std::ops::Range<u64>> =
-            |addr, size, cutoff_size: u64| {
+        let get_ranges: fn(usize, usize, usize) -> Vec<std::ops::Range<usize>> =
+            |addr, size, cutoff_size: usize| {
                 let addr_added = addr.strict_add(size);
-                let mut counter = 0u64;
+                let mut counter = 0usize;
                 let mut ret = vec![];
                 loop {
                     let curr = addr.strict_add(counter.strict_mul(cutoff_size));
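
As an aside, here is a minimal standalone sketch of the kind of splitting `get_ranges` performs: an access of `size` bytes at `addr` is carved into chunks of at most `cutoff` bytes, measured from `addr`. The `split_access` helper name and the example values are hypothetical, not code from this file.

    // Break an access into chunks no longer than `cutoff` bytes each.
    fn split_access(addr: usize, size: usize, cutoff: usize) -> Vec<std::ops::Range<usize>> {
        let end = addr.checked_add(size).unwrap();
        (addr..end)
            .step_by(cutoff)
            .map(|start| start..end.min(start.checked_add(cutoff).unwrap()))
            .collect()
    }

    fn main() {
        // A 100-byte access with a 64-byte cutoff is reported as two ranges.
        assert_eq!(split_access(0x1000, 100, 64), vec![0x1000..0x1040, 0x1040..0x1064]);
    }
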
@@ -582,7 +580,7 @@ fn handle_segfault(
     // All x86, ARM, etc. instructions only have at most one memory operand
     // (thankfully!)
     // SAFETY: si_addr is safe to call
-    let addr = unsafe { siginfo.si_addr().addr().to_u64() };
+    let addr = unsafe { siginfo.si_addr().addr() };
     let page_addr = addr.strict_sub(addr.strict_rem(page_size));
 
     if ch_pages.iter().any(|pg| (*pg..pg.strict_add(page_size)).contains(&addr)) {
@@ -640,8 +638,8 @@ fn handle_segfault(
         let regs_bak = ptrace::getregs(pid).unwrap();
         new_regs = regs_bak;
         let ip_poststep = regs_bak.ip();
-        // We need to do reads/writes in 8-byte chunks
-        let diff = (ip_poststep.strict_sub(ip_prestep)).div_ceil(8);
+        // We need to do reads/writes in word-sized chunks
+        let diff = (ip_poststep.strict_sub(ip_prestep)).div_ceil(ARCH_WORD_SIZE);
         let instr = (ip_prestep..ip_prestep.strict_add(diff)).fold(vec![], |mut ret, ip| {
             // This only needs to be a valid pointer in the child process, not ours
             ret.append(
@@ -702,7 +700,7 @@ pub unsafe extern "C" fn mempr_off() {
         // we mustn't unwind from here, so...
         if libc::mprotect(
             PAGE_ADDR.load(Ordering::Relaxed).cast(),
-            len.try_into().unwrap_unchecked(),
+            len,
             libc::PROT_READ | libc::PROT_WRITE,
         ) != 0
         {
@@ -727,7 +725,7 @@ pub unsafe extern "C" fn mempr_on() {
     unsafe {
         if libc::mprotect(
             PAGE_ADDR.load(Ordering::Relaxed).cast(),
-            len.try_into().unwrap_unchecked(),
+            len,
             libc::PROT_NONE,
         ) != 0
         {
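
A standalone sketch of why the `try_into()` calls on `len` can be dropped in `mempr_off` and `mempr_on`: `libc::mprotect` takes its length as `size_t`, which corresponds to `usize` on the Rust side, so a `usize` page size passes straight through. This example maps its own throwaway page rather than reusing the statics above.

    use std::ptr;

    fn main() {
        // SAFETY: querying the page size has no side effects.
        let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize;

        // SAFETY: we map a fresh anonymous page and only touch that mapping.
        unsafe {
            let ptr = libc::mmap(
                ptr::null_mut(),
                page_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            );
            assert!(ptr != libc::MAP_FAILED);
            // `page_size` is a usize and matches mprotect's size_t parameter directly.
            assert_eq!(libc::mprotect(ptr, page_size, libc::PROT_NONE), 0);
            assert_eq!(libc::mprotect(ptr, page_size, libc::PROT_READ | libc::PROT_WRITE), 0);
            libc::munmap(ptr, page_size);
        }
    }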