
Commit a1ad0a9

committed
use usizes
1 parent f8214ac commit a1ad0a9

4 files changed, +39 −44 lines changed

src/alloc/isolated_alloc.rs

Lines changed: 3 additions & 5 deletions
@@ -3,8 +3,6 @@ use std::alloc::{self, Layout};
 use nix::sys::mman;
 use rustc_index::bit_set::DenseBitSet;
 
-use crate::helpers::ToU64;
-
 /// How many bytes of memory each bit in the bitset represents.
 const COMPRESSION_FACTOR: usize = 4;
 
@@ -274,12 +272,12 @@ impl IsolatedAlloc {
     }
 
     /// Returns a vector of page addresses managed by the allocator.
-    pub fn pages(&self) -> Vec<u64> {
+    pub fn pages(&self) -> Vec<usize> {
         let mut pages: Vec<_> =
-            self.page_ptrs.clone().into_iter().map(|p| p.addr().to_u64()).collect();
+            self.page_ptrs.clone().into_iter().map(|p| p.addr()).collect();
         self.huge_ptrs.iter().for_each(|(ptr, size)| {
             for i in 0..size / self.page_size {
-                pages.push(unsafe { ptr.add(i * self.page_size).expose_provenance().to_u64() });
+                pages.push(unsafe { ptr.add(i * self.page_size).expose_provenance() });
             }
         });
         pages
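For reference, a minimal standalone sketch (not part of the commit) of the address arithmetic the `pages()` hunk performs: a large allocation's base pointer expands into one usize address per page. The `page_size` value and the three-page buffer are assumptions for illustration, and plain `addr()` stands in for `expose_provenance()`.

fn page_addrs(base: *const u8, size: usize, page_size: usize) -> Vec<usize> {
    let mut pages = Vec::new();
    for i in 0..size / page_size {
        // Each page-aligned offset becomes one usize entry, mirroring the loop above.
        pages.push(unsafe { base.add(i * page_size) }.addr());
    }
    pages
}

fn main() {
    let page_size = 4096usize; // assumed host page size
    let huge = vec![0u8; 3 * page_size]; // stand-in for a "huge" allocation
    let pages = page_addrs(huge.as_ptr(), huge.len(), page_size);
    assert_eq!(pages.len(), 3);
    assert_eq!(pages[1], pages[0] + page_size);
}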

src/alloc_addresses/mod.rs

Lines changed: 8 additions & 8 deletions
@@ -501,16 +501,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let alloc_cutoff = events.alloc_cutoff;
         // FIXME: these could be u16s
         //eprintln!("bare accesses: {:#0x?}", events.acc_events);
-        let mut reads_bitset: Vec<(DenseBitSet<u32>, u64)> = vec![];
-        let mut writes_bitset: Vec<(DenseBitSet<u32>, u64)> = vec![];
+        let mut reads_bitset: Vec<(DenseBitSet<u32>, usize)> = vec![];
+        let mut writes_bitset: Vec<(DenseBitSet<u32>, usize)> = vec![];
         for acc in events.acc_events {
             match acc {
                 // Reads have more logic to them since we don't want to count
                 // them at all if a write already occurred but obviously we do
                 // if the write has yet to happen
                 shims::trace::AccessEvent::Read(range) => {
                     // The tracer ensures access ranges don't go over this alignment
-                    let pg = range.start - range.start % alloc_cutoff.to_u64();
+                    let pg = range.start - range.start % alloc_cutoff;
                     for byte in range {
                         #[expect(clippy::as_conversions)]
                         let ofs = (byte - pg) as u32;
@@ -536,7 +536,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 // at the same time as reads for this to be useful. We just
                 // insert ranges into the appropriate bitset
                 shims::trace::AccessEvent::Write(range) => {
-                    let pg = range.start - range.start % alloc_cutoff.to_u64();
+                    let pg = range.start - range.start % alloc_cutoff;
                     #[expect(clippy::as_conversions)]
                     let rg_norm = ((range.start - pg) as u32)..((range.end - pg) as u32);
                     let pos = writes_bitset.iter().position(|(_, p)| *p == pg).unwrap_or({
@@ -547,9 +547,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 }
             }
         }
-        // The rustc side expects a `Vec<Range<u64>>`, not our monstrosity, so
+        // The rustc side expects a `Vec<Range<usize>>`, not our monstrosity, so
         // this turns our bitset vector into one of ranges
-        let decompress: fn(Vec<(DenseBitSet<u32>, u64)>, &mut Vec<std::ops::Range<u64>>) =
+        let decompress: fn(Vec<(DenseBitSet<u32>, usize)>, &mut Vec<std::ops::Range<usize>>) =
             |sets, into| {
                 sets.into_iter().for_each(|(set, p)| {
                     // Iterates over the indices of 1s and so long as they
@@ -564,7 +564,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                             opt_so_far = Some(so_far);
                         } else {
                             // When there's a jump, push what we have so far and start anew
-                            into.push((so_far.start as u64 + p)..(so_far.end as u64 + p));
+                            into.push((so_far.start as usize + p)..(so_far.end as usize + p));
                             opt_so_far = Some(bit..bit + 1);
                         },
                     // 1st time we obviously need to insert it
@@ -575,7 +575,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                     // (or this set was empty)
                     #[expect(clippy::as_conversions)]
                     if let Some(so_far) = opt_so_far {
-                        into.push((so_far.start as u64 + p)..(so_far.end as u64 + p));
+                        into.push((so_far.start as usize + p)..(so_far.end as usize + p));
                     }
                 });
             };
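Since the decompression closure is spread across several hunks, here is a compact self-contained sketch of the same idea. `DenseBitSet<u32>` is replaced by a sorted Vec<u32> of set bit indices (an assumption made purely to keep the sketch dependency-free); consecutive byte offsets merge into one Range<usize> rebased onto the page address `p`.

use std::ops::Range;

fn decompress(sets: Vec<(Vec<u32>, usize)>, into: &mut Vec<Range<usize>>) {
    for (set, p) in sets {
        let mut so_far: Option<Range<u32>> = None;
        for bit in set {
            so_far = match so_far {
                // Contiguous with the current run: extend it
                Some(r) if r.end == bit => Some(r.start..bit + 1),
                // A jump: flush the finished run, rebased onto the page address
                Some(r) => {
                    into.push((r.start as usize + p)..(r.end as usize + p));
                    Some(bit..bit + 1)
                }
                // First set bit of this page
                None => Some(bit..bit + 1),
            };
        }
        // Flush whatever is left (unless the set was empty)
        if let Some(r) = so_far {
            into.push((r.start as usize + p)..(r.end as usize + p));
        }
    }
}

fn main() {
    let mut ranges = Vec::new();
    // Offsets 1, 2, 3 and 7 on a page starting at 0x1000 (values are made up)
    decompress(vec![(vec![1, 2, 3, 7], 0x1000)], &mut ranges);
    assert_eq!(ranges, vec![0x1001..0x1004, 0x1007..0x1008]);
}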

src/shims/trace/mod.rs

Lines changed: 3 additions & 4 deletions
@@ -13,11 +13,10 @@ const FAKE_STACK_SIZE: usize = 1024;
 struct StartFfiInfo {
     /// A vector of page addresses. These should have been automatically obtained
     /// with `IsolatedAlloc::pages` and prepared with `IsolatedAlloc::prepare_ffi`.
-    page_ptrs: Vec<u64>,
+    page_ptrs: Vec<usize>,
     /// The address of an allocation that can serve as a temporary stack.
     /// This should be a leaked `Box<[u8; FAKE_STACK_SIZE]>` cast to an int.
     stack_ptr: usize,
-    //pid: i32,
 }
 
 #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
@@ -31,9 +30,9 @@ enum TraceRequest {
 #[derive(serde::Serialize, serde::Deserialize, Debug)]
 pub enum AccessEvent {
     /// A read may have occurred on no more than the specified address range.
-    Read(Range<u64>),
+    Read(Range<usize>),
     /// A write may have occurred on no more than the specified address range.
-    Write(Range<u64>),
+    Write(Range<usize>),
 }
 
 /// The final results of an FFI trace, containing every relevant event detected
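As a small usage illustration (not from the commit): with `Range<usize>`, an access event can be built straight from a pointer's `.addr()` with no `to_u64()` step. The enum below is a local copy without the serde derives, and the access size is a made-up value.

use std::ops::Range;

#[derive(Debug, PartialEq)]
pub enum AccessEvent {
    /// A read may have occurred on no more than the specified address range.
    Read(Range<usize>),
    /// A write may have occurred on no more than the specified address range.
    Write(Range<usize>),
}

fn main() {
    let buf = [0u8; 64];
    // A pointer's `.addr()` is already a usize, so it slots straight into the range.
    let addr = buf.as_ptr().addr();
    let access_size = 8usize; // hypothetical width of the faulting access
    let read = AccessEvent::Read(addr..addr + access_size);
    let _write = AccessEvent::Write(addr..addr + 1); // same shape for writes
    assert_eq!(read, AccessEvent::Read(addr..addr + 8));
}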

src/shims/trace/parent.rs

Lines changed: 25 additions & 27 deletions
@@ -1,10 +1,9 @@
-use std::sync::atomic::{AtomicPtr, AtomicU64};
+use std::sync::atomic::{AtomicPtr, AtomicUsize};
 
 use ipc_channel::ipc;
 use nix::sys::{ptrace, signal, wait};
 use nix::unistd;
 
-use crate::helpers::ToU64;
 use crate::shims::trace::{AccessEvent, FAKE_STACK_SIZE, MemEvents, StartFfiInfo, TraceRequest};
 
 /// The flags to use when calling `waitid()`.
@@ -16,27 +15,26 @@ const WAIT_FLAGS: wait::WaitPidFlag =
 /// Arch-specific maximum size a single access might perform. x86 value is set
 /// assuming nothing bigger than AVX-512 is available.
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-const ARCH_MAX_ACCESS_SIZE: u64 = 64;
+const ARCH_MAX_ACCESS_SIZE: usize = 64;
 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
-const ARCH_MAX_ACCESS_SIZE: u64 = 16;
+const ARCH_MAX_ACCESS_SIZE: usize = 16;
 #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
-const ARCH_MAX_ACCESS_SIZE: u64 = 16;
+const ARCH_MAX_ACCESS_SIZE: usize = 16;
 
-/// The default word size on a given platform, in bytes. Only for targets where
-/// this is actually used.
-#[cfg(target_arch = "arm")]
-const ARCH_WORD_SIZE: u64 = 4;
-#[cfg(target_arch = "aarch64")]
-const ARCH_WORD_SIZE: u64 = 8;
+/// The default word size on a given platform, in bytes.
+#[cfg(any(target_arch = "x86", target_arch = "arm", target_arch = "riscv32"))]
+const ARCH_WORD_SIZE: usize = 4;
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64"))]
+const ARCH_WORD_SIZE: usize = 8;
 
 /// The address of the page set to be edited, initialised to a sentinel null
 /// pointer.
 static PAGE_ADDR: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
 /// The host pagesize, initialised to a sentinel zero value.
-pub static PAGE_SIZE: AtomicU64 = AtomicU64::new(0);
+pub static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
 /// How many consecutive pages to unprotect. 1 by default, unlikely to be set
 /// higher than 2.
-static PAGE_COUNT: AtomicU64 = AtomicU64::new(1);
+static PAGE_COUNT: AtomicUsize = AtomicUsize::new(1);
 
 /// Allows us to get common arguments from the `user_regs_t` across architectures.
 /// Normally this would land us ABI hell, but thankfully all of our usecases
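A quick sketch of why the switch to `AtomicUsize` simplifies later arithmetic: the unprotect length becomes a plain usize product that can be handed to mprotect-style calls directly, with no try_into() conversion. The statics here are local copies of the ones above, and 4096 stands in for the host page size that the real code queries at startup.

use std::sync::atomic::{AtomicUsize, Ordering};

static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
static PAGE_COUNT: AtomicUsize = AtomicUsize::new(1);

fn main() {
    // Assumed host page size; the real value comes from the system at startup.
    PAGE_SIZE.store(4096, Ordering::Relaxed);
    // Everything is usize, so the protected length is just a multiplication.
    let len = PAGE_SIZE.load(Ordering::Relaxed) * PAGE_COUNT.load(Ordering::Relaxed);
    assert_eq!(len, 4096);
}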
@@ -231,7 +229,7 @@ pub fn sv_loop(
     listener: ChildListener,
     event_tx: ipc::IpcSender<MemEvents>,
     confirm_tx: ipc::IpcSender<()>,
-    page_size: u64,
+    page_size: usize,
 ) -> Result<!, Option<i32>> {
     // Things that we return to the child process
     let mut acc_events = Vec::new();
@@ -278,7 +276,7 @@ pub fn sv_loop(
             ExecEvent::End => {
                 // Hand over the access info we traced
                 event_tx
-                    .send(MemEvents { acc_events, alloc_cutoff: page_size.try_into().unwrap() })
+                    .send(MemEvents { acc_events, alloc_cutoff: page_size })
                     .unwrap();
                 // And reset our values
                 acc_events = Vec::new();
@@ -400,18 +398,18 @@ fn wait_for_signal(
 /// or kills the child and returns the appropriate error otherwise.
 fn handle_segfault(
     pid: unistd::Pid,
-    ch_pages: &[u64],
+    ch_pages: &[usize],
     ch_stack: usize,
-    page_size: u64,
+    page_size: usize,
     cs: &capstone::Capstone,
     acc_events: &mut Vec<AccessEvent>,
 ) -> Result<(), ExecError> {
     /// This is just here to not pollute the main namespace with `capstone::prelude::*`.
     #[inline]
     fn capstone_disassemble(
         instr: &[u8],
-        addr: u64,
-        page_size: u64,
+        addr: usize,
+        page_size: usize,
         cs: &capstone::Capstone,
         acc_events: &mut Vec<AccessEvent>,
     ) -> capstone::CsResult<()> {
@@ -425,10 +423,10 @@ fn handle_segfault(
         let arch_detail = ins_detail.arch_detail();
 
         // Take an (addr, size, cutoff_size) and split an access into multiple if needed
-        let get_ranges: fn(u64, u64, u64) -> Vec<std::ops::Range<u64>> =
-            |addr, size, cutoff_size: u64| {
+        let get_ranges: fn(usize, usize, usize) -> Vec<std::ops::Range<usize>> =
+            |addr, size, cutoff_size: usize| {
                 let addr_added = addr.strict_add(size);
-                let mut counter = 0u64;
+                let mut counter = 0usize;
                 let mut ret = vec![];
                 loop {
                     let curr = addr.strict_add(counter.strict_mul(cutoff_size));
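The hunk cuts off before the body of `get_ranges` finishes, so the following is only a guess at the general shape of such a splitter, not the actual implementation: an access of `size` bytes at `addr` is broken into pieces of at most `cutoff_size` bytes, now entirely in usize.

fn split_access(addr: usize, size: usize, cutoff_size: usize) -> Vec<std::ops::Range<usize>> {
    let end = addr.strict_add(size);
    let mut ret = vec![];
    let mut start = addr;
    while start < end {
        // Each piece is capped at cutoff_size bytes; the last one may be shorter.
        let stop = end.min(start.strict_add(cutoff_size));
        ret.push(start..stop);
        start = stop;
    }
    ret
}

fn main() {
    // e.g. a 10-byte access with a 4-byte cutoff becomes 4 + 4 + 2
    assert_eq!(split_access(100, 10, 4), vec![100..104, 104..108, 108..110]);
}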
@@ -582,7 +580,7 @@ fn handle_segfault(
     // All x86, ARM, etc. instructions only have at most one memory operand
     // (thankfully!)
     // SAFETY: si_addr is safe to call
-    let addr = unsafe { siginfo.si_addr().addr().to_u64() };
+    let addr = unsafe { siginfo.si_addr().addr() };
     let page_addr = addr.strict_sub(addr.strict_rem(page_size));
 
     if ch_pages.iter().any(|pg| (*pg..pg.strict_add(page_size)).contains(&addr)) {
@@ -640,8 +638,8 @@ fn handle_segfault(
     let regs_bak = ptrace::getregs(pid).unwrap();
     new_regs = regs_bak;
     let ip_poststep = regs_bak.ip();
-    // We need to do reads/writes in 8-byte chunks
-    let diff = (ip_poststep.strict_sub(ip_prestep)).div_ceil(8);
+    // We need to do reads/writes in word-sized chunks
+    let diff = (ip_poststep.strict_sub(ip_prestep)).div_ceil(ARCH_WORD_SIZE);
     let instr = (ip_prestep..ip_prestep.strict_add(diff)).fold(vec![], |mut ret, ip| {
         // This only needs to be a valid pointer in the child process, not ours
         ret.append(
@@ -702,7 +700,7 @@ pub unsafe extern "C" fn mempr_off() {
     // we mustn't unwind from here, so...
     if libc::mprotect(
         PAGE_ADDR.load(Ordering::Relaxed).cast(),
-        len.try_into().unwrap_unchecked(),
+        len,
         libc::PROT_READ | libc::PROT_WRITE,
     ) != 0
     {
@@ -727,7 +725,7 @@ pub unsafe extern "C" fn mempr_on() {
     unsafe {
         if libc::mprotect(
             PAGE_ADDR.load(Ordering::Relaxed).cast(),
-            len.try_into().unwrap_unchecked(),
+            len,
             libc::PROT_NONE,
         ) != 0
         {
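A worked example of the chunking arithmetic introduced in the `div_ceil(ARCH_WORD_SIZE)` hunk above: the number of word-sized ptrace reads needed to cover the stepped-over instruction bytes rounds up rather than truncating. The word size and instruction-pointer values below are assumptions for illustration (x86_64-like).

const ARCH_WORD_SIZE: usize = 8;

fn main() {
    let ip_prestep: usize = 0x1000;
    let ip_poststep: usize = 0x100b; // 11 bytes were stepped over
    // div_ceil rounds up, so 11 bytes need 2 word-sized reads rather than 1.
    let words = (ip_poststep - ip_prestep).div_ceil(ARCH_WORD_SIZE);
    assert_eq!(words, 2);
}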
