Skip to content

Commit 18da864

Browse files
authored
Merge pull request #4405 from nia-e/standalone-ptrace
trace: implement supervisor components for tracing
2 parents 6bbcce9 + 6ccb6a7 commit 18da864

File tree

6 files changed

+619
-112
lines changed

6 files changed

+619
-112
lines changed

Cargo.lock

Lines changed: 21 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -44,6 +44,7 @@ libloading = "0.8"
4444
nix = { version = "0.30.1", features = ["mman", "ptrace", "signal"] }
4545
ipc-channel = "0.19.0"
4646
serde = { version = "1.0.219", features = ["derive"] }
47+
capstone = "0.13"
4748

4849
[dev-dependencies]
4950
ui_test = "0.29.1"

src/alloc/isolated_alloc.rs

Lines changed: 56 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,7 @@
11
use std::alloc::Layout;
2+
use std::ptr::NonNull;
23

4+
use nix::sys::mman;
35
use rustc_index::bit_set::DenseBitSet;
46

57
/// How many bytes of memory each bit in the bitset represents.
@@ -12,7 +14,7 @@ pub struct IsolatedAlloc {
1214
/// Pointers to page-aligned memory that has been claimed by the allocator.
1315
/// Every pointer here must point to a page-sized allocation claimed via
1416
/// mmap. These pointers are used for "small" allocations.
15-
page_ptrs: Vec<*mut u8>,
17+
page_ptrs: Vec<NonNull<u8>>,
1618
/// Metadata about which bytes have been allocated on each page. The length
1719
/// of this vector must be the same as that of `page_ptrs`, and the domain
1820
/// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
@@ -24,7 +26,7 @@ pub struct IsolatedAlloc {
2426
page_infos: Vec<DenseBitSet<usize>>,
2527
/// Pointers to multiple-page-sized allocations. These must also be page-aligned,
2628
/// with their size stored as the second element of the vector.
27-
huge_ptrs: Vec<(*mut u8, usize)>,
29+
huge_ptrs: Vec<(NonNull<u8>, usize)>,
2830
/// The host (not emulated) page size.
2931
page_size: usize,
3032
}
@@ -137,7 +139,7 @@ impl IsolatedAlloc {
137139
unsafe fn alloc_small(
138140
page_size: usize,
139141
layout: Layout,
140-
page: *mut u8,
142+
page: NonNull<u8>,
141143
pinfo: &mut DenseBitSet<usize>,
142144
zeroed: bool,
143145
) -> Option<*mut u8> {
@@ -164,15 +166,15 @@ impl IsolatedAlloc {
164166
// zero out, even if we allocated more
165167
ptr.write_bytes(0, layout.size());
166168
}
167-
return Some(ptr);
169+
return Some(ptr.as_ptr());
168170
}
169171
}
170172
}
171173
None
172174
}
173175

174176
/// Expands the available memory pool by adding one page.
175-
fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
177+
fn add_page(&mut self) -> (NonNull<u8>, &mut DenseBitSet<usize>) {
176178
// SAFETY: mmap is always safe to call when requesting anonymous memory
177179
let page_ptr = unsafe {
178180
libc::mmap(
@@ -189,8 +191,8 @@ impl IsolatedAlloc {
189191
// `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
190192
assert!(self.page_size % COMPRESSION_FACTOR == 0);
191193
self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
192-
self.page_ptrs.push(page_ptr);
193-
(page_ptr, self.page_infos.last_mut().unwrap())
194+
self.page_ptrs.push(NonNull::new(page_ptr).unwrap());
195+
(NonNull::new(page_ptr).unwrap(), self.page_infos.last_mut().unwrap())
194196
}
195197

196198
/// Allocates in multiples of one page on the host system.
@@ -212,7 +214,7 @@ impl IsolatedAlloc {
212214
.cast::<u8>()
213215
};
214216
assert_ne!(ret.addr(), usize::MAX, "mmap failed");
215-
self.huge_ptrs.push((ret, size));
217+
self.huge_ptrs.push((NonNull::new(ret).unwrap(), size));
216218
// huge_normalized_layout ensures that we've overallocated enough space
217219
// for this to be valid.
218220
ret.map_addr(|a| a.next_multiple_of(layout.align()))
@@ -246,7 +248,7 @@ impl IsolatedAlloc {
246248
// from us pointing to this page, and we know it was allocated
247249
// in add_page as exactly a single page.
248250
unsafe {
249-
assert_eq!(libc::munmap(page_ptr.cast(), self.page_size), 0);
251+
assert_eq!(libc::munmap(page_ptr.as_ptr().cast(), self.page_size), 0);
250252
}
251253
}
252254
}
@@ -265,7 +267,7 @@ impl IsolatedAlloc {
265267
// This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
266268
let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
267269
.enumerate()
268-
.find(|(_, (page, _))| page.addr() == page_addr);
270+
.find(|(_, (page, _))| page.addr().get() == page_addr);
269271
let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
270272
panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
271273
};
@@ -287,30 +289,67 @@ impl IsolatedAlloc {
287289
.huge_ptrs
288290
.iter()
289291
.position(|&(pg, size)| {
290-
pg.addr() <= ptr.addr() && ptr.addr() < pg.addr().strict_add(size)
292+
pg.addr().get() <= ptr.addr() && ptr.addr() < pg.addr().get().strict_add(size)
291293
})
292294
.expect("Freeing unallocated pages");
293295
// And kick it from the list
294296
let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
295297
assert_eq!(size, size2, "got wrong layout in dealloc");
296298
// SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
297299
unsafe {
298-
let ret = libc::munmap(un_offset_ptr.cast(), size);
300+
let ret = libc::munmap(un_offset_ptr.as_ptr().cast(), size);
299301
assert_eq!(ret, 0);
300302
}
301303
}
302304

303305
/// Returns a vector of page addresses managed by the allocator.
304306
pub fn pages(&self) -> Vec<usize> {
305-
let mut pages: Vec<_> =
306-
self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance()).collect();
307-
for (ptr, size) in &self.huge_ptrs {
307+
let mut pages: Vec<usize> =
308+
self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance().get()).collect();
309+
self.huge_ptrs.iter().for_each(|(ptr, size)| {
308310
for i in 0..size / self.page_size {
309-
pages.push(ptr.expose_provenance().strict_add(i * self.page_size));
311+
pages.push(ptr.expose_provenance().get().strict_add(i * self.page_size));
310312
}
311-
}
313+
});
312314
pages
313315
}
316+
317+
/// Protects all owned memory as `PROT_NONE`, preventing accesses.
318+
///
319+
/// SAFETY: Accessing memory after this point will result in a segfault
320+
/// unless it is first unprotected.
321+
pub unsafe fn prepare_ffi(&mut self) -> Result<(), nix::errno::Errno> {
322+
let prot = mman::ProtFlags::PROT_NONE;
323+
unsafe { self.mprotect(prot) }
324+
}
325+
326+
/// Deprotects all owned memory by setting it to RW. Erroring here is very
327+
/// likely unrecoverable, so it may panic if applying those permissions
328+
/// fails.
329+
pub fn unprep_ffi(&mut self) {
330+
let prot = mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE;
331+
unsafe {
332+
self.mprotect(prot).unwrap();
333+
}
334+
}
335+
336+
/// Applies `prot` to every page managed by the allocator.
337+
///
338+
/// SAFETY: Accessing memory in violation of the protection flags will
339+
/// trigger a segfault.
340+
unsafe fn mprotect(&mut self, prot: mman::ProtFlags) -> Result<(), nix::errno::Errno> {
341+
for &pg in &self.page_ptrs {
342+
unsafe {
343+
mman::mprotect(pg.cast(), self.page_size, prot)?;
344+
}
345+
}
346+
for &(hpg, size) in &self.huge_ptrs {
347+
unsafe {
348+
mman::mprotect(hpg.cast(), size.next_multiple_of(self.page_size), prot)?;
349+
}
350+
}
351+
Ok(())
352+
}
314353
}
315354

316355
#[cfg(test)]

src/shims/native_lib.rs

Lines changed: 12 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -229,7 +229,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
229229
.collect::<Vec<libffi::high::Arg<'_>>>();
230230

231231
// Call the function and store output, depending on return type in the function signature.
232-
let (ret, _) = this.call_native_with_args(link_name, dest, code_ptr, libffi_args)?;
232+
let (ret, maybe_memevents) =
233+
this.call_native_with_args(link_name, dest, code_ptr, libffi_args)?;
234+
235+
if cfg!(target_os = "linux")
236+
&& let Some(events) = maybe_memevents
237+
{
238+
trace!("Registered FFI events:\n{events:#0x?}");
239+
}
233240

234241
this.write_immediate(*ret, dest)?;
235242
interp_ok(true)
@@ -250,15 +257,15 @@ unsafe fn do_native_call<T: libffi::high::CType>(
250257

251258
unsafe {
252259
if let Some(alloc) = alloc {
253-
// SAFETY: We don't touch the machine memory past this point
260+
// SAFETY: We don't touch the machine memory past this point.
254261
let (guard, stack_ptr) = Supervisor::start_ffi(alloc.clone());
255-
// SAFETY: Upheld by caller
262+
// SAFETY: Upheld by caller.
256263
let ret = ffi::call(ptr, args);
257264
// SAFETY: We got the guard and stack pointer from start_ffi, and
258-
// the allocator is the same
265+
// the allocator is the same.
259266
(ret, Supervisor::end_ffi(guard, alloc, stack_ptr))
260267
} else {
261-
// SAFETY: Upheld by caller
268+
// SAFETY: Upheld by caller.
262269
(ffi::call(ptr, args), None)
263270
}
264271
}

0 commit comments

Comments (0)