Skip to content

Commit 4db90d6

Browse files
committed
die if ptrace is broken
1 parent 35b2626 commit 4db90d6

File tree

4 files changed

+143
-64
lines changed

4 files changed

+143
-64
lines changed

Cargo.lock

Lines changed: 21 additions & 10 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -42,9 +42,10 @@ libloading = "0.8"
4242
nix = { version = "0.30.1", features = ["mman", "ptrace", "signal"] }
4343
ipc-channel = "0.19.0"
4444
serde = { version = "1.0.219", features = ["derive"] }
45+
capstone = "0.13"
4546

46-
[target.'cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))'.dependencies]
47-
iced-x86 = "1.21.0"
47+
#[target.'cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))'.dependencies]
48+
#iced-x86 = "1.21.0"
4849

4950
[target.'cfg(target_family = "windows")'.dependencies]
5051
windows-sys = { version = "0.59", features = [

src/shims/native_lib.rs

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -182,7 +182,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
182182

183183
// Prepare all exposed memory, depending on whether we have a supervisor process.
184184
#[cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))]
185-
this.prepare_exposed_for_native_call(super::trace::Supervisor::init().is_err())?;
185+
if super::trace::Supervisor::init().is_ok() {
186+
this.prepare_exposed_for_native_call(false)?;
187+
} else {
188+
//this.prepare_exposed_for_native_call(true)?;
189+
//eprintln!("Oh noes!")
190+
panic!("No ptrace!");
191+
}
186192
#[cfg(not(all(unix, any(target_arch = "x86", target_arch = "x86_64"))))]
187193
this.prepare_exposed_for_native_call(true)?;
188194

src/shims/trace.rs

Lines changed: 112 additions & 51 deletions
Original file line number | Diff line number | Diff line change
@@ -7,16 +7,14 @@ use nix::unistd;
77
use crate::discrete_alloc;
88
use crate::helpers::ToU64;
99

10-
#[cfg(target_pointer_width = "64")]
11-
const BITS: u32 = 64;
12-
#[cfg(target_pointer_width = "32")]
13-
const BITS: u32 = 32;
14-
1510
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
1611
const BREAKPT_INSTR: isize = 0xCC;
1712
#[cfg(target_arch = "aarch64")]
1813
const BREAKPT_INSTR: isize = 0xD420;
1914

15+
// FIXME: Make this architecture-specific
16+
const ARCH_MAX_ACCESS_SIZE: u64 = 256;
17+
2018
// We do NOT ever want to block the child from accessing this!!
2119
static SUPERVISOR: std::sync::Mutex<Option<Supervisor>> = std::sync::Mutex::new(None);
2220
static mut PAGE_ADDR: *mut libc::c_void = std::ptr::null_mut();
@@ -121,7 +119,7 @@ impl Supervisor {
121119
sv_loop(listener, t_event)
122120
});
123121
eprintln!("{p:?}");
124-
std::process::abort();
122+
std::process::exit(-1);
125123
}
126124
unistd::ForkResult::Child => {
127125
let mut sv = SUPERVISOR.lock().map_err(|_| ())?;
@@ -256,6 +254,9 @@ fn sv_loop(listener: ChildListener, t_event: ipc::IpcSender<MemEvents>) -> ! {
256254
let mut writes: Vec<Range<u64>> = vec![];
257255
let mut mappings: Vec<Range<u64>> = vec![];
258256

257+
// An instance of the Capstone disassembler, so we don't spawn one on every access
258+
let cs = get_disasm();
259+
259260
// Memory allocated on the MiriMachine
260261
let mut ch_pages = vec![];
261262

@@ -290,7 +291,7 @@ fn sv_loop(listener: ChildListener, t_event: ipc::IpcSender<MemEvents>) -> ! {
290291
match signal {
291292
signal::SIGSEGV => {
292293
if let Err(ret) =
293-
handle_segfault(pid, &ch_pages, &mut reads, &mut writes)
294+
handle_segfault(pid, &ch_pages, &cs, &mut reads, &mut writes)
294295
{
295296
retcode = ret;
296297
break 'listen;
@@ -441,6 +442,40 @@ fn sv_loop(listener: ChildListener, t_event: ipc::IpcSender<MemEvents>) -> ! {
441442
std::process::exit(retcode);
442443
}
443444

445+
fn get_disasm() -> capstone::Capstone {
446+
use capstone::prelude::*;
447+
let cs_pre = Capstone::new();
448+
{
449+
#[cfg(target_arch = "x86_64")]
450+
{
451+
cs_pre.x86().mode(arch::x86::ArchMode::Mode64)
452+
}
453+
#[cfg(target_arch = "x86")]
454+
{
455+
cs_pre.x86().mode(arch::x86::ArchMode::Mode32)
456+
}
457+
#[cfg(target_arch = "aarch64")]
458+
{
459+
cs_pre.arm64()
460+
}
461+
#[cfg(target_arch = "arm")]
462+
{
463+
cs_pre.arm()
464+
}
465+
#[cfg(target_arch = "riscv64")]
466+
{
467+
cs_pre.riscv().mode(arch::riscv::ArchMode::RiscV64)
468+
}
469+
#[cfg(target_arch = "riscv32")]
470+
{
471+
cs_pre.riscv().mode(arch::riscv::ArchMode::RiscV32)
472+
}
473+
}
474+
.detail(true)
475+
.build()
476+
.unwrap()
477+
}
478+
444479
/// Waits for a specific signal to be triggered.
445480
fn wait_for_signal(
446481
pid: unistd::Pid,
@@ -545,9 +580,74 @@ fn handle_munmap(
545580
fn handle_segfault(
546581
pid: unistd::Pid,
547582
ch_pages: &[u64],
583+
cs: &capstone::Capstone,
548584
reads: &mut Vec<Range<u64>>,
549585
writes: &mut Vec<Range<u64>>,
550586
) -> Result<(), i32> {
587+
// This is just here to not pollute the main namespace with capstone::prelude::*
588+
// and so that we can get a Result instead of just unwrapping on error
589+
#[inline]
590+
fn capstone_disassemble(
591+
instr: &[u8],
592+
addr: u64,
593+
cs: &capstone::Capstone,
594+
reads: &mut Vec<Range<u64>>,
595+
writes: &mut Vec<Range<u64>>,
596+
) -> capstone::CsResult<()> {
597+
use capstone::prelude::*;
598+
599+
let insns = cs.disasm_count(instr, 0x1000, 1)?;
600+
let ins_detail = cs.insn_detail(&insns[0])?;
601+
let arch_detail = ins_detail.arch_detail();
602+
603+
for op in arch_detail.operands() {
604+
match op {
605+
arch::ArchOperand::X86Operand(x86_operand) => {
606+
let size: u64 = x86_operand.size.into();
607+
match x86_operand.op_type {
608+
arch::x86::X86OperandType::Mem(_) => {
609+
// It's called a "RegAccessType" but it also applies to memory
610+
let acc_ty = x86_operand.access.unwrap();
611+
if acc_ty.is_readable() {
612+
reads.push(addr..addr.strict_add(size));
613+
}
614+
if acc_ty.is_writable() {
615+
writes.push(addr..addr.strict_add(size));
616+
}
617+
}
618+
_ => (),
619+
}
620+
}
621+
arch::ArchOperand::Arm64Operand(arm64_operand) => {
622+
// Annoyingly, we don't get the size here, so just be pessimistic for now
623+
match arm64_operand.op_type {
624+
arch::arm64::Arm64OperandType::Mem(_arm64_op_mem) => {
625+
//
626+
}
627+
_ => (),
628+
}
629+
}
630+
arch::ArchOperand::ArmOperand(arm_operand) =>
631+
match arm_operand.op_type {
632+
arch::arm::ArmOperandType::Mem(_) => {
633+
let acc_ty = arm_operand.access.unwrap();
634+
if acc_ty.is_readable() {
635+
reads.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
636+
}
637+
if acc_ty.is_writable() {
638+
writes.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
639+
}
640+
}
641+
_ => (),
642+
},
643+
arch::ArchOperand::RiscVOperand(_risc_voperand) => todo!(),
644+
_ => unimplemented!(),
645+
}
646+
}
647+
648+
Ok(())
649+
}
650+
551651
let siginfo = ptrace::getsiginfo(pid).unwrap();
552652
let addr = unsafe { siginfo.si_addr().addr().to_u64() };
553653
let page_addr = addr.strict_sub(addr.strict_rem(unsafe { PAGE_SIZE }));
@@ -596,51 +696,12 @@ fn handle_segfault(
596696
);
597697
ret
598698
});
599-
let mut decoder = iced_x86::Decoder::new(BITS, instr.as_slice(), 0);
600-
let mut fac = iced_x86::InstructionInfoFactory::new();
601-
let instr = decoder.decode();
602-
let memsize = instr.op_code().memory_size().size().to_u64();
603-
let mem = fac.info(&instr).used_memory();
604-
605-
for acc in mem {
606-
let mut r = false;
607-
let mut w = false;
608-
match acc.access() {
609-
iced_x86::OpAccess::Read | iced_x86::OpAccess::CondRead => {
610-
r = true;
611-
}
612-
iced_x86::OpAccess::Write | iced_x86::OpAccess::CondWrite => {
613-
w = true;
614-
}
615-
iced_x86::OpAccess::ReadWrite | iced_x86::OpAccess::ReadCondWrite => {
616-
r = true;
617-
w = true;
618-
}
619-
_ => (),
620-
}
621-
let addr_end = addr.strict_add(memsize);
622-
if r {
623-
if let Some(idx) = reads.iter().position(|r| r.start <= addr_end && addr <= r.end) {
624-
let mut rg = reads[idx].clone();
625-
rg.start = std::cmp::min(rg.start, addr);
626-
rg.end = std::cmp::max(rg.end, addr_end);
627-
reads[idx] = rg;
628-
} else {
629-
reads.push(addr..addr_end);
630-
}
631-
}
632-
if w {
633-
if let Some(idx) = writes.iter().position(|r| r.start <= addr_end && addr <= r.end)
634-
{
635-
let mut rg = writes[idx].clone();
636-
rg.start = std::cmp::min(rg.start, addr);
637-
rg.end = std::cmp::max(rg.end, addr_end);
638-
writes[idx] = rg;
639-
} else {
640-
writes.push(addr..addr_end);
641-
}
642-
}
699+
700+
if capstone_disassemble(&instr, addr, cs, reads, writes).is_err() {
701+
reads.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
702+
writes.push(addr..addr.strict_add(ARCH_MAX_ACCESS_SIZE));
643703
}
704+
644705
#[expect(clippy::as_conversions)]
645706
new_regs.set_ip(mempr_on as usize);
646707
new_regs.set_sp(unsafe { (&raw mut CLICK_HERE_4_FREE_STACK[512]).addr() });

0 commit comments

Comments (0)