@@ -10,6 +10,11 @@ use super::messages::{Confirmation, MemEvents, StartFfiInfo, TraceRequest};
 use super::parent::{ChildListener, sv_loop};
 use crate::alloc::isolated_alloc::IsolatedAlloc;

+/// A handle to the single, shared supervisor process across all `MiriMachine`s.
+/// Since it would be very difficult to trace multiple FFI calls in parallel, we
+/// need to ensure that either (a) only one `MiriMachine` is performing an FFI call
+/// at any given time, or (b) there are distinct supervisor and child processes for
+/// each machine. The former was chosen here.
 static SUPERVISOR: std::sync::Mutex<Option<Supervisor>> = std::sync::Mutex::new(None);

 /// The main means of communication between the child and parent process,
@@ -24,6 +29,12 @@ pub struct Supervisor {
     event_rx: ipc::IpcReceiver<MemEvents>,
 }

+pub struct SvFfiGuard<'a> {
+    alloc: &'a Rc<RefCell<IsolatedAlloc>>,
+    sv_guard: std::sync::MutexGuard<'static, Option<Supervisor>>,
+    cb_stack: Option<*mut [u8; CALLBACK_STACK_SIZE]>,
+}
+
 /// Marker representing that an error occurred during creation of the supervisor.
 #[derive(Debug)]
 pub struct SvInitError;
@@ -46,20 +57,24 @@ impl Supervisor {
     /// after the desired call has concluded.
     pub unsafe fn start_ffi(
         alloc: &Rc<RefCell<IsolatedAlloc>>,
-    ) -> (std::sync::MutexGuard<'static, Option<Supervisor>>, Option<*mut [u8; CALLBACK_STACK_SIZE]>)
+    ) -> SvFfiGuard<'_>
     {
         let mut sv_guard = SUPERVISOR.lock().unwrap();
         // If the supervisor is not initialised for whatever reason, fast-return.
         // This might be desired behaviour, as even on platforms where ptracing
         // is not implemented it enables us to enforce that only one FFI call
         // happens at a time.
         let Some(sv) = sv_guard.as_mut() else {
-            return (sv_guard, None);
+            return SvFfiGuard {
+                alloc,
+                sv_guard,
+                cb_stack: None,
+            };
         };

         // Get pointers to all the pages the supervisor must allow accesses in
         // and prepare the callback stack.
-        let page_ptrs = alloc.borrow().pages();
+        let page_ptrs = alloc.borrow().pages().collect();
         let raw_stack_ptr: *mut [u8; CALLBACK_STACK_SIZE] =
             Box::leak(Box::new([0u8; CALLBACK_STACK_SIZE])).as_mut_ptr().cast();
         let stack_ptr = raw_stack_ptr.expose_provenance();
@@ -86,7 +101,11 @@ impl Supervisor {
         // modifications to our memory - simply waiting on the recv() doesn't
         // count.
         signal::raise(signal::SIGSTOP).unwrap();
-        (sv_guard, Some(raw_stack_ptr))
+        SvFfiGuard {
+            alloc,
+            sv_guard,
+            cb_stack: Some(raw_stack_ptr),
+        }
     }

     /// Undoes FFI-related preparations, allowing Miri to continue as normal, then
@@ -98,10 +117,12 @@ impl Supervisor {
     /// received by a prior call to `start_ffi`, and the allocator must be the
     /// one passed to it also.
     pub unsafe fn end_ffi(
-        alloc: &Rc<RefCell<IsolatedAlloc>>,
-        mut sv_guard: std::sync::MutexGuard<'static, Option<Supervisor>>,
-        raw_stack_ptr: Option<*mut [u8; CALLBACK_STACK_SIZE]>,
+        guard: SvFfiGuard<'_>,
     ) -> Option<MemEvents> {
+        let alloc = guard.alloc;
+        let mut sv_guard = guard.sv_guard;
+        let cb_stack = guard.cb_stack;
+
         // We can't use IPC channels here to signal that FFI mode has ended,
         // since they might allocate memory which could get us stuck in a SIGTRAP
         // with no easy way out! While this could be worked around, it is much
@@ -121,7 +142,7 @@ impl Supervisor {
         // SAFETY: Caller upholds that this pointer was allocated as a box with
         // this type.
         unsafe {
-            drop(Box::from_raw(raw_stack_ptr.unwrap()));
+            drop(Box::from_raw(cb_stack.unwrap()));
         }
         // On the off-chance something really weird happens, don't block forever.
         sv.event_rx
@@ -130,7 +151,7 @@ impl Supervisor {
                 match e {
                     ipc::TryRecvError::IpcError(_) => (),
                     ipc::TryRecvError::Empty =>
-                        eprintln!("Waiting for accesses from supervisor timed out!"),
+                        panic!("Waiting for accesses from supervisor timed out!"),
                 }
             })
             .ok()
@@ -141,6 +162,10 @@ impl Supervisor {
 /// supervisor process could not be created successfully; else, the caller
 /// is now the child process and can communicate via `start_ffi`/`end_ffi`,
 /// receiving back events through `get_events`.
+///
+/// When forking to initialise the supervisor, the child raises a `SIGSTOP`; if
+/// the parent successfully ptraces the child, it will allow it to resume. Else,
+/// the child will be killed by the parent.
 ///
 /// # Safety
 /// The invariants for `fork()` must be upheld by the caller, namely either:
@@ -151,11 +176,6 @@ pub unsafe fn init_sv() -> Result<(), SvInitError> {
     // FIXME: Much of this could be reimplemented via the mitosis crate if we upstream the
     // relevant missing bits.

-    // Not on a properly supported architecture!
-    if cfg!(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))) {
-        return Err(SvInitError);
-    }
-
     // On Linux, this will check whether ptrace is fully disabled by the Yama module.
     // If Yama isn't running or we're not on Linux, we'll still error later, but
     // this saves a very expensive fork call.
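For context on the change above: `start_ffi` used to return a loose `(MutexGuard, Option<*mut _>)` pair that the caller had to hand back to `end_ffi` one piece at a time, alongside the allocator. With `SvFfiGuard`, all three travel together, so a call site threads a single value from `start_ffi` to `end_ffi`. A rough sketch of how such a call site might look, assuming it lives in the same module; the function name and the placeholder for the foreign call are illustrative, not taken from this diff:

```rust
// Hypothetical call site; `Supervisor`, `SvFfiGuard`, `IsolatedAlloc`, `MemEvents`,
// `Rc`, and `RefCell` are assumed to be in scope as in the module above.
unsafe fn call_native(alloc: &Rc<RefCell<IsolatedAlloc>>) -> Option<MemEvents> {
    // Lock the shared SUPERVISOR and prepare the callback stack; the returned
    // guard owns the lock, the allocator reference, and the stack pointer.
    let guard = unsafe { Supervisor::start_ffi(alloc) };

    // ... perform the traced foreign call here while the guard is held ...

    // Hand the whole guard back; `end_ffi` unpacks it itself, so a caller can
    // no longer pass a mismatched allocator or stack pointer.
    unsafe { Supervisor::end_ffi(guard) }
}
```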
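The new paragraph on `init_sv` describes the fork/ptrace handshake only in prose. In isolation, that handshake has roughly the following shape; this is a simplified sketch using the `nix` crate, not the code in this diff, and it glosses over the IPC channels, `ChildListener`, and the exact stop/resume choreography that the real `sv_loop` handles:

```rust
use nix::sys::ptrace;
use nix::sys::signal::{self, Signal};
use nix::sys::wait::waitpid;
use nix::unistd::{fork, ForkResult};

/// Rough sketch only: the child stops itself, and the parent either attaches
/// (and lets it run again) or kills it.
unsafe fn handshake_sketch() -> nix::Result<()> {
    let fork_result = unsafe { fork() }?;
    match fork_result {
        ForkResult::Child => {
            // Stop ourselves so the parent can decide whether it can trace us.
            signal::raise(Signal::SIGSTOP)?;
            // Only reached once the parent has attached and resumed us.
            Ok(())
        }
        ForkResult::Parent { child } => {
            match ptrace::attach(child) {
                Ok(()) => {
                    // Wait for the child to enter a ptrace-stop, then let it run.
                    // The real supervisor enters its event loop here and keeps
                    // servicing the child's stops from then on.
                    waitpid(child, None)?;
                    ptrace::cont(child, None)
                }
                Err(e) => {
                    // ptrace is unavailable (e.g. fully disabled by Yama); don't
                    // leave a stopped child process behind.
                    let _ = signal::kill(child, Signal::SIGKILL);
                    Err(e)
                }
            }
        }
    }
}
```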