use std::alloc::Layout;
+use std::ptr::NonNull;

+use nix::sys::mman;
use rustc_index::bit_set::DenseBitSet;

/// How many bytes of memory each bit in the bitset represents.
@@ -12,7 +14,7 @@ pub struct IsolatedAlloc {
    /// Pointers to page-aligned memory that has been claimed by the allocator.
    /// Every pointer here must point to a page-sized allocation claimed via
    /// mmap. These pointers are used for "small" allocations.
-    page_ptrs: Vec<*mut u8>,
+    page_ptrs: Vec<NonNull<u8>>,
    /// Metadata about which bytes have been allocated on each page. The length
    /// of this vector must be the same as that of `page_ptrs`, and the domain
    /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
@@ -24,7 +26,7 @@ pub struct IsolatedAlloc {
    page_infos: Vec<DenseBitSet<usize>>,
    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
    /// with their size stored as the second element of the vector.
-    huge_ptrs: Vec<(*mut u8, usize)>,
+    huge_ptrs: Vec<(NonNull<u8>, usize)>,
    /// The host (not emulated) page size.
    page_size: usize,
}
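As an aside on the bookkeeping the field docs above describe: each `DenseBitSet` carries one bit per `COMPRESSION_FACTOR` bytes of its page, so claiming a byte range means setting the corresponding bit range. Below is a minimal standalone sketch of that mapping, not part of the patch; the `COMPRESSION_FACTOR` and page-size values are hypothetical placeholders (the real constant and the host page size live elsewhere in this file).

use rustc_index::bit_set::DenseBitSet;

// Hypothetical values for illustration only.
const COMPRESSION_FACTOR: usize = 4;
const PAGE_SIZE: usize = 4096;

fn main() {
    // Domain size must be exactly page_size / COMPRESSION_FACTOR,
    // matching the invariant documented on `page_infos`.
    let mut pinfo: DenseBitSet<usize> = DenseBitSet::new_empty(PAGE_SIZE / COMPRESSION_FACTOR);

    // Mark a 24-byte allocation at byte offset 64 of the page as used:
    // each bit covers COMPRESSION_FACTOR bytes.
    let (offset, size) = (64usize, 24usize);
    for bit in offset / COMPRESSION_FACTOR..(offset + size).div_ceil(COMPRESSION_FACTOR) {
        pinfo.insert(bit);
    }

    // Bytes whose bits are still unset remain available for later allocations.
    assert!(pinfo.contains(offset / COMPRESSION_FACTOR));
    assert!(!pinfo.contains((offset + size).div_ceil(COMPRESSION_FACTOR)));
}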
@@ -137,7 +139,7 @@ impl IsolatedAlloc {
    unsafe fn alloc_small(
        page_size: usize,
        layout: Layout,
-        page: *mut u8,
+        page: NonNull<u8>,
        pinfo: &mut DenseBitSet<usize>,
        zeroed: bool,
    ) -> Option<*mut u8> {
@@ -164,15 +166,15 @@ impl IsolatedAlloc {
                        // zero out, even if we allocated more
                        ptr.write_bytes(0, layout.size());
                    }
-                    return Some(ptr);
+                    return Some(ptr.as_ptr());
                }
            }
        }
        None
    }

    /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
+    fn add_page(&mut self) -> (NonNull<u8>, &mut DenseBitSet<usize>) {
        // SAFETY: mmap is always safe to call when requesting anonymous memory
        let page_ptr = unsafe {
            libc::mmap(
@@ -189,8 +191,8 @@ impl IsolatedAlloc {
        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
        assert!(self.page_size % COMPRESSION_FACTOR == 0);
        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
-        self.page_ptrs.push(page_ptr);
-        (page_ptr, self.page_infos.last_mut().unwrap())
+        self.page_ptrs.push(NonNull::new(page_ptr).unwrap());
+        (NonNull::new(page_ptr).unwrap(), self.page_infos.last_mut().unwrap())
    }

    /// Allocates in multiples of one page on the host system.
@@ -212,7 +214,7 @@ impl IsolatedAlloc {
            .cast::<u8>()
        };
        assert_ne!(ret.addr(), usize::MAX, "mmap failed");
-        self.huge_ptrs.push((ret, size));
+        self.huge_ptrs.push((NonNull::new(ret).unwrap(), size));
        // huge_normalized_layout ensures that we've overallocated enough space
        // for this to be valid.
        ret.map_addr(|a| a.next_multiple_of(layout.align()))
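The two comment lines above lean on `huge_normalized_layout` having over-allocated enough that rounding the mapping up to the requested alignment stays in bounds. A tiny worked example of that `map_addr`/`next_multiple_of` arithmetic, using made-up numbers rather than anything from this patch:

fn main() {
    // Hypothetical values: mmap returns a page-aligned address, but the caller
    // asked for 8 KiB alignment, which is stricter than a 4 KiB page.
    let mmap_addr: usize = 0x7f3a_2600_1000; // 4096-aligned, not 8192-aligned
    let align: usize = 8192;

    // Mirrors `ret.map_addr(|a| a.next_multiple_of(layout.align()))`:
    // bump the address forward to the next multiple of the alignment.
    let aligned = mmap_addr.next_multiple_of(align);
    assert_eq!(aligned % align, 0);
    // The bump is less than `align` bytes, which is the slack the
    // over-allocation has to provide for the result to stay inside the mapping.
    assert!(aligned - mmap_addr < align);
}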
@@ -246,7 +248,7 @@ impl IsolatedAlloc {
            // from us pointing to this page, and we know it was allocated
            // in add_page as exactly a single page.
            unsafe {
-                assert_eq!(libc::munmap(page_ptr.cast(), self.page_size), 0);
+                assert_eq!(libc::munmap(page_ptr.as_ptr().cast(), self.page_size), 0);
            }
        }
    }
@@ -265,7 +267,7 @@ impl IsolatedAlloc {
        // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
        let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
            .enumerate()
-            .find(|(_, (page, _))| page.addr() == page_addr);
+            .find(|(_, (page, _))| page.addr().get() == page_addr);
        let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
            panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
        };
@@ -287,30 +289,67 @@ impl IsolatedAlloc {
            .huge_ptrs
            .iter()
            .position(|&(pg, size)| {
-                pg.addr() <= ptr.addr() && ptr.addr() < pg.addr().strict_add(size)
+                pg.addr().get() <= ptr.addr() && ptr.addr() < pg.addr().get().strict_add(size)
            })
            .expect("Freeing unallocated pages");
        // And kick it from the list
        let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
        assert_eq!(size, size2, "got wrong layout in dealloc");
        // SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
        unsafe {
-            let ret = libc::munmap(un_offset_ptr.cast(), size);
+            let ret = libc::munmap(un_offset_ptr.as_ptr().cast(), size);
            assert_eq!(ret, 0);
        }
    }

    /// Returns a vector of page addresses managed by the allocator.
    pub fn pages(&self) -> Vec<usize> {
-        let mut pages: Vec<_> =
-            self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance()).collect();
-        for (ptr, size) in &self.huge_ptrs {
+        let mut pages: Vec<usize> =
+            self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance().get()).collect();
+        self.huge_ptrs.iter().for_each(|(ptr, size)| {
            for i in 0..size / self.page_size {
-                pages.push(ptr.expose_provenance().strict_add(i * self.page_size));
+                pages.push(ptr.expose_provenance().get().strict_add(i * self.page_size));
            }
-        }
+        });
        pages
    }
+
+    /// Protects all owned memory as `PROT_NONE`, preventing accesses.
+    ///
+    /// SAFETY: Accessing memory after this point will result in a segfault
+    /// unless it is first unprotected.
+    pub unsafe fn prepare_ffi(&mut self) -> Result<(), nix::errno::Errno> {
+        let prot = mman::ProtFlags::PROT_NONE;
+        unsafe { self.mprotect(prot) }
+    }
+
+    /// Deprotects all owned memory by setting it to RW. Erroring here is very
+    /// likely unrecoverable, so it may panic if applying those permissions
+    /// fails.
+    pub fn unprep_ffi(&mut self) {
+        let prot = mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE;
+        unsafe {
+            self.mprotect(prot).unwrap();
+        }
+    }
+
+    /// Applies `prot` to every page managed by the allocator.
+    ///
+    /// SAFETY: Accessing memory in violation of the protection flags will
+    /// trigger a segfault.
+    unsafe fn mprotect(&mut self, prot: mman::ProtFlags) -> Result<(), nix::errno::Errno> {
+        for &pg in &self.page_ptrs {
+            unsafe {
+                mman::mprotect(pg.cast(), self.page_size, prot)?;
+            }
+        }
+        for &(hpg, size) in &self.huge_ptrs {
+            unsafe {
+                mman::mprotect(hpg.cast(), size.next_multiple_of(self.page_size), prot)?;
+            }
+        }
+        Ok(())
+    }
}

#[cfg(test)]
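As a usage note for the new hooks: per their doc comments, a caller is expected to protect the allocator's pages before handing control to native code and to restore access afterwards. A hypothetical caller sketch follows; `do_native_call` and the surrounding plumbing are placeholders, not code from this commit.

// Hypothetical caller sketch; not part of this commit.
fn with_protected_memory(alloc: &mut IsolatedAlloc) -> Result<(), nix::errno::Errno> {
    // Revoke access to every page the allocator owns; any access from the
    // native code will fault unless the pages are unprotected first.
    unsafe { alloc.prepare_ffi() }?;

    do_native_call(); // placeholder for the actual foreign call

    // Restore read/write access; `unprep_ffi` panics internally if that fails.
    alloc.unprep_ffi();
    Ok(())
}

fn do_native_call() {}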