@@ -1,4 +1,4 @@
-use std::alloc::Layout;
+use std::{alloc::Layout, ptr::NonNull};
 
 use nix::sys::mman;
 use rustc_index::bit_set::DenseBitSet;
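This commit migrates the allocator's bookkeeping from raw `*mut u8` to `std::ptr::NonNull<u8>`, so the non-null invariant is carried by the type instead of being re-asserted at each use site. Every `NonNull` operation the diff relies on is plain `std` API; a standalone demo (the names here are illustrative, not from the commit):

```rust
use std::ptr::NonNull;

fn main() {
    let mut value: u8 = 7;
    let raw: *mut u8 = &mut value;

    // NonNull::new checks for null exactly once, up front...
    let nn: NonNull<u8> = NonNull::new(raw).unwrap();

    // ...after which the non-null invariant travels with the type:
    let _raw_again: *mut u8 = nn.as_ptr(); // lower back to a raw pointer
    let _addr: usize = nn.addr().get(); // address as NonZero<usize>, then usize
    let _void: NonNull<std::ffi::c_void> = nn.cast(); // retype, no arithmetic
}
```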
@@ -13,7 +13,7 @@ pub struct IsolatedAlloc {
     /// Pointers to page-aligned memory that has been claimed by the allocator.
     /// Every pointer here must point to a page-sized allocation claimed via
     /// mmap. These pointers are used for "small" allocations.
-    page_ptrs: Vec<*mut u8>,
+    page_ptrs: Vec<NonNull<u8>>,
     /// Metadata about which bytes have been allocated on each page. The length
     /// of this vector must be the same as that of `page_ptrs`, and the domain
     /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
@@ -25,7 +25,7 @@ pub struct IsolatedAlloc {
     page_infos: Vec<DenseBitSet<usize>>,
     /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
     /// with their size stored as the second element of the vector.
-    huge_ptrs: Vec<(*mut u8, usize)>,
+    huge_ptrs: Vec<(NonNull<u8>, usize)>,
     /// The host (not emulated) page size.
     page_size: usize,
 }
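The field docs pin down two invariants: `page_ptrs` and `page_infos` stay parallel, and each bitset carries one bit per `COMPRESSION_FACTOR` bytes of a page. A miniature sketch of that relationship, assuming a toy `Vec<bool>` in place of `rustc_index`'s `DenseBitSet` and an illustrative `COMPRESSION_FACTOR` value (the real constant lives elsewhere in this file):

```rust
use std::ptr::NonNull;

const COMPRESSION_FACTOR: usize = 4; // illustrative value, not from the commit

struct MiniAlloc {
    page_ptrs: Vec<NonNull<u8>>, // one entry per claimed page
    page_infos: Vec<Vec<bool>>,  // parallel to page_ptrs; one flag per chunk
    page_size: usize,
}

impl MiniAlloc {
    // The invariants the field docs describe, written out as assertions.
    fn check_invariants(&self) {
        assert_eq!(self.page_ptrs.len(), self.page_infos.len());
        for info in &self.page_infos {
            assert_eq!(info.len(), self.page_size / COMPRESSION_FACTOR);
        }
    }
}

fn main() {
    let alloc = MiniAlloc { page_ptrs: vec![], page_infos: vec![], page_size: 4096 };
    alloc.check_invariants();
}
```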
@@ -138,7 +138,7 @@ impl IsolatedAlloc {
     unsafe fn alloc_small(
         page_size: usize,
         layout: Layout,
-        page: *mut u8,
+        page: NonNull<u8>,
         pinfo: &mut DenseBitSet<usize>,
         zeroed: bool,
     ) -> Option<*mut u8> {
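Note that `page` becomes `NonNull<u8>` while the return type stays `Option<*mut u8>`: callers ultimately hand out raw pointers, so only the final step lowers the pointer. A minimal sketch (assumed, not shown in the commit) of the shape of that offset-then-return step:

```rust
use std::ptr::NonNull;

// Offsetting into the page stays in NonNull form; only the returned value
// degrades to a raw pointer, as `alloc_small`'s signature requires.
unsafe fn offset_into_page(page: NonNull<u8>, offset: usize) -> *mut u8 {
    // SAFETY: the caller must ensure `offset` lies within the page allocation.
    let inner = unsafe { page.add(offset) };
    inner.as_ptr()
}
```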
@@ -165,15 +165,15 @@ impl IsolatedAlloc {
                         // zero out, even if we allocated more
                         ptr.write_bytes(0, layout.size());
                     }
-                    return Some(ptr);
+                    return Some(ptr.as_ptr());
                 }
             }
         }
         None
     }
 
     /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
+    fn add_page(&mut self) -> (NonNull<u8>, &mut DenseBitSet<usize>) {
         // SAFETY: mmap is always safe to call when requesting anonymous memory
         let page_ptr = unsafe {
             libc::mmap(
@@ -190,8 +190,8 @@ impl IsolatedAlloc {
         // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
         assert!(self.page_size % COMPRESSION_FACTOR == 0);
         self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
-        self.page_ptrs.push(page_ptr);
-        (page_ptr, self.page_infos.last_mut().unwrap())
+        self.page_ptrs.push(NonNull::new(page_ptr).unwrap());
+        (NonNull::new(page_ptr).unwrap(), self.page_infos.last_mut().unwrap())
     }
 
     /// Allocates in multiples of one page on the host system.
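The `unwrap`s above cannot fire: on failure `mmap` returns `MAP_FAILED` (address `-1`), never null, so any pointer that survives the failure check is non-null. A self-contained sketch of the mmap-then-wrap step, with the flag set assumed (the commit only shows the `libc::mmap(` call site):

```rust
use std::ptr::NonNull;

fn claim_page(page_size: usize) -> NonNull<u8> {
    // SAFETY: requesting fresh anonymous memory is always sound.
    let ptr = unsafe {
        libc::mmap(
            std::ptr::null_mut(), // let the kernel choose the address
            page_size,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1, // no backing file
            0,
        )
    };
    // mmap signals failure with MAP_FAILED (-1), not null, so after this
    // check the NonNull::new below can never return None.
    assert_ne!(ptr, libc::MAP_FAILED, "mmap failed");
    NonNull::new(ptr.cast::<u8>()).unwrap()
}
```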
@@ -213,7 +213,7 @@ impl IsolatedAlloc {
                 .cast::<u8>()
         };
         assert_ne!(ret.addr(), usize::MAX, "mmap failed");
-        self.huge_ptrs.push((ret, size));
+        self.huge_ptrs.push((NonNull::new(ret).unwrap(), size));
         // huge_normalized_layout ensures that we've overallocated enough space
         // for this to be valid.
         ret.map_addr(|a| a.next_multiple_of(layout.align()))
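Here `ret.addr() == usize::MAX` is the same `MAP_FAILED` check in strict-provenance form, which again justifies the `unwrap`. The final line rounds the pointer up to the requested alignment without discarding its provenance; the same operation in isolation:

```rust
// Round a pointer's address up to `align` while keeping its provenance:
// `map_addr` swaps only the address; `next_multiple_of` panics if `align` is 0.
fn align_up(ptr: *mut u8, align: usize) -> *mut u8 {
    debug_assert!(align.is_power_of_two());
    ptr.map_addr(|a| a.next_multiple_of(align))
}
```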
@@ -247,7 +247,7 @@ impl IsolatedAlloc {
             // from us pointing to this page, and we know it was allocated
             // in add_page as exactly a single page.
             unsafe {
-                assert_eq!(libc::munmap(page_ptr.cast(), self.page_size), 0);
+                assert_eq!(libc::munmap(page_ptr.as_ptr().cast(), self.page_size), 0);
             }
         }
     }
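`munmap` is a raw libc call taking `*mut c_void`, so the stored `NonNull<u8>` has to be lowered with `as_ptr()` at the FFI boundary; that round trip is the whole change in this hunk. In isolation:

```rust
use std::ptr::NonNull;

// SAFETY: `page_ptr` must be an mmap-ed allocation of exactly `page_size` bytes.
unsafe fn unmap_page(page_ptr: NonNull<u8>, page_size: usize) {
    let ret = unsafe { libc::munmap(page_ptr.as_ptr().cast(), page_size) };
    assert_eq!(ret, 0, "munmap failed");
}
```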
@@ -266,7 +266,7 @@ impl IsolatedAlloc {
         // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
         let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
             .enumerate()
-            .find(|(_, (page, _))| page.addr() == page_addr);
+            .find(|(_, (page, _))| page.addr().get() == page_addr);
         let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
             panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
         };
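`NonNull::addr()` returns `NonZero<usize>` rather than `usize`, so every address comparison in the lookup paths grows a `.get()`; that is the entire change in this hunk and the next. For example:

```rust
use std::ptr::NonNull;

// Does `addr` fall inside the allocation starting at `pg` of `size` bytes?
// (The real code uses the nightly `strict_add` for the upper bound; plain `+`
// keeps this sketch stable-only.)
fn contains(pg: NonNull<u8>, size: usize, addr: usize) -> bool {
    let base = pg.addr().get(); // NonZero<usize> -> usize
    base <= addr && addr < base + size
}
```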
@@ -288,26 +288,26 @@ impl IsolatedAlloc {
             .huge_ptrs
             .iter()
             .position(|&(pg, size)| {
-                pg.addr() <= ptr.addr() && ptr.addr() < pg.addr().strict_add(size)
+                pg.addr().get() <= ptr.addr() && ptr.addr() < pg.addr().get().strict_add(size)
             })
             .expect("Freeing unallocated pages");
         // And kick it from the list
         let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
         assert_eq!(size, size2, "got wrong layout in dealloc");
         // SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
         unsafe {
-            let ret = libc::munmap(un_offset_ptr.cast(), size);
+            let ret = libc::munmap(un_offset_ptr.as_ptr().cast(), size);
             assert_eq!(ret, 0);
         }
     }
 
     /// Returns a vector of page addresses managed by the allocator.
     pub fn pages(&self) -> Vec<usize> {
-        let mut pages: Vec<_> =
-            self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance()).collect();
+        let mut pages: Vec<usize> =
+            self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance().get()).collect();
         self.huge_ptrs.iter().for_each(|(ptr, size)| {
             for i in 0..size / self.page_size {
-                pages.push(ptr.expose_provenance().strict_add(i * self.page_size));
+                pages.push(ptr.expose_provenance().get().strict_add(i * self.page_size));
             }
         });
         pages
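`expose_provenance()` on `NonNull` likewise returns `NonZero<usize>`, hence the added `.get()` calls. For one huge allocation, the loop enumerates every page the mapping covers; the same arithmetic in isolation (with `checked_add` standing in for the nightly `strict_add`):

```rust
use std::ptr::NonNull;

// Every page covered by a `size`-byte mapping starting at `ptr`, as exposed
// addresses. Assumes `size` is a whole number of pages, as the huge path ensures.
fn huge_alloc_pages(ptr: NonNull<u8>, size: usize, page_size: usize) -> Vec<usize> {
    let base = ptr.expose_provenance().get();
    (0..size / page_size)
        .map(|i| base.checked_add(i * page_size).expect("address overflow"))
        .collect()
}
```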
@@ -339,16 +339,12 @@ impl IsolatedAlloc {
     unsafe fn mprotect(&mut self, prot: mman::ProtFlags) -> Result<(), nix::errno::Errno> {
         for &pg in &self.page_ptrs {
             unsafe {
-                // We already know only non-null ptrs are pushed to self.pages
-                let addr: std::ptr::NonNull<std::ffi::c_void> =
-                    std::ptr::NonNull::new_unchecked(pg.cast());
-                mman::mprotect(addr, self.page_size, prot)?;
+                mman::mprotect(pg.cast(), self.page_size, prot)?;
             }
         }
         for &(hpg, size) in &self.huge_ptrs {
             unsafe {
-                let addr = std::ptr::NonNull::new_unchecked(hpg.cast());
-                mman::mprotect(addr, size.next_multiple_of(self.page_size), prot)?;
+                mman::mprotect(hpg.cast(), size.next_multiple_of(self.page_size), prot)?;
             }
         }
         Ok(())
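This hunk is the payoff: nix's `mman::mprotect` wrapper takes `NonNull<c_void>`, so storing `NonNull` up front deletes the `NonNull::new_unchecked` conversions along with the safety argument they required. The call shrinks to a `cast()`:

```rust
use std::ffi::c_void;
use std::ptr::NonNull;
use nix::sys::mman::{self, ProtFlags};

// SAFETY: `pg` must point to an mmap-ed region of at least `page_size` bytes.
unsafe fn protect_page(pg: NonNull<u8>, page_size: usize, prot: ProtFlags) -> nix::Result<()> {
    let addr: NonNull<c_void> = pg.cast(); // NonNull<u8> -> NonNull<c_void>
    unsafe { mman::mprotect(addr, page_size, prot) }
}
```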