- use std::alloc::{self, Layout};
+ use std::alloc::Layout;

use rustc_index::bit_set::DenseBitSet;

@@ -11,7 +11,7 @@ const COMPRESSION_FACTOR: usize = 4;
pub struct IsolatedAlloc {
    /// Pointers to page-aligned memory that has been claimed by the allocator.
    /// Every pointer here must point to a page-sized allocation claimed via
-    /// the global allocator. These pointers are used for "small" allocations.
+    /// mmap. These pointers are used for "small" allocations.
    page_ptrs: Vec<*mut u8>,
    /// Metadata about which bytes have been allocated on each page. The length
    /// of this vector must be the same as that of `page_ptrs`, and the domain
@@ -52,20 +52,26 @@ impl IsolatedAlloc {
        Layout::from_size_align(size, align).unwrap()
    }

-    /// Returns the layout used to allocate the pages that hold small allocations.
+    /// For greater-than-page-sized allocations, returns the allocation size we need to request,
+    /// including the slack we need to satisfy the alignment request.
    #[inline]
-    fn page_layout(&self) -> Layout {
-        Layout::from_size_align(self.page_size, self.page_size).unwrap()
-    }
-
-    /// If the allocation is greater than a page, then round to the nearest page #.
-    #[inline]
-    fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout {
+    fn huge_normalized_layout(&self, layout: Layout) -> usize {
        // Allocate in page-sized chunks
-        let size = layout.size().next_multiple_of(page_size);
+        let size = layout.size().next_multiple_of(self.page_size);
        // And make sure the align is at least one page
-        let align = std::cmp::max(layout.align(), page_size);
-        Layout::from_size_align(size, align).unwrap()
+        let align = std::cmp::max(layout.align(), self.page_size);
+        // pg_count gives us the # of pages needed to satisfy the size. For
+        // align > page_size where align = n * page_size, a sufficiently-aligned
+        // address must exist somewhere in the range of
+        // some_page_aligned_address..some_page_aligned_address + (n-1) * page_size
+        // (since if some_page_aligned_address + n * page_size is sufficiently aligned,
+        // then so is some_page_aligned_address itself per the definition of n, so we
+        // can avoid using that 1 extra page).
+        // Thus we allocate n-1 extra pages.
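+        // Worked example (hypothetical numbers, assuming page_size = 4096):
+        // size = 10000 rounds up to 12288 (3 pages), and align = 16384 gives
+        // n = 4, so we request 3 + 3 = 6 pages, i.e. 24576 bytes.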
+        let pg_count = size.div_ceil(self.page_size);
+        let extra_pages = align.strict_div(self.page_size).saturating_sub(1);
+
+        pg_count.strict_add(extra_pages).strict_mul(self.page_size)
    }

    /// Determines whether a given normalized (size, align) should be sent to
@@ -78,15 +84,15 @@ impl IsolatedAlloc {
    /// Allocates memory as described in `Layout`. This memory should be deallocated
    /// by calling `dealloc` on this same allocator.
    ///
-    /// SAFETY: See `alloc::alloc()`
+    /// SAFETY: See `alloc::alloc()`.
    pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
        // SAFETY: Upheld by caller
        unsafe { self.allocate(layout, false) }
    }

    /// Same as `alloc`, but zeroes out the memory.
    ///
-    /// SAFETY: See `alloc::alloc_zeroed()`
+    /// SAFETY: See `alloc::alloc_zeroed()`.
    pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
        // SAFETY: Upheld by caller
        unsafe { self.allocate(layout, true) }
@@ -95,14 +101,13 @@ impl IsolatedAlloc {
    /// Abstracts over the logic of `alloc_zeroed` vs `alloc`, as determined by
    /// the `zeroed` argument.
    ///
-    /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
-    /// corresponds to the host pagesize.
+    /// SAFETY: See `alloc::alloc()`.
    unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
        let layout = IsolatedAlloc::normalized_layout(layout);
        if self.is_huge_alloc(&layout) {
            // SAFETY: Validity of `layout` upheld by caller; we checked that
            // the size and alignment are appropriate for being a huge alloc
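+            // Fresh anonymous mmap memory is already zeroed, so `alloc_huge`
+            // no longer needs the `zeroed` flag.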
-            unsafe { self.alloc_huge(layout, zeroed) }
+            unsafe { self.alloc_huge(layout) }
        } else {
            for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                // SAFETY: The value in `self.page_size` is used to allocate
@@ -168,8 +173,19 @@ impl IsolatedAlloc {

    /// Expands the available memory pool by adding one page.
    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
-        // SAFETY: The system page size, which is the layout size, cannot be 0
-        let page_ptr = unsafe { alloc::alloc(self.page_layout()) };
+        // SAFETY: mmap is always safe to call when requesting anonymous memory
+        let page_ptr = unsafe {
+            libc::mmap(
+                std::ptr::null_mut(),
+                self.page_size,
+                libc::PROT_READ | libc::PROT_WRITE,
+                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
+                -1,
+                0,
+            )
+            .cast::<u8>()
+        };
+        assert_ne!(page_ptr.addr(), usize::MAX, "mmap failed");
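+        // (mmap reports failure by returning MAP_FAILED, i.e. -1 cast to a
+        // pointer, whose address is usize::MAX.)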
        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
        assert!(self.page_size % COMPRESSION_FACTOR == 0);
        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
@@ -178,15 +194,28 @@ impl IsolatedAlloc {
    }

    /// Allocates in multiples of one page on the host system.
+    /// Will always be zeroed.
    ///
    /// SAFETY: Same as `alloc()`.
-    unsafe fn alloc_huge(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
-        // SAFETY: Upheld by caller
-        let ret =
-            unsafe { if zeroed { alloc::alloc_zeroed(layout) } else { alloc::alloc(layout) } };
-        self.huge_ptrs.push((ret, layout.size()));
-        ret
+    unsafe fn alloc_huge(&mut self, layout: Layout) -> *mut u8 {
+        let size = self.huge_normalized_layout(layout);
+        // SAFETY: mmap is always safe to call when requesting anonymous memory
+        let ret = unsafe {
+            libc::mmap(
+                std::ptr::null_mut(),
+                size,
+                libc::PROT_READ | libc::PROT_WRITE,
+                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
+                -1,
+                0,
+            )
+            .cast::<u8>()
+        };
+        assert_ne!(ret.addr(), usize::MAX, "mmap failed");
+        self.huge_ptrs.push((ret, size));
+        // huge_normalized_layout ensures that we've overallocated enough space
+        // for this to be valid.
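+        // (E.g., with hypothetical numbers: if mmap returned 0x7f0000001000 and
+        // layout.align() is 16384, the caller gets 0x7f0000004000.)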
+        ret.map_addr(|a| a.next_multiple_of(layout.align()))
    }

    /// Deallocates a pointer from this allocator.
@@ -215,15 +244,15 @@ impl IsolatedAlloc {
                let page_ptr = self.page_ptrs.remove(idx);
                // SAFETY: We checked that there are no outstanding allocations
                // from us pointing to this page, and we know it was allocated
-                // with this layout
+                // in add_page as exactly a single page.
                unsafe {
-                    alloc::dealloc(page_ptr, self.page_layout());
+                    assert_eq!(libc::munmap(page_ptr.cast(), self.page_size), 0);
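+                    // munmap returns 0 on success, so this asserts the unmap succeeded.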
                }
            }
        }
    }

-    /// Returns the index of the page that this was deallocated from
+    /// Returns the index of the page that this was deallocated from.
    ///
    /// SAFETY: the pointer must have been allocated with `alloc_small`.
    unsafe fn dealloc_small(&mut self, ptr: *mut u8, layout: Layout) -> usize {
@@ -252,18 +281,22 @@ impl IsolatedAlloc {
    /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
    /// must ask for a size larger than the host pagesize.
    unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
-        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
-        // Find the pointer matching in address with the one we got
+        let size = self.huge_normalized_layout(layout);
+        // Find the huge allocation containing `ptr`.
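+        // (The pointer we handed out may be offset within the mapping to satisfy
+        // align > page_size, so it need not equal the mapping's base address.)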
        let idx = self
            .huge_ptrs
            .iter()
-            .position(|pg| ptr.addr() == pg.0.addr())
+            .position(|&(pg, size)| {
+                pg.addr() <= ptr.addr() && ptr.addr() < pg.addr().strict_add(size)
+            })
            .expect("Freeing unallocated pages");
        // And kick it from the list
-        self.huge_ptrs.remove(idx);
-        // SAFETY: Caller ensures validity of the layout
+        let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
+        assert_eq!(size, size2, "got wrong layout in dealloc");
+        // SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
        unsafe {
-            alloc::dealloc(ptr, layout);
+            let ret = libc::munmap(un_offset_ptr.cast(), size);
+            assert_eq!(ret, 0);
        }
    }

@@ -359,12 +392,15 @@ mod tests {
        sizes.append(&mut vec![256; 12]);
        // Give it some multi-page ones too
        sizes.append(&mut vec![32 * 1024; 4]);
+        sizes.push(4 * 1024);
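+        // (Pairs by index with the 64 * 1024 align pushed below.)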

        // Matching aligns for the sizes
        let mut aligns = vec![16; 12];
        aligns.append(&mut vec![256; 2]);
        aligns.append(&mut vec![64; 12]);
        aligns.append(&mut vec![4096; 4]);
+        // And one that requests align > page_size
+        aligns.push(64 * 1024);

        // Make sure we didn't mess up in the test itself!
        assert_eq!(sizes.len(), aligns.len());