@@ -2,6 +2,9 @@ use std::alloc::{self, Layout};
 
 use rustc_index::bit_set::DenseBitSet;
 
+/// How many bytes of memory each bit in the bitset represents.
+const COMPRESSION_FACTOR: usize = 4;
+
 /// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated
 /// pages (not mixed with Miri's own memory). This is very useful for native-lib mode.
 #[derive(Debug)]
@@ -10,19 +13,19 @@ pub struct IsolatedAlloc {
     /// Every pointer here must point to a page-sized allocation claimed via
     /// the global allocator.
     page_ptrs: Vec<*mut u8>,
-    /// Pointers to multi-page-sized allocations. These must also be page-aligned,
+    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
     /// with their size stored as the second element of the vector.
     huge_ptrs: Vec<(*mut u8, usize)>,
     /// Metadata about which bytes have been allocated on each page. The length
     /// of this vector must be the same as that of `page_ptrs`, and the domain
-    /// size of the bitset must be exactly `page_size / 8`.
+    /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
     ///
     /// Conceptually, each bit of the bitset represents the allocation status of
-    /// one 8-byte chunk on the corresponding element of `page_ptrs`. Thus,
-    /// indexing into it should be done with a value one-eighth of the corresponding
-    /// offset on the matching `page_ptrs` element.
+    /// one n-byte chunk on the corresponding element of `page_ptrs`. Thus,
+    /// indexing into it should be done with a value one-nth of the corresponding
+    /// offset on the matching `page_ptrs` element (n = `COMPRESSION_FACTOR`).
     page_infos: Vec<DenseBitSet<usize>>,
-    /// The host (not emulated) page size, or 0 if it has not yet been set.
+    /// The host (not emulated) page size.
     page_size: usize,
 }
 
@@ -39,38 +42,57 @@ impl IsolatedAlloc {
 
     /// Expands the available memory pool by adding one page.
     fn add_page(&mut self, page_size: usize) -> (*mut u8, &mut DenseBitSet<usize>) {
-        assert_ne!(page_size, 0);
-
         let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         let page_ptr = unsafe { alloc::alloc(page_layout) };
         // `page_infos` has to have one-eighth as many bits as a page has bytes
         // (or one-64th as many bytes)
-        self.page_infos.push(DenseBitSet::new_empty(page_size / 8));
+        self.page_infos.push(DenseBitSet::new_empty(page_size / COMPRESSION_FACTOR));
         self.page_ptrs.push(page_ptr);
         (page_ptr, self.page_infos.last_mut().unwrap())
     }
 
-    /// For simplicity, we allocate in multiples of 8 bytes with at least that
-    /// alignment.
+    /// For simplicity, we serve allocations in multiples of COMPRESSION_FACTOR
+    /// bytes with at least that alignment.
     #[inline]
     fn normalized_layout(layout: Layout) -> (usize, usize) {
-        let align = if layout.align() < 8 { 8 } else { layout.align() };
-        let size = layout.size().next_multiple_of(8);
+        let align =
+            if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() };
+        let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
         (size, align)
     }
 
+    /// If the allocation is larger than a page, round its size up to the next
+    /// multiple of the page size. Since we pass this into the global allocator,
+    /// it's more useful to return a `Layout` instead of a pair of usizes.
+    #[inline]
+    fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout {
+        // Allocate in page-sized chunks
+        let size = layout.size().next_multiple_of(page_size);
+        // Align probably *shouldn't* ever be greater than the page size,
+        // but just in case
+        let align = std::cmp::max(layout.align(), page_size);
+        Layout::from_size_align(size, align).unwrap()
+    }
+
+    /// Determines whether a given (size, align) should be sent to `alloc_huge` /
+    /// `dealloc_huge`.
+    #[inline]
+    fn is_huge_alloc(size: usize, align: usize, page_size: usize) -> bool {
+        align >= page_size || size >= page_size
+    }
+
     /// Allocates memory as described in `Layout`. This memory should be deallocated
     /// by calling `dealloc` on this same allocator.
     ///
     /// SAFETY: See `alloc::alloc()`
-    pub fn alloc(&mut self, layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
         unsafe { self.allocate(layout, false) }
     }
 
     /// Same as `alloc`, but zeroes out the memory.
     ///
     /// SAFETY: See `alloc::alloc_zeroed()`
-    pub fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
         unsafe { self.allocate(layout, true) }
     }
 
@@ -80,8 +102,9 @@ impl IsolatedAlloc {
     /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
     /// corresponds to the host pagesize.
     unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        if layout.align() > self.page_size || layout.size() > self.page_size {
-            unsafe { self.alloc_multi_page(layout, zeroed) }
+        let (size, align) = IsolatedAlloc::normalized_layout(layout);
+        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+            unsafe { self.alloc_huge(layout, zeroed) }
         } else {
             for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                 if let Some(ptr) =
@@ -93,6 +116,8 @@ impl IsolatedAlloc {
 
             // We get here only if there's no space in our existing pages
             let page_size = self.page_size;
+            // Add another page and allocate from it; this cannot fail since the
+            // new page is empty and we already asserted it fits into a page
             let (page, pinfo) = self.add_page(page_size);
             unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed).unwrap() }
         }
@@ -112,18 +137,18 @@ impl IsolatedAlloc {
         let (size, align) = IsolatedAlloc::normalized_layout(layout);
 
         // Check every alignment-sized block and see if there exists a `size`
-        // chunk of empty space i.e. forall idx . !pinfo.contains(idx / 8)
+        // chunk of empty space i.e. forall idx . !pinfo.contains(idx / n)
         for idx in (0..page_size).step_by(align) {
-            let idx_pinfo = idx / 8;
-            let size_pinfo = size / 8;
+            let idx_pinfo = idx / COMPRESSION_FACTOR;
+            let size_pinfo = size / COMPRESSION_FACTOR;
             // DenseBitSet::contains() panics if the index is out of bounds
             if pinfo.domain_size() < idx_pinfo + size_pinfo {
                 break;
             }
             // FIXME: is there a more efficient way to check whether the entire range is unset
             // in the bitset?
             let range_avail = !(idx_pinfo..idx_pinfo + size_pinfo).any(|idx| pinfo.contains(idx));
-            if pred {
+            if range_avail {
                 pinfo.insert_range(idx_pinfo..idx_pinfo + size_pinfo);
                 unsafe {
                     let ptr = page.add(idx);
@@ -142,7 +167,8 @@ impl IsolatedAlloc {
     /// Allocates in multiples of one page on the host system.
     ///
     /// SAFETY: Same as `alloc()`.
-    unsafe fn alloc_multi_page(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
+    unsafe fn alloc_huge(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
+        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
         let ret =
             unsafe { if zeroed { alloc::alloc_zeroed(layout) } else { alloc::alloc(layout) } };
         self.huge_ptrs.push((ret, layout.size()));
@@ -157,14 +183,16 @@ impl IsolatedAlloc {
     pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
         let (size, align) = IsolatedAlloc::normalized_layout(layout);
 
-        let ptr_idx = ptr.addr() % self.page_size;
-        let page_addr = ptr.addr() - ptr_idx;
-
-        if align > self.page_size || size > self.page_size {
+        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
             unsafe {
-                self.dealloc_multi_page(ptr, layout);
+                self.dealloc_huge(ptr, layout);
             }
         } else {
+            // Offset of the pointer in the current page
+            let ptr_idx = ptr.addr() % self.page_size;
+            // And then the page's base address
+            let page_addr = ptr.addr() - ptr_idx;
+
             // Find the page this allocation belongs to.
             // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
             let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
@@ -175,49 +203,52 @@ impl IsolatedAlloc {
                    self.page_ptrs
                )
            };
-            let ptr_idx_pinfo = ptr_idx / 8;
-            let size_pinfo = size / 8;
+            // Since each bit of the bitset represents COMPRESSION_FACTOR bytes,
+            // we need to divide by that when getting the indices
+            let ptr_idx_pinfo = ptr_idx / COMPRESSION_FACTOR;
+            let size_pinfo = size / COMPRESSION_FACTOR;
             for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
                 pinfo.remove(idx);
             }
-        }
 
-        let mut free = vec![];
-        let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
-        for (idx, pinfo) in self.page_infos.iter().enumerate() {
-            if pinfo.is_empty() {
-                free.push(idx);
-            }
-        }
-        free.reverse();
-        for idx in free {
-            let _ = self.page_infos.remove(idx);
-            unsafe {
-                alloc::dealloc(self.page_ptrs.remove(idx), page_layout);
+            // We allocated all the pages with this layout
+            let page_layout =
+                unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+            // Only 1 page might have been freed after a dealloc, so if it exists,
+            // find it and free it (and adjust the vectors)
+            if let Some(free_idx) = self.page_infos.iter().position(|pinfo| pinfo.is_empty()) {
+                self.page_infos.remove(free_idx);
+                unsafe {
+                    alloc::dealloc(self.page_ptrs.remove(free_idx), page_layout);
+                }
             }
         }
     }
 
     /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
     /// must ask for a size larger than the host pagesize.
-    unsafe fn dealloc_multi_page(&mut self, ptr: *mut u8, layout: Layout) {
+    unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
+        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
+        // Find the pointer matching in address with the one we got
         let idx = self
            .huge_ptrs
            .iter()
            .position(|pg| ptr.addr() == pg.0.addr())
            .expect("Freeing unallocated pages");
-        let ptr = self.huge_ptrs.remove(idx).0;
+        // And kick it from the list
+        self.huge_ptrs.remove(idx);
         unsafe {
             alloc::dealloc(ptr, layout);
         }
     }
 }
-/*
+
 #[cfg(test)]
 mod tests {
     use super::*;
 
+    /// Helper function to assert that all bytes from `ptr` to `ptr.add(layout.size())`
+    /// are zeroes.
     fn assert_zeroes(ptr: *mut u8, layout: Layout) {
         unsafe {
             for ofs in 0..layout.size() {
@@ -226,85 +257,101 @@ mod tests {
         }
     }
 
+    /// Check that small (sub-pagesize) allocations are properly zeroed out.
     #[test]
     fn small_zeroes() {
+        let mut alloc = IsolatedAlloc::new();
+        // 256 should be less than the pagesize on *any* system
         let layout = Layout::from_size_align(256, 32).unwrap();
-        // allocate_zeroed
-        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 0) };
+        let ptr = unsafe { alloc.alloc_zeroed(layout) };
         assert_zeroes(ptr, layout);
         unsafe {
-            IsolatedAlloc::dealloc(ptr, layout, 0);
+            alloc.dealloc(ptr, layout);
         }
     }
 
+    /// Check that huge (> 1 page) allocations are properly zeroed out also.
     #[test]
-    fn big_zeroes() {
+    fn huge_zeroes() {
+        let mut alloc = IsolatedAlloc::new();
+        // 16k is about as big as pages get e.g. on macos aarch64
         let layout = Layout::from_size_align(16 * 1024, 128).unwrap();
-        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 1) };
+        let ptr = unsafe { alloc.alloc_zeroed(layout) };
         assert_zeroes(ptr, layout);
         unsafe {
-            IsolatedAlloc::dealloc(ptr, layout, 1);
+            alloc.dealloc(ptr, layout);
        }
    }
 
+    /// Check that repeatedly reallocating the same memory will still zero out
+    /// everything properly
     #[test]
     fn repeated_allocs() {
+        let mut alloc = IsolatedAlloc::new();
+        // Try both sub-pagesize allocs and those larger than / equal to a page
         for sz in (1..=(16 * 1024)).step_by(128) {
             let layout = Layout::from_size_align(sz, 1).unwrap();
-            let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 2) };
+            let ptr = unsafe { alloc.alloc_zeroed(layout) };
             assert_zeroes(ptr, layout);
             unsafe {
                 ptr.write_bytes(255, sz);
-                IsolatedAlloc::dealloc(ptr, layout, 2);
+                alloc.dealloc(ptr, layout);
             }
         }
     }
 
+    /// Checks that allocations of different sizes do not overlap.
     #[test]
     fn no_overlaps() {
-        no_overlaps_inner(3);
+        let mut alloc = IsolatedAlloc::new();
+        no_overlaps_inner(&mut alloc);
     }
 
-    fn no_overlaps_inner(id: u64) {
+    /// Allows us to reuse this bit for `no_overlaps` and `check_leaks`.
+    fn no_overlaps_inner(alloc: &mut IsolatedAlloc) {
         // Some random sizes and aligns
         let mut sizes = vec![32; 10];
         sizes.append(&mut vec![15; 4]);
         sizes.append(&mut vec![256; 12]);
         // Give it some multi-page ones too
         sizes.append(&mut vec![32 * 1024; 4]);
 
+        // Matching aligns for the sizes
         let mut aligns = vec![16; 12];
         aligns.append(&mut vec![256; 2]);
         aligns.append(&mut vec![64; 12]);
         aligns.append(&mut vec![4096; 4]);
 
+        // Make sure we didn't mess up in the test itself!
         assert_eq!(sizes.len(), aligns.len());
+
+        // Aggregate the sizes and aligns into a vec of layouts, then allocate them
         let layouts: Vec<_> = std::iter::zip(sizes, aligns)
             .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap())
             .collect();
-        let ptrs: Vec<_> = layouts
-            .iter()
-            .map(|layout| unsafe { IsolatedAlloc::alloc_zeroed(*layout, id) })
-            .collect();
+        let ptrs: Vec<_> =
+            layouts.iter().map(|layout| unsafe { alloc.alloc_zeroed(*layout) }).collect();
 
         for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) {
-            // Make sure we don't allocate overlapping ranges
+            // We requested zeroed allocations, so check that that's true
+            // Then write to the end of the current size, so if the allocs
+            // overlap (or the zeroing is wrong) then `assert_zeroes` will panic
             unsafe {
                 assert_zeroes(ptr, layout);
                 ptr.write_bytes(255, layout.size());
-                IsolatedAlloc::dealloc(ptr, layout, id);
+                alloc.dealloc(ptr, layout);
             }
         }
     }
 
+    /// Check for memory leaks after repeated allocations and deallocations.
     #[test]
     fn check_leaks() {
-        // Generate some noise first
-        no_overlaps_inner(4);
-        let alloc = ALLOCATOR.lock().unwrap();
+        let mut alloc = IsolatedAlloc::new();
 
-        // Should get auto-deleted if the allocations are empty
-        assert!(!alloc.allocators.contains_key(&4));
+        // Generate some noise first so leaks can manifest
+        no_overlaps_inner(&mut alloc);
+        // And then verify that no memory was leaked
+        assert!(alloc.page_ptrs.is_empty() && alloc.huge_ptrs.is_empty());
     }
 }
-*/
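
As a quick sanity check of the bitset arithmetic introduced above (not part of the patch itself): with `COMPRESSION_FACTOR = 4`, a request is rounded up to whole 4-byte chunks, and a byte offset within a page maps to a bitset index by dividing by 4. The hypothetical helper below reproduces that arithmetic in isolation, mirroring `normalized_layout` and the `idx / COMPRESSION_FACTOR` indexing; it is illustration only and is not how the allocator itself is structured.

```rust
const COMPRESSION_FACTOR: usize = 4;

/// Hypothetical stand-alone helper: given a requested size and the byte offset
/// of an allocation within its page, return the first bitset index the
/// allocation occupies and how many bits it covers.
fn bitset_span(size: usize, page_offset: usize) -> (usize, usize) {
    // Round the size up to whole COMPRESSION_FACTOR-byte chunks,
    // like `normalized_layout` does.
    let size = size.next_multiple_of(COMPRESSION_FACTOR);
    (page_offset / COMPRESSION_FACTOR, size / COMPRESSION_FACTOR)
}

fn main() {
    // A 13-byte allocation at page offset 32 rounds up to 16 bytes and
    // occupies bits 8..12 of that page's bitset.
    assert_eq!(bitset_span(13, 32), (8, 4));
}
```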