@@ -3,9 +3,15 @@ use std::collections::HashMap;
 use std::hash::{BuildHasherDefault, DefaultHasher};
 use std::sync;
 
+use rustc_index::bit_set::DenseBitSet;
+
 static ALLOCATOR: sync::Mutex<IsolatedAlloc> = sync::Mutex::new(IsolatedAlloc::empty());
 
 pub struct IsolatedAlloc {
+    /// A map of machine ID to allocator. If running in multi-seeded mode,
+    /// each machine should have its own pool of memory that can be accessed
+    /// separately. We use the normal `HashMap` type so that it's available
+    /// in a `const` context.
     #[allow(rustc::default_hash_types)]
     allocators: HashMap<u64, IsolatedAllocInner, BuildHasherDefault<DefaultHasher>>,
     /// The host (not emulated) page size, or 0 if it has not yet been set.
@@ -24,14 +30,14 @@ pub struct IsolatedAllocInner {
     /// with their size stored as the second element of the vector.
     huge_ptrs: Vec<(*mut u8, usize)>,
     /// Metadata about which bytes have been allocated on each page. The length
-    /// of this vector must be the same as that of `page_ptrs`, and the length of
-    /// the boxed slice must be exactly `page_size / 8`.
+    /// of this vector must be the same as that of `page_ptrs`, and the domain
+    /// size of the bitset must be exactly `page_size / 8`.
     ///
-    /// Conceptually, each bit of the `u8` represents the allocation status of one
-    /// byte on the corresponding element of `page_ptrs`; in practice, we only allocate
-    /// in 8-byte chunks currently, so the `u8`s are only ever 0 (fully free) or
-    /// 255 (fully allocated).
-    page_infos: Vec<Box<[u8]>>,
+    /// Conceptually, each bit of the bitset represents the allocation status of
+    /// one 8-byte chunk on the corresponding element of `page_ptrs`. Thus,
+    /// indexing into it should be done with a value one-eighth of the corresponding
+    /// offset on the matching `page_ptrs` element.
+    page_infos: Vec<DenseBitSet<usize>>,
 }
 
 // SAFETY: We only point to heap-allocated data
@@ -42,6 +48,7 @@ impl IsolatedAlloc {
     /// allow this function to be `const`; it is updated to its real value on
     /// the first call to `alloc()` or `alloc_zeroed()`.
     const fn empty() -> Self {
+        // We need this to be `const`
         #[allow(rustc::default_hash_types)]
         Self { allocators: HashMap::with_hasher(BuildHasherDefault::new()), page_size: 0 }
     }
@@ -108,14 +115,15 @@ impl IsolatedAllocInner {
     }
 
     /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self, page_size: usize) -> (*mut u8, &mut Box<[u8]>) {
+    fn add_page(&mut self, page_size: usize) -> (*mut u8, &mut DenseBitSet<usize>) {
         assert_ne!(page_size, 0);
 
         let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         // We don't overwrite the bytes we hand out so make sure they're zeroed by default!
         let page_ptr = unsafe { alloc::alloc(page_layout) };
-        // `page_infos` has to be one-eighth of the pagesize per the field docs
-        self.page_infos.push(vec![0u8; page_size / 8].into_boxed_slice());
+        // `page_infos` has to have one-eighth as many bits as a page has bytes
+        // (or one-64th as many bytes)
+        self.page_infos.push(DenseBitSet::new_empty(page_size / 8));
         self.page_ptrs.push(page_ptr);
         (page_ptr, self.page_infos.last_mut().unwrap())
     }
@@ -160,23 +168,28 @@ impl IsolatedAllocInner {
         page_size: usize,
         layout: Layout,
         page: *mut u8,
-        pinfo: &mut Box<[u8]>,
+        pinfo: &mut DenseBitSet<usize>,
         zeroed: bool,
     ) -> Option<*mut u8> {
         let (size, align) = IsolatedAllocInner::normalized_layout(layout);
 
+        // Check every alignment-sized block and see if there exists a `size`
+        // chunk of empty space, i.e. forall idx . !pinfo.contains(idx / 8)
         for idx in (0..page_size).step_by(align) {
             let idx_pinfo = idx / 8;
             let size_pinfo = size / 8;
-            if pinfo.len() < idx_pinfo + size_pinfo {
+            // DenseBitSet::contains() panics if the index is out of bounds
+            if pinfo.domain_size() < idx_pinfo + size_pinfo {
                 break;
             }
-            if pinfo[idx_pinfo..idx_pinfo + size_pinfo].iter().all(|v| *v == 0) {
-                pinfo[idx_pinfo..idx_pinfo + size_pinfo].fill(255);
+            let pred = !(idx_pinfo..idx_pinfo + size_pinfo).any(|idx| pinfo.contains(idx));
+            if pred {
+                pinfo.insert_range(idx_pinfo..idx_pinfo + size_pinfo);
                 unsafe {
                     let ptr = page.add(idx);
                     if zeroed {
-                        // Only write the bytes we were specifically asked to zero out
+                        // Only write the bytes we were specifically asked to
+                        // zero out, even if we allocated more
                         ptr.write_bytes(0, layout.size());
                     }
                     return Some(ptr);
@@ -223,14 +236,15 @@ impl IsolatedAllocInner {
             };
             let ptr_idx_pinfo = ptr_idx / 8;
             let size_pinfo = size / 8;
-            // Everything is always aligned to at least 8 bytes so this is ok
-            pinfo[ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo].fill(0);
+            for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
+                pinfo.remove(idx);
+            }
         }
 
         let mut free = vec![];
         let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         for (idx, pinfo) in self.page_infos.iter().enumerate() {
-            if pinfo.iter().all(|p| *p == 0) {
+            if pinfo.is_empty() {
                 free.push(idx);
             }
         }
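
Because the bookkeeping never touches the page memory itself, the bit-per-chunk scheme this patch adopts is easy to exercise in isolation. Below is a minimal, self-contained sketch of it; `TinyBitSet` and `find_free_run` are hypothetical names standing in for `rustc_index::bit_set::DenseBitSet` (which is internal to rustc) and the first-fit scan inside `alloc_from_page`, not code from this PR.

// Sketch of the scheme above: one bit per 8-byte chunk, so a byte offset
// on a page maps to bit `offset / 8`. `TinyBitSet` is a hypothetical
// stand-in for `DenseBitSet`, with only the methods the diff relies on.
struct TinyBitSet {
    words: Vec<u64>,
    domain_size: usize,
}

impl TinyBitSet {
    fn new_empty(domain_size: usize) -> Self {
        Self { words: vec![0; domain_size.div_ceil(64)], domain_size }
    }

    fn domain_size(&self) -> usize {
        self.domain_size
    }

    /// Panics on an out-of-bounds index, like `DenseBitSet::contains()`.
    fn contains(&self, idx: usize) -> bool {
        assert!(idx < self.domain_size);
        self.words[idx / 64] & (1 << (idx % 64)) != 0
    }

    fn insert(&mut self, idx: usize) {
        assert!(idx < self.domain_size);
        self.words[idx / 64] |= 1 << (idx % 64);
    }

    fn remove(&mut self, idx: usize) {
        assert!(idx < self.domain_size);
        self.words[idx / 64] &= !(1 << (idx % 64));
    }

    fn is_empty(&self) -> bool {
        self.words.iter().all(|w| *w == 0)
    }
}

// First-fit scan in the style of `alloc_from_page`: walk the page in
// `align`-sized steps and return the first offset whose `size / 8` chunk
// bits are all clear. `size` and `align` are assumed to already be
// normalized to multiples of 8, as `normalized_layout` guarantees.
fn find_free_run(pinfo: &TinyBitSet, page_size: usize, size: usize, align: usize) -> Option<usize> {
    for idx in (0..page_size).step_by(align) {
        let idx_pinfo = idx / 8;
        let size_pinfo = size / 8;
        if pinfo.domain_size() < idx_pinfo + size_pinfo {
            break;
        }
        if !(idx_pinfo..idx_pinfo + size_pinfo).any(|i| pinfo.contains(i)) {
            return Some(idx);
        }
    }
    None
}

fn main() {
    // A 4096-byte page tracked in 8-byte chunks needs 4096 / 8 = 512 bits.
    let page_size = 4096;
    let mut pinfo = TinyBitSet::new_empty(page_size / 8);

    // "Allocate" 32 bytes aligned to 8: the scan finds offset 0, and we
    // mark bits 0..4 (32 / 8 = 4 chunks) as in use.
    let (size, align) = (32, 8);
    let offset = find_free_run(&pinfo, page_size, size, align).unwrap();
    for i in offset / 8..offset / 8 + size / 8 {
        pinfo.insert(i);
    }

    // A second scan must skip the occupied run and land right after it.
    assert_eq!(find_free_run(&pinfo, page_size, size, align), Some(32));

    // "Deallocate": clear the bits again. An all-clear bitset means the
    // whole page is free, which is what the `is_empty()` check at the end
    // of the diff uses to decide a page can be returned to the host.
    for i in offset / 8..offset / 8 + size / 8 {
        pinfo.remove(i);
    }
    assert!(pinfo.is_empty());
}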