use std::alloc::{self, Layout};
use std::sync;

-use crate::helpers::ToU64;
-
static ALLOCATOR: sync::Mutex<MachineAlloc> = sync::Mutex::new(MachineAlloc::empty());

/// A distinct allocator for interpreter memory contents, allowing us to manage its
/// memory separately from that of Miri itself. This is very useful for native-lib mode.
#[derive(Debug)]
pub struct MachineAlloc {
+    /// Pointers to page-aligned memory that has been claimed by the allocator.
+    /// Every pointer here must point to a page-sized allocation claimed via
+    /// the global allocator.
    pages: Vec<*mut u8>,
+    /// Pointers to multi-page-sized allocations. These must also be page-aligned,
+    /// with a size of `page_size * count` (where `count` is the second element
+    /// of the tuple).
    huge_allocs: Vec<(*mut u8, usize)>,
+    /// Metadata about which bytes have been allocated on each page. The length
+    /// of this vector must be the same as that of `pages`, and the length of the
+    /// boxed slice must be exactly `page_size / 8`.
+    ///
+    /// Conceptually, each bit of the `u8` represents the allocation status of one
+    /// byte on the corresponding element of `pages`; in practice, we only allocate
+    /// in 8-byte chunks currently, so the `u8`s are only ever 0 (fully free) or
+    /// 255 (fully allocated).
    allocated: Vec<Box<[u8]>>,
+    /// The host (not emulated) page size.
    page_size: usize,
+    /// If false, calls to `alloc()` and `alloc_zeroed()` just wrap the corresponding
+    /// function in the global allocator. Otherwise, uses the pages tracked
+    /// internally.
    enabled: bool,
}
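
// For illustration, a worked example of the `allocated` bitmap scheme described
// above (values assumed, with a hypothetical 4096-byte host page): each boxed
// slice then holds 4096 / 8 == 512 entries, and a single 16-byte allocation at
// the start of `pages[0]` sets `allocated[0][0] == 255` and `allocated[0][1] == 255`
// (bytes 0..16 allocated) while the remaining 510 entries stay 0 (free).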

// SAFETY: We only point to heap-allocated data
unsafe impl Send for MachineAlloc {}

impl MachineAlloc {
-    // Allocation-related methods
-
-    /// Initializes the allocator with placeholder 4k pages.
+    /// Initializes the allocator. `page_size` is set to 4k as a placeholder to
+    /// allow this function to be `const`; it is updated to its real value when
+    /// `enable()` is called.
    const fn empty() -> Self {
        Self {
            pages: Vec::new(),
@@ -33,62 +49,64 @@ impl MachineAlloc {
        }
    }

-    /// SAFETY: There must be no existing `MiriAllocBytes`
-    pub unsafe fn enable() {
+    /// Enables the allocator. From this point onwards, calls to `alloc()` and
+    /// `alloc_zeroed()` will return `(ptr, false)`.
+    pub fn enable() {
        let mut alloc = ALLOCATOR.lock().unwrap();
        alloc.enabled = true;
        // This needs to specifically be the system pagesize!
        alloc.page_size = unsafe {
-            let ret = libc::sysconf(libc::_SC_PAGE_SIZE);
-            if ret > 0 {
-                ret.try_into().unwrap()
-            } else {
-                4096 // fallback
-            }
+            // If sysconf errors, better to just panic
+            libc::sysconf(libc::_SC_PAGE_SIZE).try_into().unwrap()
        }
    }

-    /// Returns a vector of page addresses managed by the allocator.
-    #[expect(dead_code)]
-    pub fn pages() -> Vec<u64> {
-        let alloc = ALLOCATOR.lock().unwrap();
-        alloc.pages.clone().into_iter().map(|p| p.addr().to_u64()).collect()
-    }
-
+    /// Expands the available memory pool by adding one page.
    fn add_page(&mut self) {
        let page_layout =
            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
        let page_ptr = unsafe { alloc::alloc(page_layout) };
-        if page_ptr.is_null() {
-            panic!("aligned_alloc failed!!!")
-        }
        self.allocated.push(vec![0u8; self.page_size / 8].into_boxed_slice());
        self.pages.push(page_ptr);
    }
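
    // E.g. on a host with 4096-byte pages (values assumed for illustration), each
    // call to `add_page()` claims one page-aligned 4096-byte block from the global
    // allocator plus a fresh 512-entry (4096 / 8) bitmap marking the whole page free.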

+    /// For simplicity, we allocate in multiples of 8 bytes with at least that
+    /// alignment.
    #[inline]
    fn normalized_layout(layout: Layout) -> (usize, usize) {
        let align = if layout.align() < 8 { 8 } else { layout.align() };
        let size = layout.size().next_multiple_of(8);
        (size, align)
    }
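
    // A worked example of the normalization above (inputs assumed for illustration):
    // a request of size 5 with align 1 becomes (8, 8), and size 17 with align 16
    // becomes (24, 16). Sizes round up to a multiple of 8; alignments only up to 8.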

+    /// If a requested allocation is greater than one page, we simply allocate
+    /// a whole number of dedicated pages for it.
    #[inline]
    fn huge_normalized_layout(&self, layout: Layout) -> (usize, usize) {
        let size = layout.size().next_multiple_of(self.page_size);
        let align = std::cmp::max(layout.align(), self.page_size);
        (size, align)
    }
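
    // E.g. with a hypothetical 4096-byte host page: a request of 5000 bytes with
    // align 8 normalizes to (8192, 4096), i.e. two dedicated, page-aligned pages.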

+    /// Allocates memory as described in `Layout`. If `MachineAlloc::enable()`
+    /// has *not* been called yet, this is just a wrapper for `(alloc::alloc(),
+    /// true)`. Otherwise, it will allocate from its own memory pool and
+    /// return `(ptr, false)`. The latter field is meant to correspond with the
+    /// field `alloc_is_global` for `MiriAllocBytes`.
+    ///
    /// SAFETY: See alloc::alloc()
    #[inline]
-    pub unsafe fn alloc(layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc(layout: Layout) -> (*mut u8, bool) {
        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe { if alloc.enabled { alloc.alloc_inner(layout) } else { alloc::alloc(layout) } }
+        unsafe { if alloc.enabled { (alloc.alloc_inner(layout), false) } else { (alloc::alloc(layout), true) } }
    }

+    /// Same as `alloc()`, but zeroes out the memory before returning it. Wraps
+    /// `alloc::alloc_zeroed()` instead if `MachineAlloc::enable()` has not been
+    /// called yet.
+    ///
    /// SAFETY: See alloc::alloc_zeroed()
-    pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc_zeroed(layout: Layout) -> (*mut u8, bool) {
        let mut alloc = ALLOCATOR.lock().unwrap();
        if alloc.enabled {
            let ptr = unsafe { alloc.alloc_inner(layout) };
@@ -97,13 +115,14 @@ impl MachineAlloc {
                    ptr.write_bytes(0, layout.size());
                }
            }
-            ptr
+            (ptr, false)
        } else {
-            unsafe { alloc::alloc_zeroed(layout) }
+            unsafe { (alloc::alloc_zeroed(layout), true) }
        }
    }
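
    // A sketch of the intended caller contract (hypothetical caller code; the
    // `MiriAllocBytes` integration is elided): the returned bool records which
    // allocator owns the pointer, so freeing must take the matching path.
    //
    //     let layout = Layout::from_size_align(32, 8).unwrap();
    //     let (ptr, is_global) = unsafe { MachineAlloc::alloc_zeroed(layout) };
    //     // ... use `ptr` ...
    //     if is_global {
    //         unsafe { alloc::dealloc(ptr, layout) }; // the global allocator owns it
    //     } else {
    //         unsafe { MachineAlloc::dealloc(ptr, layout) }; // the pool owns it
    //     }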

-    /// SAFETY: See alloc::alloc()
+    /// SAFETY: The allocator must have been `enable()`d already and
+    /// the `layout` must be valid.
    unsafe fn alloc_inner(&mut self, layout: Layout) -> *mut u8 {
        let (size, align) = MachineAlloc::normalized_layout(layout);
@@ -136,7 +155,8 @@ impl MachineAlloc {
        }
    }

-    /// SAFETY: See alloc::alloc()
+    /// SAFETY: Same as `alloc_inner()` with the added requirement that `layout`
+    /// must ask for a size larger than the host pagesize.
    unsafe fn alloc_multi_page(&mut self, layout: Layout) -> *mut u8 {
        let (size, align) = self.huge_normalized_layout(layout);
@@ -146,38 +166,36 @@ impl MachineAlloc {
        ret
    }

-    /// Safety: see alloc::dealloc()
+    /// Deallocates a pointer from the machine allocator. While not unsound,
+    /// attempting to deallocate a pointer if `MachineAlloc` has not been enabled
+    /// will likely result in a panic.
+    ///
+    /// SAFETY: This pointer must have been allocated with `MachineAlloc::alloc()`
+    /// (or `alloc_zeroed()`), which must have returned `(ptr, false)` specifically!
+    /// If it returned `(ptr, true)`, then deallocate it with `alloc::dealloc()` instead.
    pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe {
-            if alloc.enabled {
-                alloc.dealloc_inner(ptr, layout);
-            } else {
-                alloc::dealloc(ptr, layout);
-            }
-        }
-    }
+        let mut alloc_guard = ALLOCATOR.lock().unwrap();
+        // Doing it this way lets us grab 2 mutable references to different fields at once
+        let alloc: &mut MachineAlloc = &mut alloc_guard;

-    /// SAFETY: See alloc::dealloc()
-    unsafe fn dealloc_inner(&mut self, ptr: *mut u8, layout: Layout) {
        let (size, align) = MachineAlloc::normalized_layout(layout);

        if size == 0 || ptr.is_null() {
            return;
        }

-        let ptr_idx = ptr.addr() % self.page_size;
+        let ptr_idx = ptr.addr() % alloc.page_size;
        let page_addr = ptr.addr() - ptr_idx;

-        if align > self.page_size || size > self.page_size {
+        if align > alloc.page_size || size > alloc.page_size {
            unsafe {
-                self.dealloc_multi_page(ptr, layout);
+                alloc.dealloc_multi_page(ptr, layout);
            }
        } else {
-            let pinfo = std::iter::zip(&mut self.pages, &mut self.allocated)
+            let pinfo = std::iter::zip(&mut alloc.pages, &mut alloc.allocated)
                .find(|(page, _)| page.addr() == page_addr);
            let Some((_, pinfo)) = pinfo else {
-                panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.pages)
+                panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", alloc.pages)
            };
            let ptr_idx_pinfo = ptr_idx / 8;
            let size_pinfo = size / 8;
@@ -187,22 +205,23 @@ impl MachineAlloc {

        let mut free = vec![];
        let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
-        for (idx, pinfo) in self.allocated.iter().enumerate() {
+            unsafe { Layout::from_size_align_unchecked(alloc.page_size, alloc.page_size) };
+        for (idx, pinfo) in alloc.allocated.iter().enumerate() {
            if pinfo.iter().all(|p| *p == 0) {
                free.push(idx);
            }
        }
        free.reverse();
        for idx in free {
-            let _ = self.allocated.remove(idx);
+            let _ = alloc.allocated.remove(idx);
            unsafe {
-                alloc::dealloc(self.pages.remove(idx), page_layout);
+                alloc::dealloc(alloc.pages.remove(idx), page_layout);
            }
        }
    }
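
    // An illustrative trace of the reclamation pass above (state assumed for the
    // example): with `allocated == [[0; 512], [255, 0, ...], [0; 512]]`, the scan
    // collects `free == [0, 2]`, reversed to `[2, 0]` so that earlier removals do
    // not shift later indices, and pages 2 and 0 go back to the global allocator.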

-    /// SAFETY: See alloc::dealloc()
+    /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
+    /// must ask for a size larger than the host pagesize.
    unsafe fn dealloc_multi_page(&mut self, ptr: *mut u8, layout: Layout) {
        let (idx, _) = self
            .huge_allocs