 use std::alloc::{self, Layout};
 use std::sync;
 
-use crate::helpers::ToU64;
-
 static ALLOCATOR: sync::Mutex<MachineAlloc> = sync::Mutex::new(MachineAlloc::empty());
 
 /// A distinct allocator for interpreter memory contents, allowing us to manage its
 /// memory separately from that of Miri itself. This is very useful for native-lib mode.
 #[derive(Debug)]
 pub struct MachineAlloc {
+    /// Pointers to page-aligned memory that has been claimed by the allocator.
+    /// Every pointer here must point to a page-sized allocation claimed via
+    /// the global allocator.
     pages: Vec<*mut u8>,
+    /// Pointers to multi-page-sized allocations. These must also be page-aligned,
+    /// with a size of `page_size * count` (where `count` is the second element
+    /// of each tuple).
     huge_allocs: Vec<(*mut u8, usize)>,
+    /// Metadata about which bytes have been allocated on each page. The length
+    /// of this vector must be the same as that of `pages`, and the length of the
+    /// boxed slice must be exactly `page_size / 8`.
+    ///
+    /// Conceptually, each bit of the `u8` represents the allocation status of one
+    /// byte on the corresponding element of `pages`; in practice, we only allocate
+    /// in 8-byte chunks currently, so the `u8`s are only ever 0 (fully free) or
+    /// 255 (fully allocated).
     allocated: Vec<Box<[u8]>>,
+    /// The host (not emulated) page size.
     page_size: usize,
+    /// If false, calls to `alloc()` and `alloc_zeroed()` just wrap the corresponding
+    /// function in the global allocator. Otherwise, they use the pages tracked
+    /// internally.
     enabled: bool,
 }
 
 // SAFETY: We only point to heap-allocated data
 unsafe impl Send for MachineAlloc {}
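To make the `allocated` bitmap docs concrete: with 8-byte chunks, a 16-byte allocation starting at byte offset 32 of a page claims entries 4 and 5 of that page's metadata slice. A minimal sketch of that bookkeeping (the helper name `mark_chunks` is hypothetical, not part of this change):

    // Hypothetical sketch: mark a normalized (8-byte-granular) range as
    // allocated in one page's metadata slice; MachineAlloc keeps one such
    // slice per page in `allocated`.
    fn mark_chunks(pinfo: &mut [u8], offset: usize, size: usize) {
        let first = offset / 8; // index of the first 8-byte chunk covered
        let count = size / 8; // number of chunks the allocation covers
        for chunk in &mut pinfo[first..first + count] {
            *chunk = 255; // fully allocated; 0 means fully free
        }
    }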
 
 impl MachineAlloc {
-    // Allocation-related methods
-
-    /// Initializes the allocator with placeholder 4k pages.
+    /// Initializes the allocator. `page_size` is set to 4k as a placeholder to
+    /// allow this function to be `const`; it is updated to its real value when
+    /// `enable()` is called.
     const fn empty() -> Self {
         Self {
             pages: Vec::new(),
@@ -33,62 +49,70 @@ impl MachineAlloc {
         }
     }
 
-    /// SAFETY: There must be no existing `MiriAllocBytes`
-    pub unsafe fn enable() {
+    /// Enables the allocator. From this point onwards, calls to `alloc()` and
+    /// `alloc_zeroed()` will return `(ptr, false)`.
+    pub fn enable() {
         let mut alloc = ALLOCATOR.lock().unwrap();
         alloc.enabled = true;
         // This needs to specifically be the system pagesize!
         alloc.page_size = unsafe {
-            let ret = libc::sysconf(libc::_SC_PAGE_SIZE);
-            if ret > 0 {
-                ret.try_into().unwrap()
-            } else {
-                4096 // fallback
-            }
+            // If sysconf errors, better to just panic
+            libc::sysconf(libc::_SC_PAGE_SIZE).try_into().unwrap()
         }
     }
 
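A note on the behavior change above: `libc::sysconf` returns a `c_long` that is `-1` on error, so dropping the 4096 fallback means the `try_into().unwrap()` now turns a `sysconf` failure into a panic rather than silently proceeding with a possibly wrong page size. A standalone sketch of the same pattern (not part of the diff):

    // Sketch of the page-size query `enable()` now relies on: a negative
    // return value (sysconf reports errors as -1) makes try_into() fail.
    fn host_page_size() -> usize {
        // SAFETY: sysconf(_SC_PAGE_SIZE) has no memory-safety preconditions.
        let ret = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };
        ret.try_into().expect("sysconf(_SC_PAGE_SIZE) failed")
    }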
-    /// Returns a vector of page addresses managed by the allocator.
-    #[expect(dead_code)]
-    pub fn pages() -> Vec<u64> {
-        let alloc = ALLOCATOR.lock().unwrap();
-        alloc.pages.clone().into_iter().map(|p| p.addr().to_u64()).collect()
-    }
-
+    /// Expands the available memory pool by adding one page.
     fn add_page(&mut self) {
         let page_layout =
             unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
         let page_ptr = unsafe { alloc::alloc(page_layout) };
-        if page_ptr.is_null() {
-            panic!("aligned_alloc failed!!!")
-        }
         self.allocated.push(vec![0u8; self.page_size / 8].into_boxed_slice());
         self.pages.push(page_ptr);
     }
 
+    /// For simplicity, we allocate in multiples of 8 bytes with at least that
+    /// alignment.
     #[inline]
     fn normalized_layout(layout: Layout) -> (usize, usize) {
         let align = if layout.align() < 8 { 8 } else { layout.align() };
         let size = layout.size().next_multiple_of(8);
         (size, align)
     }
 
+    /// If a requested allocation is greater than one page, we simply allocate
+    /// a whole number of pages for it.
     #[inline]
     fn huge_normalized_layout(&self, layout: Layout) -> (usize, usize) {
         let size = layout.size().next_multiple_of(self.page_size);
         let align = std::cmp::max(layout.align(), self.page_size);
         (size, align)
     }
 
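As a worked example of the two normalizations, assuming a 4096-byte host page: a 13-byte request with alignment 1 normalizes to `(16, 8)`, while a 5000-byte request is treated as huge and becomes `(8192, 4096)`. A quick check of the arithmetic (illustrative only):

    // Sketch of the rounding rules above (the 4096 page size is an assumption).
    fn normalization_demo() {
        let page_size: usize = 4096;
        // Small path: round size up to a multiple of 8, align up to at least 8.
        assert_eq!(13_usize.next_multiple_of(8), 16);
        // Huge path: round size up to whole pages, align up to a whole page.
        assert_eq!(5000_usize.next_multiple_of(page_size), 8192);
        assert_eq!(std::cmp::max(8, page_size), 4096);
    }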
+    /// Allocates memory as described in `Layout`. If `MachineAlloc::enable()`
+    /// has *not* been called yet, this is just a wrapper for `(alloc::alloc(),
+    /// true)`. Otherwise, it will allocate from its own memory pool and
+    /// return `(ptr, false)`. The latter element is meant to correspond to the
+    /// field `alloc_is_global` of `MiriAllocBytes`.
+    ///
     /// SAFETY: See alloc::alloc()
     #[inline]
-    pub unsafe fn alloc(layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc(layout: Layout) -> (*mut u8, bool) {
         let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe { if alloc.enabled { alloc.alloc_inner(layout) } else { alloc::alloc(layout) } }
+        unsafe {
+            if alloc.enabled {
+                (alloc.alloc_inner(layout), false)
+            } else {
+                (alloc::alloc(layout), true)
+            }
+        }
     }
 
+    /// Same as `alloc()`, but zeroes out the memory before returning it;
+    /// wraps `alloc::alloc_zeroed()` instead if `MachineAlloc::enable()` has
+    /// not been called yet.
+    ///
     /// SAFETY: See alloc::alloc_zeroed()
-    pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc_zeroed(layout: Layout) -> (*mut u8, bool) {
         let mut alloc = ALLOCATOR.lock().unwrap();
         if alloc.enabled {
             let ptr = unsafe { alloc.alloc_inner(layout) };
@@ -97,13 +121,14 @@ impl MachineAlloc {
                     ptr.write_bytes(0, layout.size());
                 }
             }
-            ptr
+            (ptr, false)
         } else {
-            unsafe { alloc::alloc_zeroed(layout) }
+            unsafe { (alloc::alloc_zeroed(layout), true) }
         }
     }
 
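The new boolean return threads allocation provenance to the caller, which must free through the matching allocator. A hedged sketch of the intended round trip, assuming the file's `use std::alloc::{self, Layout};` imports (this caller is illustrative, not part of the diff; `alloc_is_global` names the corresponding `MiriAllocBytes` field):

    // Sketch: free with alloc::dealloc() iff the allocation came from the
    // global allocator, i.e. iff alloc() returned (ptr, true).
    unsafe fn roundtrip(layout: Layout) {
        let (ptr, alloc_is_global) = unsafe { MachineAlloc::alloc(layout) };
        if ptr.is_null() {
            return; // allocation failure; nothing to free
        }
        unsafe {
            if alloc_is_global {
                alloc::dealloc(ptr, layout); // came from the global allocator
            } else {
                MachineAlloc::dealloc(ptr, layout); // came from the page pool
            }
        }
    }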
-    /// SAFETY: See alloc::alloc()
+    /// SAFETY: The allocator must have been `enable()`d already and
+    /// the `layout` must be valid.
     unsafe fn alloc_inner(&mut self, layout: Layout) -> *mut u8 {
         let (size, align) = MachineAlloc::normalized_layout(layout);
 
@@ -136,7 +161,8 @@ impl MachineAlloc {
         }
     }
 
-    /// SAFETY: See alloc::alloc()
+    /// SAFETY: Same as `alloc_inner()` with the added requirement that `layout`
+    /// must ask for a size larger than the host pagesize.
     unsafe fn alloc_multi_page(&mut self, layout: Layout) -> *mut u8 {
         let (size, align) = self.huge_normalized_layout(layout);
 
@@ -146,38 +172,36 @@ impl MachineAlloc {
         ret
     }
 
-    /// Safety: see alloc::dealloc()
+    /// Deallocates a pointer from the machine allocator. While not unsound,
+    /// attempting to deallocate a pointer if `MachineAlloc` has not been enabled
+    /// will likely result in a panic.
+    ///
+    /// SAFETY: This pointer must have been allocated with `MachineAlloc::alloc()`
+    /// (or `alloc_zeroed()`), which must have returned `(ptr, false)` specifically!
+    /// If it returned `(ptr, true)`, then deallocate it with `alloc::dealloc()` instead.
     pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe {
-            if alloc.enabled {
-                alloc.dealloc_inner(ptr, layout);
-            } else {
-                alloc::dealloc(ptr, layout);
-            }
-        }
-    }
+        let mut alloc_guard = ALLOCATOR.lock().unwrap();
+        // Doing it this way lets us grab 2 mutable references to different fields at once
+        let alloc: &mut MachineAlloc = &mut alloc_guard;
 
-    /// SAFETY: See alloc::dealloc()
-    unsafe fn dealloc_inner(&mut self, ptr: *mut u8, layout: Layout) {
         let (size, align) = MachineAlloc::normalized_layout(layout);
 
         if size == 0 || ptr.is_null() {
             return;
         }
 
-        let ptr_idx = ptr.addr() % self.page_size;
+        let ptr_idx = ptr.addr() % alloc.page_size;
         let page_addr = ptr.addr() - ptr_idx;
 
-        if align > self.page_size || size > self.page_size {
+        if align > alloc.page_size || size > alloc.page_size {
             unsafe {
-                self.dealloc_multi_page(ptr, layout);
+                alloc.dealloc_multi_page(ptr, layout);
             }
         } else {
-            let pinfo = std::iter::zip(&mut self.pages, &mut self.allocated)
+            let pinfo = std::iter::zip(&mut alloc.pages, &mut alloc.allocated)
                 .find(|(page, _)| page.addr() == page_addr);
             let Some((_, pinfo)) = pinfo else {
-                panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.pages)
+                panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", alloc.pages)
             };
             let ptr_idx_pinfo = ptr_idx / 8;
             let size_pinfo = size / 8;
@@ -187,22 +211,23 @@ impl MachineAlloc {
 
         let mut free = vec![];
         let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
-        for (idx, pinfo) in self.allocated.iter().enumerate() {
+            unsafe { Layout::from_size_align_unchecked(alloc.page_size, alloc.page_size) };
+        for (idx, pinfo) in alloc.allocated.iter().enumerate() {
             if pinfo.iter().all(|p| *p == 0) {
                 free.push(idx);
             }
         }
         free.reverse();
         for idx in free {
-            let _ = self.allocated.remove(idx);
+            let _ = alloc.allocated.remove(idx);
             unsafe {
-                alloc::dealloc(self.pages.remove(idx), page_layout);
+                alloc::dealloc(alloc.pages.remove(idx), page_layout);
             }
         }
     }
 
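The `alloc_guard` reborrow that replaced `dealloc_inner` is the usual trick for splitting borrows through a `MutexGuard`: every field access on the guard goes through `DerefMut` and so counts as a mutable borrow of the whole guard, but one reborrow to `&mut MachineAlloc` gives the compiler a plain place whose fields (`pages` and `allocated`) it can borrow mutably at the same time. A minimal standalone illustration (names are hypothetical):

    use std::sync::Mutex;

    struct Pair { a: Vec<u8>, b: Vec<u8> }

    fn split_fields(m: &Mutex<Pair>) {
        let mut guard = m.lock().unwrap();
        // std::iter::zip(&mut guard.a, &mut guard.b) is rejected here: the two
        // DerefMut calls would mutably borrow `guard` twice.
        let pair: &mut Pair = &mut guard; // deref-coerce exactly once
        let _zipped = std::iter::zip(&mut pair.a, &mut pair.b); // borrows split per field
    }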
-    /// SAFETY: See alloc::dealloc()
+    /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
+    /// must ask for a size larger than the host pagesize.
     unsafe fn dealloc_multi_page(&mut self, ptr: *mut u8, layout: Layout) {
         let (idx, _) = self
             .huge_allocs