Skip to content

Commit 4b1f50f

Browse files
committed
review comments
1 parent f03493c commit 4b1f50f

File tree

5 files changed

+102
-72
lines changed

5 files changed

+102
-72
lines changed

src/alloc_bytes.rs renamed to src/alloc/alloc_bytes.rs

Lines changed: 23 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,8 @@ use std::{alloc, slice};
55
use rustc_abi::{Align, Size};
66
use rustc_middle::mir::interpret::AllocBytes;
77

8-
#[cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))]
9-
use crate::discrete_alloc::MachineAlloc;
8+
#[cfg(target_os = "linux")]
9+
use crate::alloc::discrete_alloc::MachineAlloc;
1010
use crate::helpers::ToU64 as _;
1111

1212
/// Allocation bytes that explicitly handle the layout of the data they're storing.
@@ -20,6 +20,9 @@ pub struct MiriAllocBytes {
2020
/// * If `self.layout.size() == 0`, then `self.ptr` was allocated with the equivalent layout with size 1.
2121
/// * Otherwise, `self.ptr` points to memory allocated with `self.layout`.
2222
ptr: *mut u8,
23+
/// Whether this `MiriAllocBytes` was allocated by the global allocator via
24+
/// `alloc::alloc()` (true) or by the discrete allocator (false).
25+
alloc_is_global: bool,
2326
}
2427

2528
impl Clone for MiriAllocBytes {
@@ -42,9 +45,15 @@ impl Drop for MiriAllocBytes {
4245

4346
// SAFETY: Invariant, `self.ptr` points to memory allocated with `self.layout`.
4447
unsafe {
45-
#[cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))]
46-
MachineAlloc::dealloc(self.ptr, alloc_layout);
47-
#[cfg(not(all(unix, any(target_arch = "x86", target_arch = "x86_64"))))]
48+
#[cfg(target_os = "linux")]
49+
{
50+
if self.alloc_is_global {
51+
alloc::dealloc(self.ptr, alloc_layout);
52+
} else {
53+
MachineAlloc::dealloc(self.ptr, alloc_layout);
54+
}
55+
}
56+
#[cfg(not(target_os = "linux"))]
4857
alloc::dealloc(self.ptr, alloc_layout);
4958
}
5059
}
@@ -75,20 +84,20 @@ impl MiriAllocBytes {
7584
fn alloc_with(
7685
size: u64,
7786
align: u64,
78-
alloc_fn: impl FnOnce(Layout) -> *mut u8,
87+
alloc_fn: impl FnOnce(Layout) -> (*mut u8, bool),
7988
) -> Result<MiriAllocBytes, ()> {
8089
let size = usize::try_from(size).map_err(|_| ())?;
8190
let align = usize::try_from(align).map_err(|_| ())?;
8291
let layout = Layout::from_size_align(size, align).map_err(|_| ())?;
8392
// When size is 0 we allocate 1 byte anyway, to ensure each allocation has a unique address.
8493
let alloc_layout =
8594
if size == 0 { Layout::from_size_align(1, align).unwrap() } else { layout };
86-
let ptr = alloc_fn(alloc_layout);
95+
let (ptr, alloc_is_global) = alloc_fn(alloc_layout);
8796
if ptr.is_null() {
8897
Err(())
8998
} else {
9099
// SAFETY: All `MiriAllocBytes` invariants are fulfilled.
91-
Ok(Self { ptr, layout })
100+
Ok(Self { ptr, layout, alloc_is_global })
92101
}
93102
}
94103
}
@@ -100,13 +109,13 @@ impl AllocBytes for MiriAllocBytes {
100109
let align = align.bytes();
101110
// SAFETY: `alloc_fn` will only be used with `size != 0`.
102111
let alloc_fn = |layout| unsafe {
103-
#[cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))]
112+
#[cfg(target_os = "linux")]
104113
{
105114
MachineAlloc::alloc(layout)
106115
}
107-
#[cfg(not(all(unix, any(target_arch = "x86", target_arch = "x86_64"))))]
116+
#[cfg(not(target_os = "linux"))]
108117
{
109-
alloc::alloc(layout)
118+
(alloc::alloc(layout), true)
110119
}
111120
};
112121
let alloc_bytes = MiriAllocBytes::alloc_with(size.to_u64(), align, alloc_fn)
@@ -124,13 +133,13 @@ impl AllocBytes for MiriAllocBytes {
124133
let align = align.bytes();
125134
// SAFETY: `alloc_fn` will only be used with `size != 0`.
126135
let alloc_fn = |layout| unsafe {
127-
#[cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))]
136+
#[cfg(target_os = "linux")]
128137
{
129138
MachineAlloc::alloc_zeroed(layout)
130139
}
131-
#[cfg(not(all(unix, any(target_arch = "x86", target_arch = "x86_64"))))]
140+
#[cfg(not(target_os = "linux"))]
132141
{
133-
alloc::alloc_zeroed(layout)
142+
(alloc::alloc_zeroed(layout), true)
134143
}
135144
};
136145
MiriAllocBytes::alloc_with(size, align, alloc_fn).ok()

src/discrete_alloc.rs renamed to src/alloc/discrete_alloc.rs

Lines changed: 71 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -1,28 +1,44 @@
11
use std::alloc::{self, Layout};
22
use std::sync;
33

4-
use crate::helpers::ToU64;
5-
64
static ALLOCATOR: sync::Mutex<MachineAlloc> = sync::Mutex::new(MachineAlloc::empty());
75

86
/// A distinct allocator for interpreter memory contents, allowing us to manage its
97
/// memory separately from that of Miri itself. This is very useful for native-lib mode.
108
#[derive(Debug)]
119
pub struct MachineAlloc {
10+
/// Pointers to page-aligned memory that has been claimed by the allocator.
11+
/// Every pointer here must point to a page-sized allocation claimed via
12+
/// the global allocator.
1213
pages: Vec<*mut u8>,
14+
/// Pointers to multi-page-sized allocations. These must also be page-aligned,
15+
/// with a size of `page_size * count` (where `count` is the second element
16+
/// of the vector).
1317
huge_allocs: Vec<(*mut u8, usize)>,
18+
/// Metadata about which bytes have been allocated on each page. The length
19+
/// of this vector must be the same as that of `pages`, and the length of the
20+
/// boxed slice must be exactly `page_size / 8`.
21+
///
22+
/// Conceptually, each bit of the `u8` represents the allocation status of one
23+
/// byte on the corresponding element of `pages`; in practice, we only allocate
24+
/// in 8-byte chunks currently, so the `u8`s are only ever 0 (fully free) or
25+
/// 255 (fully allocated).
1426
allocated: Vec<Box<[u8]>>,
27+
/// The host (not emulated) page size.
1528
page_size: usize,
29+
/// If false, calls to `alloc()` and `alloc_zeroed()` just wrap the corresponding
30+
/// function in the global allocator. Otherwise, uses the pages tracked
31+
/// internally.
1632
enabled: bool,
1733
}
1834

1935
// SAFETY: We only point to heap-allocated data
2036
unsafe impl Send for MachineAlloc {}
2137

2238
impl MachineAlloc {
23-
// Allocation-related methods
24-
25-
/// Initializes the allocator with placeholder 4k pages.
39+
/// Initializes the allocator. `page_size` is set to 4k as a placeholder to
40+
/// allow this function to be `const`; it is updated to its real value when
41+
/// `enable()` is called.
2642
const fn empty() -> Self {
2743
Self {
2844
pages: Vec::new(),
@@ -33,62 +49,64 @@ impl MachineAlloc {
3349
}
3450
}
3551

36-
/// SAFETY: There must be no existing `MiriAllocBytes`
37-
pub unsafe fn enable() {
52+
/// Enables the allocator. From this point onwards, calls to `alloc()` and
53+
/// `alloc_zeroed()` will return `(ptr, false)`.
54+
pub fn enable() {
3855
let mut alloc = ALLOCATOR.lock().unwrap();
3956
alloc.enabled = true;
4057
// This needs to specifically be the system pagesize!
4158
alloc.page_size = unsafe {
42-
let ret = libc::sysconf(libc::_SC_PAGE_SIZE);
43-
if ret > 0 {
44-
ret.try_into().unwrap()
45-
} else {
46-
4096 // fallback
47-
}
59+
// If sysconf errors, better to just panic
60+
libc::sysconf(libc::_SC_PAGE_SIZE).try_into().unwrap()
4861
}
4962
}
5063

51-
/// Returns a vector of page addresses managed by the allocator.
52-
#[expect(dead_code)]
53-
pub fn pages() -> Vec<u64> {
54-
let alloc = ALLOCATOR.lock().unwrap();
55-
alloc.pages.clone().into_iter().map(|p| p.addr().to_u64()).collect()
56-
}
57-
64+
/// Expands the available memory pool by adding one page.
5865
fn add_page(&mut self) {
5966
let page_layout =
6067
unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
6168
let page_ptr = unsafe { alloc::alloc(page_layout) };
62-
if page_ptr.is_null() {
63-
panic!("aligned_alloc failed!!!")
64-
}
6569
self.allocated.push(vec![0u8; self.page_size / 8].into_boxed_slice());
6670
self.pages.push(page_ptr);
6771
}
6872

73+
/// For simplicity, we allocate in multiples of 8 bytes with at least that
74+
/// alignment.
6975
#[inline]
7076
fn normalized_layout(layout: Layout) -> (usize, usize) {
7177
let align = if layout.align() < 8 { 8 } else { layout.align() };
7278
let size = layout.size().next_multiple_of(8);
7379
(size, align)
7480
}
7581

82+
/// If a requested allocation is greater than one page, we simply allocate
83+
/// a fixed number of pages for it.
7684
#[inline]
7785
fn huge_normalized_layout(&self, layout: Layout) -> (usize, usize) {
7886
let size = layout.size().next_multiple_of(self.page_size);
7987
let align = std::cmp::max(layout.align(), self.page_size);
8088
(size, align)
8189
}
8290

91+
/// Allocates memory as described in `Layout`. If `MachineAlloc::enable()`
92+
/// has *not* been called yet, this is just a wrapper for `(alloc::alloc(),
93+
/// true)`. Otherwise, it will allocate from its own memory pool and
94+
/// return `(ptr, false)`. The boolean corresponds to the
95+
/// `alloc_is_global` field of `MiriAllocBytes`.
96+
///
8397
/// SAFETY: See alloc::alloc()
8498
#[inline]
85-
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
99+
pub unsafe fn alloc(layout: Layout) -> (*mut u8, bool) {
86100
let mut alloc = ALLOCATOR.lock().unwrap();
87-
unsafe { if alloc.enabled { alloc.alloc_inner(layout) } else { alloc::alloc(layout) } }
101+
unsafe { if alloc.enabled { (alloc.alloc_inner(layout), false) } else { (alloc::alloc(layout), true) } }
88102
}
89103

104+
/// Same as `alloc()`, but zeroes out data before allocating. Instead
105+
/// wraps `alloc::alloc_zeroed()` if `MachineAlloc::enable()` has not been
106+
/// called yet.
107+
///
90108
/// SAFETY: See alloc::alloc_zeroed()
91-
pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
109+
pub unsafe fn alloc_zeroed(layout: Layout) -> (*mut u8, bool) {
92110
let mut alloc = ALLOCATOR.lock().unwrap();
93111
if alloc.enabled {
94112
let ptr = unsafe { alloc.alloc_inner(layout) };
@@ -97,13 +115,14 @@ impl MachineAlloc {
97115
ptr.write_bytes(0, layout.size());
98116
}
99117
}
100-
ptr
118+
(ptr, false)
101119
} else {
102-
unsafe { alloc::alloc_zeroed(layout) }
120+
unsafe { (alloc::alloc_zeroed(layout), true) }
103121
}
104122
}
105123

106-
/// SAFETY: See alloc::alloc()
124+
/// SAFETY: The allocator must have been `enable()`d already and
125+
/// the `layout` must be valid.
107126
unsafe fn alloc_inner(&mut self, layout: Layout) -> *mut u8 {
108127
let (size, align) = MachineAlloc::normalized_layout(layout);
109128

@@ -136,7 +155,8 @@ impl MachineAlloc {
136155
}
137156
}
138157

139-
/// SAFETY: See alloc::alloc()
158+
/// SAFETY: Same as `alloc_inner()` with the added requirement that `layout`
159+
/// must ask for a size larger than the host pagesize.
140160
unsafe fn alloc_multi_page(&mut self, layout: Layout) -> *mut u8 {
141161
let (size, align) = self.huge_normalized_layout(layout);
142162

@@ -146,38 +166,36 @@ impl MachineAlloc {
146166
ret
147167
}
148168

149-
/// Safety: see alloc::dealloc()
169+
/// Deallocates a pointer from the machine allocator. While not unsound,
170+
/// attempting to deallocate a pointer if `MachineAlloc` has not been enabled
171+
/// will likely result in a panic.
172+
///
173+
/// SAFETY: This pointer must have been allocated with `MachineAlloc::alloc()`
174+
/// (or `alloc_zeroed()`) which must have returned `(ptr, false)` specifically!
175+
/// If it returned `(ptr, true)`, then deallocate it with `alloc::dealloc()` instead.
150176
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
151-
let mut alloc = ALLOCATOR.lock().unwrap();
152-
unsafe {
153-
if alloc.enabled {
154-
alloc.dealloc_inner(ptr, layout);
155-
} else {
156-
alloc::dealloc(ptr, layout);
157-
}
158-
}
159-
}
177+
let mut alloc_guard = ALLOCATOR.lock().unwrap();
178+
// Doing it this way lets us grab 2 mutable references to different fields at once
179+
let alloc: &mut MachineAlloc = &mut alloc_guard;
160180

161-
/// SAFETY: See alloc::dealloc()
162-
unsafe fn dealloc_inner(&mut self, ptr: *mut u8, layout: Layout) {
163181
let (size, align) = MachineAlloc::normalized_layout(layout);
164182

165183
if size == 0 || ptr.is_null() {
166184
return;
167185
}
168186

169-
let ptr_idx = ptr.addr() % self.page_size;
187+
let ptr_idx = ptr.addr() % alloc.page_size;
170188
let page_addr = ptr.addr() - ptr_idx;
171189

172-
if align > self.page_size || size > self.page_size {
190+
if align > alloc.page_size || size > alloc.page_size {
173191
unsafe {
174-
self.dealloc_multi_page(ptr, layout);
192+
alloc.dealloc_multi_page(ptr, layout);
175193
}
176194
} else {
177-
let pinfo = std::iter::zip(&mut self.pages, &mut self.allocated)
195+
let pinfo = std::iter::zip(&mut alloc.pages, &mut alloc.allocated)
178196
.find(|(page, _)| page.addr() == page_addr);
179197
let Some((_, pinfo)) = pinfo else {
180-
panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.pages)
198+
panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", alloc.pages)
181199
};
182200
let ptr_idx_pinfo = ptr_idx / 8;
183201
let size_pinfo = size / 8;
@@ -187,22 +205,23 @@ impl MachineAlloc {
187205

188206
let mut free = vec![];
189207
let page_layout =
190-
unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
191-
for (idx, pinfo) in self.allocated.iter().enumerate() {
208+
unsafe { Layout::from_size_align_unchecked(alloc.page_size, alloc.page_size) };
209+
for (idx, pinfo) in alloc.allocated.iter().enumerate() {
192210
if pinfo.iter().all(|p| *p == 0) {
193211
free.push(idx);
194212
}
195213
}
196214
free.reverse();
197215
for idx in free {
198-
let _ = self.allocated.remove(idx);
216+
let _ = alloc.allocated.remove(idx);
199217
unsafe {
200-
alloc::dealloc(self.pages.remove(idx), page_layout);
218+
alloc::dealloc(alloc.pages.remove(idx), page_layout);
201219
}
202220
}
203221
}
204222

205-
/// SAFETY: See alloc::dealloc()
223+
/// SAFETY: Same as `dealloc()` with the added requirement that `layout`
224+
/// must ask for a size larger than the host pagesize.
206225
unsafe fn dealloc_multi_page(&mut self, ptr: *mut u8, layout: Layout) {
207226
let (idx, _) = self
208227
.huge_allocs

src/alloc/mod.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
mod alloc_bytes;
2+
pub mod discrete_alloc;
3+
4+
pub use self::alloc_bytes::MiriAllocBytes;

src/lib.rs

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -70,14 +70,12 @@ extern crate rustc_target;
7070
#[allow(unused_extern_crates)]
7171
extern crate rustc_driver;
7272

73+
mod alloc;
7374
mod alloc_addresses;
74-
mod alloc_bytes;
7575
mod borrow_tracker;
7676
mod clock;
7777
mod concurrency;
7878
mod diagnostics;
79-
#[cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))]
80-
mod discrete_alloc;
8179
mod eval;
8280
mod helpers;
8381
mod intrinsics;
@@ -109,7 +107,7 @@ pub type PlaceTy<'tcx> = interpret::PlaceTy<'tcx, machine::Provenance>;
109107
pub type MPlaceTy<'tcx> = interpret::MPlaceTy<'tcx, machine::Provenance>;
110108

111109
pub use crate::alloc_addresses::{EvalContextExt as _, ProvenanceMode};
112-
pub use crate::alloc_bytes::MiriAllocBytes;
110+
pub use crate::alloc::MiriAllocBytes;
113111
pub use crate::borrow_tracker::stacked_borrows::{
114112
EvalContextExt as _, Item, Permission, Stack, Stacks,
115113
};

src/machine.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -739,8 +739,8 @@ impl<'tcx> MiriMachine<'tcx> {
739739
// undefined behaviour in Miri itself!
740740
(
741741
unsafe {
742-
#[cfg(all(unix, any(target_arch = "x86", target_arch = "x86_64")))]
743-
discrete_alloc::MachineAlloc::enable();
742+
#[cfg(target_os = "linux")]
743+
alloc::discrete_alloc::MachineAlloc::enable();
744744
libloading::Library::new(lib_file_path)
745745
.expect("failed to read specified extern shared object file")
746746
},

0 commit comments

Comments
 (0)