
Commit be2bcef

Merge pull request #4362 from nia-e/fix-alloc-perf
isolated_alloc: directly use mmap for allocations
2 parents: 2f4f9ac + 0272c0c

src/alloc/isolated_alloc.rs

Lines changed: 72 additions & 36 deletions
@@ -1,4 +1,4 @@
-use std::alloc::{self, Layout};
+use std::alloc::Layout;
 
 use rustc_index::bit_set::DenseBitSet;
 
@@ -11,7 +11,7 @@ const COMPRESSION_FACTOR: usize = 4;
 pub struct IsolatedAlloc {
     /// Pointers to page-aligned memory that has been claimed by the allocator.
     /// Every pointer here must point to a page-sized allocation claimed via
-    /// the global allocator. These pointers are used for "small" allocations.
+    /// mmap. These pointers are used for "small" allocations.
     page_ptrs: Vec<*mut u8>,
     /// Metadata about which bytes have been allocated on each page. The length
     /// of this vector must be the same as that of `page_ptrs`, and the domain
@@ -52,20 +52,26 @@ impl IsolatedAlloc {
         Layout::from_size_align(size, align).unwrap()
     }
 
-    /// Returns the layout used to allocate the pages that hold small allocations.
+    /// For greater-than-page-sized allocations, returns the allocation size we need to request,
+    /// including the slack we need to satisfy the alignment request.
     #[inline]
-    fn page_layout(&self) -> Layout {
-        Layout::from_size_align(self.page_size, self.page_size).unwrap()
-    }
-
-    /// If the allocation is greater than a page, then round to the nearest page #.
-    #[inline]
-    fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout {
+    fn huge_normalized_layout(&self, layout: Layout) -> usize {
         // Allocate in page-sized chunks
-        let size = layout.size().next_multiple_of(page_size);
+        let size = layout.size().next_multiple_of(self.page_size);
         // And make sure the align is at least one page
-        let align = std::cmp::max(layout.align(), page_size);
-        Layout::from_size_align(size, align).unwrap()
+        let align = std::cmp::max(layout.align(), self.page_size);
+        // pg_count gives us the # of pages needed to satisfy the size. For
+        // align > page_size where align = n * page_size, a sufficiently-aligned
+        // address must exist somewhere in the range of
+        // some_page_aligned_address..some_page_aligned_address + (n-1) * page_size
+        // (since if some_page_aligned_address + n * page_size is sufficiently aligned,
+        // then so is some_page_aligned_address itself per the definition of n, so we
+        // can avoid using that 1 extra page).
+        // Thus we allocate n-1 extra pages.
+        let pg_count = size.div_ceil(self.page_size);
+        let extra_pages = align.strict_div(self.page_size).saturating_sub(1);
+
+        pg_count.strict_add(extra_pages).strict_mul(self.page_size)
     }
 
     /// Determines whether a given normalized (size, align) should be sent to
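
The over-allocation arithmetic above is easiest to check with concrete numbers. Below is a standalone sketch of the same computation, using plain arithmetic in place of the `strict_*` helpers; `huge_alloc_size` is a hypothetical free-function stand-in for the method, with `page_size` passed explicitly instead of read from `self`.

// Hypothetical free-function version of `huge_normalized_layout` above.
fn huge_alloc_size(size: usize, align: usize, page_size: usize) -> usize {
    // Round the size up to whole pages.
    let size = size.next_multiple_of(page_size);
    let align = std::cmp::max(align, page_size);
    let pg_count = size.div_ceil(page_size);
    // With align = n * page_size, n-1 extra pages guarantee that a
    // sufficiently-aligned address lies somewhere inside the mapping.
    let extra_pages = (align / page_size).saturating_sub(1);
    (pg_count + extra_pages) * page_size
}

fn main() {
    // A 10000-byte request with 16 KiB alignment on 4 KiB pages:
    // size rounds up to 12288 bytes (3 pages); align = 16384 = 4 pages,
    // so n-1 = 3 extra pages are added: (3 + 3) * 4096 = 24576 bytes.
    assert_eq!(huge_alloc_size(10000, 16 * 1024, 4096), 24576);
}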
@@ -78,15 +84,15 @@ impl IsolatedAlloc {
     /// Allocates memory as described in `Layout`. This memory should be deallocated
     /// by calling `dealloc` on this same allocator.
     ///
-    /// SAFETY: See `alloc::alloc()`
+    /// SAFETY: See `alloc::alloc()`.
     pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
         // SAFETY: Upheld by caller
         unsafe { self.allocate(layout, false) }
     }
 
     /// Same as `alloc`, but zeroes out the memory.
     ///
-    /// SAFETY: See `alloc::alloc_zeroed()`
+    /// SAFETY: See `alloc::alloc_zeroed()`.
     pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
         // SAFETY: Upheld by caller
         unsafe { self.allocate(layout, true) }
@@ -95,14 +101,13 @@ impl IsolatedAlloc {
     /// Abstracts over the logic of `alloc_zeroed` vs `alloc`, as determined by
     /// the `zeroed` argument.
     ///
-    /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
-    /// corresponds to the host pagesize.
+    /// SAFETY: See `alloc::alloc()`.
     unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
         let layout = IsolatedAlloc::normalized_layout(layout);
         if self.is_huge_alloc(&layout) {
             // SAFETY: Validity of `layout` upheld by caller; we checked that
             // the size and alignment are appropriate for being a huge alloc
-            unsafe { self.alloc_huge(layout, zeroed) }
+            unsafe { self.alloc_huge(layout) }
         } else {
             for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                 // SAFETY: The value in `self.page_size` is used to allocate
@@ -168,8 +173,19 @@ impl IsolatedAlloc {
 
     /// Expands the available memory pool by adding one page.
     fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
-        // SAFETY: The system page size, which is the layout size, cannot be 0
-        let page_ptr = unsafe { alloc::alloc(self.page_layout()) };
+        // SAFETY: mmap is always safe to call when requesting anonymous memory
+        let page_ptr = unsafe {
+            libc::mmap(
+                std::ptr::null_mut(),
+                self.page_size,
+                libc::PROT_READ | libc::PROT_WRITE,
+                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
+                -1,
+                0,
+            )
+            .cast::<u8>()
+        };
+        assert_ne!(page_ptr.addr(), usize::MAX, "mmap failed");
         // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
         assert!(self.page_size % COMPRESSION_FACTOR == 0);
         self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
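
For context on the mmap call above: `MAP_PRIVATE | MAP_ANONYMOUS` mappings are page-aligned and zero-filled by the kernel, and on failure mmap returns `MAP_FAILED`, i.e. `(void*)-1`, which is what the `assert_ne!` against `usize::MAX` detects. A minimal standalone sketch of the same pattern, assuming the `libc` crate; the `map_pages` helper is illustrative and not part of the patch.

use std::ptr;

// Illustrative helper (not in the patch): request `len` bytes of
// zero-initialized, page-aligned anonymous memory from the OS.
fn map_pages(len: usize) -> *mut u8 {
    // SAFETY: an anonymous mapping touches no existing memory.
    let ptr = unsafe {
        libc::mmap(
            ptr::null_mut(), // let the kernel pick the address
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1, // fd is ignored for MAP_ANONYMOUS
            0,  // offset must be 0 for anonymous mappings
        )
    };
    // MAP_FAILED is (void*)-1 -- the same condition the patch checks
    // via `addr() == usize::MAX`.
    assert_ne!(ptr, libc::MAP_FAILED, "mmap failed");
    ptr.cast::<u8>()
}

fn main() {
    let p = map_pages(4096);
    // Anonymous mappings come back zeroed.
    assert_eq!(unsafe { *p }, 0);
    unsafe { assert_eq!(libc::munmap(p.cast(), 4096), 0) };
}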
@@ -178,15 +194,28 @@ impl IsolatedAlloc {
     }
 
     /// Allocates in multiples of one page on the host system.
+    /// Will always be zeroed.
     ///
     /// SAFETY: Same as `alloc()`.
-    unsafe fn alloc_huge(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
-        // SAFETY: Upheld by caller
-        let ret =
-            unsafe { if zeroed { alloc::alloc_zeroed(layout) } else { alloc::alloc(layout) } };
-        self.huge_ptrs.push((ret, layout.size()));
-        ret
+    unsafe fn alloc_huge(&mut self, layout: Layout) -> *mut u8 {
+        let size = self.huge_normalized_layout(layout);
+        // SAFETY: mmap is always safe to call when requesting anonymous memory
+        let ret = unsafe {
+            libc::mmap(
+                std::ptr::null_mut(),
+                size,
+                libc::PROT_READ | libc::PROT_WRITE,
+                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
+                -1,
+                0,
+            )
+            .cast::<u8>()
+        };
+        assert_ne!(ret.addr(), usize::MAX, "mmap failed");
+        self.huge_ptrs.push((ret, size));
+        // huge_normalized_layout ensures that we've overallocated enough space
+        // for this to be valid.
+        ret.map_addr(|a| a.next_multiple_of(layout.align()))
     }
 
     /// Deallocates a pointer from this allocator.
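
Since mmap only guarantees page alignment, `alloc_huge` returns the first sufficiently-aligned address inside the over-allocated mapping rather than the mapping base. A worked example of that round-up, with made-up addresses:

fn main() {
    // Made-up mmap base address (page-aligned on a 4 KiB-page host).
    let base: usize = 0x7f00_0000_3000;
    let align: usize = 16 * 1024; // caller asked for 16 KiB alignment
    // What `ret.map_addr(|a| a.next_multiple_of(layout.align()))` computes:
    let user = base.next_multiple_of(align);
    assert_eq!(user, 0x7f00_0000_4000);
    // The round-up consumes at most the n-1 extra pages we over-allocated.
    assert!(user - base <= align - 4096);
}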
@@ -215,15 +244,15 @@ impl IsolatedAlloc {
                 let page_ptr = self.page_ptrs.remove(idx);
                 // SAFETY: We checked that there are no outstanding allocations
                 // from us pointing to this page, and we know it was allocated
-                // with this layout
+                // in add_page as exactly a single page.
                 unsafe {
-                    alloc::dealloc(page_ptr, self.page_layout());
+                    assert_eq!(libc::munmap(page_ptr.cast(), self.page_size), 0);
                 }
             }
         }
     }
 
-    /// Returns the index of the page that this was deallocated from
+    /// Returns the index of the page that this was deallocated from.
     ///
     /// SAFETY: the pointer must have been allocated with `alloc_small`.
     unsafe fn dealloc_small(&mut self, ptr: *mut u8, layout: Layout) -> usize {
@@ -252,18 +281,22 @@ impl IsolatedAlloc {
     /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
     /// must ask for a size larger than the host pagesize.
     unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
-        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
-        // Find the pointer matching in address with the one we got
+        let size = self.huge_normalized_layout(layout);
+        // Find the huge allocation containing `ptr`.
         let idx = self
             .huge_ptrs
             .iter()
-            .position(|pg| ptr.addr() == pg.0.addr())
+            .position(|&(pg, size)| {
+                pg.addr() <= ptr.addr() && ptr.addr() < pg.addr().strict_add(size)
+            })
             .expect("Freeing unallocated pages");
         // And kick it from the list
-        self.huge_ptrs.remove(idx);
-        // SAFETY: Caller ensures validity of the layout
+        let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
+        assert_eq!(size, size2, "got wrong layout in dealloc");
+        // SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
         unsafe {
-            alloc::dealloc(ptr, layout);
+            let ret = libc::munmap(un_offset_ptr.cast(), size);
+            assert_eq!(ret, 0);
         }
     }
 
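Because `alloc_huge` may return a pointer offset from the mapping base, `dealloc_huge` now has to find the allocation by range containment rather than address equality, and it must unmap the recorded base pointer (`un_offset_ptr`) rather than the pointer the caller handed back. A small numeric illustration, with made-up values:

fn main() {
    let base: usize = 0x5000;   // made-up mapping start, page-aligned
    let size: usize = 6 * 4096; // over-allocated size from huge_normalized_layout
    // What alloc_huge would have returned for a 16 KiB alignment request:
    let user = base.next_multiple_of(16 * 1024); // 0x8000
    assert_ne!(user, base);                      // an equality lookup would miss it
    assert!(base <= user && user < base + size); // the range lookup finds it
    // munmap must be called on `base` with `size`, not on `user`.
}
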
@@ -359,12 +392,15 @@ mod tests {
         sizes.append(&mut vec![256; 12]);
         // Give it some multi-page ones too
         sizes.append(&mut vec![32 * 1024; 4]);
+        sizes.push(4 * 1024);
 
         // Matching aligns for the sizes
         let mut aligns = vec![16; 12];
         aligns.append(&mut vec![256; 2]);
         aligns.append(&mut vec![64; 12]);
         aligns.append(&mut vec![4096; 4]);
+        // And one that requests align > page_size
+        aligns.push(64 * 1024);
 
         // Make sure we didn't mess up in the test itself!
         assert_eq!(sizes.len(), aligns.len());
