Commit 32672ff

address review pt 2
1 parent 3628f0e commit 32672ff

File tree

1 file changed: +64 -29 lines

src/alloc/isolated_alloc.rs

Lines changed: 64 additions & 29 deletions
@@ -36,17 +36,22 @@ impl IsolatedAlloc {
             page_ptrs: Vec::new(),
             huge_ptrs: Vec::new(),
             page_infos: Vec::new(),
+            // SAFETY: `sysconf(_SC_PAGESIZE)` is always safe to call at runtime
+            // See https://www.man7.org/linux/man-pages/man3/sysconf.3.html
             page_size: unsafe { libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap() },
         }
     }

     /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self, page_size: usize) -> (*mut u8, &mut DenseBitSet<usize>) {
-        let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
+    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
+        // SAFETY: the system page size must always be a power of 2 and nonzero,
+        // and cannot overflow an isize on the host.
+        let page_layout = unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+        // SAFETY: The system page size, which is the layout size, cannot be 0
         let page_ptr = unsafe { alloc::alloc(page_layout) };
         // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
-        assert!(page_size % COMPRESSION_FACTOR == 0);
-        self.page_infos.push(DenseBitSet::new_empty(page_size / COMPRESSION_FACTOR));
+        assert!(self.page_size % COMPRESSION_FACTOR == 0);
+        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
         self.page_ptrs.push(page_ptr);
         (page_ptr, self.page_infos.last_mut().unwrap())
     }
@@ -85,13 +90,15 @@ impl IsolatedAlloc {
     ///
     /// SAFETY: See `alloc::alloc()`
     pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
+        // SAFETY: Upheld by caller
         unsafe { self.allocate(layout, false) }
     }

     /// Same as `alloc`, but zeroes out the memory.
     ///
     /// SAFETY: See `alloc::alloc_zeroed()`
     pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
+        // SAFETY: Upheld by caller
         unsafe { self.allocate(layout, true) }
     }

@@ -103,9 +110,13 @@ impl IsolatedAlloc {
     unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
         let (size, align) = IsolatedAlloc::normalized_layout(layout);
         if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+            // SAFETY: Validity of `layout` upheld by caller; we checked that
+            // the size and alignment are appropriate for being a huge alloc
             unsafe { self.alloc_huge(layout, zeroed) }
         } else {
             for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
+                // SAFETY: The value in `self.page_size` is used to allocate
+                // `page`, with page alignment
                 if let Some(ptr) =
                     unsafe { Self::alloc_from_page(self.page_size, layout, page, pinfo, zeroed) }
                 {
@@ -117,7 +128,9 @@ impl IsolatedAlloc {
             let page_size = self.page_size;
             // Add another page and allocate from it; this cannot fail since the
             // new page is empty and we already asserted it fits into a page
-            let (page, pinfo) = self.add_page(page_size);
+            let (page, pinfo) = self.add_page();
+
+            // SAFETY: See comment on `alloc_from_page` above
             unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed).unwrap() }
         }
     }
@@ -149,6 +162,11 @@ impl IsolatedAlloc {
             let range_avail = !(idx_pinfo..idx_pinfo + size_pinfo).any(|idx| pinfo.contains(idx));
             if range_avail {
                 pinfo.insert_range(idx_pinfo..idx_pinfo + size_pinfo);
+                // SAFETY: We checked the available bytes after `idx` in the call
+                // to `domain_size` above and asserted there are at least `idx +
+                // layout.size()` bytes available and unallocated after it.
+                // `page` must point to the start of the page, so adding `idx`
+                // is safe per the above.
                 unsafe {
                     let ptr = page.add(idx);
                     if zeroed {
@@ -168,6 +186,7 @@ impl IsolatedAlloc {
     /// SAFETY: Same as `alloc()`.
     unsafe fn alloc_huge(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
         let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
+        // SAFETY: Upheld by caller
         let ret =
             unsafe { if zeroed { alloc::alloc_zeroed(layout) } else { alloc::alloc(layout) } };
         self.huge_ptrs.push((ret, layout.size()));
@@ -183,6 +202,8 @@ impl IsolatedAlloc {
         let (size, align) = IsolatedAlloc::normalized_layout(layout);

         if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+            // SAFETY: Partly upheld by caller, and we checked that the size
+            // and align mean this must have been allocated via `alloc_huge`
             unsafe {
                 self.dealloc_huge(ptr, layout);
             }
@@ -195,8 +216,9 @@ impl IsolatedAlloc {
             // Find the page this allocation belongs to.
             // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
             let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
-                .find(|(page, _)| page.addr() == page_addr);
-            let Some((_, pinfo)) = pinfo else {
+                .enumerate()
+                .find(|(_, (page, _))| page.addr() == page_addr);
+            let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
                 panic!(
                     "Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}",
                     self.page_ptrs
@@ -209,15 +231,18 @@ impl IsolatedAlloc {
                 pinfo.remove(idx);
             }

-            // We allocated all the pages with this layout
-            let page_layout =
-                unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
             // Only 1 page might have been freed after a dealloc, so if it exists,
             // find it and free it (and adjust the vectors)
-            if let Some(free_idx) = self.page_infos.iter().position(|pinfo| pinfo.is_empty()) {
-                self.page_infos.remove(free_idx);
+            if pinfo.is_empty() {
+                // SAFETY: `self.page_size` is always a power of 2 and does not overflow an isize
+                let page_layout =
+                    unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+                self.page_infos.remove(idx_of_pinfo);
+                // SAFETY: We checked that there are no outstanding allocations
+                // from us pointing to this page, and we know it was allocated
+                // with this layout
                 unsafe {
-                    alloc::dealloc(self.page_ptrs.remove(free_idx), page_layout);
+                    alloc::dealloc(self.page_ptrs.remove(idx_of_pinfo), page_layout);
                 }
             }
         }
@@ -235,6 +260,7 @@ impl IsolatedAlloc {
             .expect("Freeing unallocated pages");
         // And kick it from the list
         self.huge_ptrs.remove(idx);
+        // SAFETY: Caller ensures validity of the layout
         unsafe {
             alloc::dealloc(ptr, layout);
         }
@@ -247,7 +273,10 @@ mod tests {

     /// Helper function to assert that all bytes from `ptr` to `ptr.add(layout.size())`
     /// are zeroes.
-    fn assert_zeroes(ptr: *mut u8, layout: Layout) {
+    ///
+    /// SAFETY: `ptr` must have been allocated with `layout`.
+    unsafe fn assert_zeroes(ptr: *mut u8, layout: Layout) {
+        // SAFETY: Caller ensures this is valid
         unsafe {
             for ofs in 0..layout.size() {
                 assert_eq!(0, ptr.add(ofs).read());
@@ -261,9 +290,11 @@ mod tests {
         let mut alloc = IsolatedAlloc::new();
         // 256 should be less than the pagesize on *any* system
         let layout = Layout::from_size_align(256, 32).unwrap();
+        // SAFETY: layout size is the constant above, not 0
         let ptr = unsafe { alloc.alloc_zeroed(layout) };
-        assert_zeroes(ptr, layout);
+        // SAFETY: `ptr` was just allocated with `layout`
         unsafe {
+            assert_zeroes(ptr, layout);
             alloc.dealloc(ptr, layout);
         }
     }
@@ -274,9 +305,11 @@ mod tests {
         let mut alloc = IsolatedAlloc::new();
         // 16k is about as big as pages get e.g. on macos aarch64
         let layout = Layout::from_size_align(16 * 1024, 128).unwrap();
+        // SAFETY: layout size is the constant above, not 0
         let ptr = unsafe { alloc.alloc_zeroed(layout) };
-        assert_zeroes(ptr, layout);
+        // SAFETY: `ptr` was just allocated with `layout`
         unsafe {
+            assert_zeroes(ptr, layout);
             alloc.dealloc(ptr, layout);
         }
     }
@@ -289,24 +322,21 @@ mod tests {
         // Try both sub-pagesize allocs and those larger than / equal to a page
         for sz in (1..=(16 * 1024)).step_by(128) {
             let layout = Layout::from_size_align(sz, 1).unwrap();
+            // SAFETY: all sizes in the range above are nonzero as we start from 1
             let ptr = unsafe { alloc.alloc_zeroed(layout) };
-            assert_zeroes(ptr, layout);
+            // SAFETY: `ptr` was just allocated with `layout`, which was used
+            // to bound the access size
             unsafe {
+                assert_zeroes(ptr, layout);
                 ptr.write_bytes(255, sz);
                 alloc.dealloc(ptr, layout);
             }
         }
     }

-    /// Checks that allocations of different sizes do not overlap.
-    #[test]
-    fn no_overlaps() {
-        let mut alloc = IsolatedAlloc::new();
-        no_overlaps_inner(&mut alloc);
-    }
-
-    /// Allows us to reuse this bit for `no_overlaps` and `check_leaks`.
-    fn no_overlaps_inner(alloc: &mut IsolatedAlloc) {
+    /// Checks that allocations of different sizes do not overlap. Not a freestanding
+    /// test because we use it as part of `check_leaks()` also.
+    fn no_overlaps(alloc: &mut IsolatedAlloc) {
         // Some random sizes and aligns
         let mut sizes = vec![32; 10];
         sizes.append(&mut vec![15; 4]);
@@ -327,13 +357,18 @@ mod tests {
         let layouts: Vec<_> = std::iter::zip(sizes, aligns)
             .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap())
             .collect();
+        // SAFETY: all sizes specified in `sizes` are nonzero
         let ptrs: Vec<_> =
             layouts.iter().map(|layout| unsafe { alloc.alloc_zeroed(*layout) }).collect();

         for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) {
             // We requested zeroed allocations, so check that that's true
             // Then write to the end of the current size, so if the allocs
-            // overlap (or the zeroing is wrong) then `assert_zeroes` will panic
+            // overlap (or the zeroing is wrong) then `assert_zeroes` will panic.
+            // Also check that the alignment we asked for was respected
+            assert_eq!(ptr.addr().rem_euclid(layout.align()), 0);
+            // SAFETY: each `ptr` was allocated with its corresponding `layout`,
+            // which is used to bound the access size
             unsafe {
                 assert_zeroes(ptr, layout);
                 ptr.write_bytes(255, layout.size());
@@ -346,9 +381,9 @@ mod tests {
     #[test]
     fn check_leaks() {
         let mut alloc = IsolatedAlloc::new();
         // Generate some noise first so leaks can manifest
-        no_overlaps_inner(&mut alloc);
+        no_overlaps(&mut alloc);
+
         // And then verify that no memory was leaked
         assert!(alloc.page_ptrs.is_empty() && alloc.huge_ptrs.is_empty());
     }
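
For reference, the round trip that the tests above exercise looks roughly like the minimal sketch below. It is not part of this commit: the `roundtrip` helper and its 256-byte layout are illustrative only, and it assumes `IsolatedAlloc` is in scope from this module.

use std::alloc::Layout;

// Hypothetical helper: allocate a zeroed block from the isolated allocator,
// check the zeroing at both ends, then free it with the same layout.
fn roundtrip(alloc: &mut IsolatedAlloc) {
    // 256 bytes with 32-byte alignment fits comfortably within one page.
    let layout = Layout::from_size_align(256, 32).unwrap();
    // SAFETY: the layout has nonzero size, and the pointer is deallocated
    // below with the exact layout it was allocated with.
    unsafe {
        let ptr = alloc.alloc_zeroed(layout);
        // A zeroed allocation must read back as 0.
        assert_eq!(ptr.read(), 0);
        assert_eq!(ptr.add(layout.size() - 1).read(), 0);
        alloc.dealloc(ptr, layout);
    }
}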
