Commit 2af831a

Move unix_stack_pool over to aligned byte counts (#9678)
This removes the last remaining use of `round_usize_up_to_host_pages`. I've added a couple of extra asserts that I believe are justified.
1 parent 02997b1 · commit 2af831a
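The shape of the migration, in miniature: rounding happens once, in a constructor, and the aligned result is what gets stored and subtracted later. Below is a minimal standalone sketch of that idea, not wasmtime's actual `HostAlignedByteCount` (the fixed 4 KiB page size and the `AlignedBytes` name are assumptions for illustration; the real type queries the host page size and returns `Result`s):

```rust
/// Illustrative stand-in for an aligned byte count (not wasmtime's real
/// `HostAlignedByteCount`): rounding happens only in the constructor, so
/// later arithmetic can rely on the value staying page-aligned.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct AlignedBytes(usize);

/// Assumed fixed for this sketch; wasmtime queries the host at runtime.
const PAGE_SIZE: usize = 4096;

impl AlignedBytes {
    /// Round `bytes` up to the next page boundary, failing on overflow,
    /// analogous to what the removed `round_usize_up_to_host_pages` did.
    fn new_rounded_up(bytes: usize) -> Option<Self> {
        bytes.checked_next_multiple_of(PAGE_SIZE).map(Self)
    }

    fn byte_count(self) -> usize {
        self.0
    }

    fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// The difference of two aligned counts is still aligned, so no
    /// re-rounding is needed after subtraction.
    fn checked_sub(self, other: Self) -> Option<Self> {
        self.0.checked_sub(other.0).map(Self)
    }
}

fn main() {
    let stack_size = AlignedBytes::new_rounded_up(128 * 1024 + 1).unwrap();
    let page_size = AlignedBytes::new_rounded_up(1).unwrap();
    let without_guard = stack_size.checked_sub(page_size).unwrap();
    assert!(!stack_size.is_zero());
    assert_eq!(stack_size.byte_count(), 132 * 1024);
    assert_eq!(without_guard.byte_count(), 128 * 1024);
}
```

The payoff at the call sites is visible in the diff below: `checked_sub` between two aligned counts replaces ad-hoc `byte_count()` arithmetic, and the zero case is handled explicitly via `is_zero`.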

2 files changed: +32 −41 lines
crates/wasmtime/src/runtime/vm.rs

Lines changed: 0 additions & 27 deletions
@@ -367,33 +367,6 @@ pub fn host_page_size() -> usize {
     };
 }
 
-/// Round the given byte size up to a multiple of the host OS page size.
-///
-/// Returns an error if rounding up overflows.
-///
-/// (Deprecated: consider switching to `HostAlignedByteCount`.)
-#[cfg(all(feature = "async", unix, not(miri)))]
-pub fn round_u64_up_to_host_pages(bytes: u64) -> Result<u64> {
-    let page_size = u64::try_from(crate::runtime::vm::host_page_size()).err2anyhow()?;
-    debug_assert!(page_size.is_power_of_two());
-    bytes
-        .checked_add(page_size - 1)
-        .ok_or_else(|| anyhow!(
-            "{bytes} is too large to be rounded up to a multiple of the host page size of {page_size}"
-        ))
-        .map(|val| val & !(page_size - 1))
-}
-
-/// Same as `round_u64_up_to_host_pages` but for `usize`s.
-///
-/// (Deprecated: consider switching to `HostAlignedByteCount`.)
-#[cfg(all(feature = "async", unix, not(miri)))]
-pub fn round_usize_up_to_host_pages(bytes: usize) -> Result<usize> {
-    let bytes = u64::try_from(bytes).err2anyhow()?;
-    let rounded = round_u64_up_to_host_pages(bytes)?;
-    Ok(usize::try_from(rounded).err2anyhow()?)
-}
-
 /// Result of `Memory::atomic_wait32` and `Memory::atomic_wait64`
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
 pub enum WaitResult {
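For reference, the arithmetic the deleted helpers were built on: for a power-of-two page size, adding `page_size - 1` and masking off the low bits rounds up to the next page boundary. A quick standalone check (overflow handling, which the removed `round_u64_up_to_host_pages` did with `checked_add`, is omitted here for brevity; 4 KiB pages are assumed):

```rust
fn round_up_to_page(bytes: u64, page_size: u64) -> u64 {
    debug_assert!(page_size.is_power_of_two());
    // Adding page_size - 1 pushes any partial page into the next one;
    // masking off the low bits then lands exactly on a page boundary.
    (bytes + (page_size - 1)) & !(page_size - 1)
}

fn main() {
    assert_eq!(round_up_to_page(0, 4096), 0);
    assert_eq!(round_up_to_page(1, 4096), 4096);
    assert_eq!(round_up_to_page(4096, 4096), 4096);
    assert_eq!(round_up_to_page(5000, 4096), 8192);
}
```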

crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs

Lines changed: 32 additions & 14 deletions
@@ -4,8 +4,7 @@ use super::index_allocator::{SimpleIndexAllocator, SlotId};
 use crate::prelude::*;
 use crate::runtime::vm::sys::vm::commit_pages;
 use crate::runtime::vm::{
-    mmap::AlignedLength, round_usize_up_to_host_pages, HostAlignedByteCount, Mmap,
-    PoolingInstanceAllocatorConfig,
+    mmap::AlignedLength, HostAlignedByteCount, Mmap, PoolingInstanceAllocatorConfig,
 };
 
 /// Represents a pool of execution stacks (used for the async fiber implementation).
@@ -26,7 +25,7 @@ pub struct StackPool {
     page_size: HostAlignedByteCount,
     index_allocator: SimpleIndexAllocator,
     async_stack_zeroing: bool,
-    async_stack_keep_resident: usize,
+    async_stack_keep_resident: HostAlignedByteCount,
 }
 
 impl StackPool {
@@ -80,7 +79,7 @@ impl StackPool {
             max_stacks,
             page_size,
             async_stack_zeroing: config.async_stack_zeroing,
-            async_stack_keep_resident: round_usize_up_to_host_pages(
+            async_stack_keep_resident: HostAlignedByteCount::new_rounded_up(
                 config.async_stack_keep_resident,
            )?,
             index_allocator: SimpleIndexAllocator::new(config.limits.total_stacks),
@@ -95,7 +94,7 @@ impl StackPool {
 
     /// Allocate a new fiber.
     pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
-        if self.stack_size == 0 {
+        if self.stack_size.is_zero() {
             bail!("pooling allocator not configured to enable fiber stack allocation");
         }
 
@@ -109,20 +108,23 @@ impl StackPool {
 
         unsafe {
             // Remove the guard page from the size
-            let size_without_guard = self.stack_size.byte_count() - self.page_size.byte_count();
+            let size_without_guard = self.stack_size.checked_sub(self.page_size).expect(
+                "self.stack_size is host-page-aligned and is > 0,\
+                 so it must be >= self.page_size",
+            );
 
             let bottom_of_stack = self
                 .mapping
                 .as_ptr()
                 .add(self.stack_size.unchecked_mul(index).byte_count())
                 .cast_mut();
 
-            commit_pages(bottom_of_stack, size_without_guard)?;
+            commit_pages(bottom_of_stack, size_without_guard.byte_count())?;
 
             let stack = wasmtime_fiber::FiberStack::from_raw_parts(
                 bottom_of_stack,
                 self.page_size.byte_count(),
-                size_without_guard,
+                size_without_guard.byte_count(),
             )?;
             Ok(stack)
         }
@@ -134,6 +136,11 @@ impl StackPool {
     /// that should be decommitted. It is the caller's responsibility to ensure
     /// that those decommits happen before this stack is reused.
     ///
+    /// # Panics
+    ///
+    /// `zero_stack` panics if the passed in `stack` was not created by
+    /// [`Self::allocate`].
+    ///
     /// # Safety
     ///
     /// The stack must no longer be in use, and ready for returning to the pool
@@ -144,6 +151,11 @@ impl StackPool {
         mut decommit: impl FnMut(*mut u8, usize),
     ) {
         assert!(stack.is_from_raw_parts());
+        assert!(
+            !self.stack_size.is_zero(),
+            "pooling allocator not configured to enable fiber stack allocation \
+             (Self::allocate should have returned an error)"
+        );
 
         if !self.async_stack_zeroing {
             return;
@@ -160,9 +172,12 @@ impl StackPool {
             "fiber stack top pointer not in range"
         );
 
-        // Remove the guard page from the size
-        let stack_size = self.stack_size.byte_count() - self.page_size.byte_count();
-        let bottom_of_stack = top - stack_size;
+        // Remove the guard page from the size.
+        let stack_size = self.stack_size.checked_sub(self.page_size).expect(
+            "self.stack_size is host-page-aligned and is > 0,\
+             so it must be >= self.page_size",
+        );
+        let bottom_of_stack = top - stack_size.byte_count();
         let start_of_stack = bottom_of_stack - self.page_size.byte_count();
         assert!(start_of_stack >= base && start_of_stack < (base + len));
         assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);
@@ -175,14 +190,17 @@ impl StackPool {
         // * madvise for the whole range incurs expensive future page faults
         // * most threads probably don't use most of the stack anyway
         let size_to_memset = stack_size.min(self.async_stack_keep_resident);
+        let rest = stack_size
+            .checked_sub(size_to_memset)
+            .expect("stack_size >= size_to_memset");
         std::ptr::write_bytes(
-            (bottom_of_stack + stack_size - size_to_memset) as *mut u8,
+            (bottom_of_stack + rest.byte_count()) as *mut u8,
             0,
-            size_to_memset,
+            size_to_memset.byte_count(),
        );
 
         // Use the system to reset remaining stack pages to zero.
-        decommit(bottom_of_stack as _, stack_size - size_to_memset);
+        decommit(bottom_of_stack as _, rest.byte_count());
     }
 
     /// Deallocate a previously-allocated fiber.
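To make the pointer arithmetic in `allocate` and `zero_stack` easier to follow, here is the slot layout worked through with plain `usize`s, loosely following the naming in `zero_stack`. Every concrete number (base address, 64 KiB slots, 4 KiB pages, 16 KiB kept resident) is made up for illustration:

```rust
// Hypothetical numbers for illustration only; not wasmtime defaults.
const PAGE_SIZE: usize = 4096; // guard page at the bottom of each slot
const STACK_SIZE: usize = 64 * 1024; // whole slot, guard page included
const KEEP_RESIDENT: usize = 16 * 1024; // bytes zeroed in-process instead of decommitted

fn main() {
    let base = 0x1000_0000usize; // pretend mapping base
    let index = 3usize; // slot index within the pool

    // Slot layout: the lowest page of each slot is the guard page.
    let start_of_stack = base + STACK_SIZE * index;
    let bottom_of_stack = start_of_stack + PAGE_SIZE;
    let size_without_guard = STACK_SIZE - PAGE_SIZE;
    let top = bottom_of_stack + size_without_guard;

    // zero_stack's split: zero the hot top of the stack by hand and hand
    // the colder remainder back to the OS.
    let size_to_memset = size_without_guard.min(KEEP_RESIDENT);
    let rest = size_without_guard - size_to_memset;
    let memset_from = bottom_of_stack + rest; // == top - size_to_memset

    assert_eq!(memset_from + size_to_memset, top);
    assert_eq!(rest + size_to_memset, size_without_guard);
    println!("slot {index}: guard at {start_of_stack:#x}, stack {bottom_of_stack:#x}..{top:#x}");
    println!("zero {size_to_memset} bytes at {memset_from:#x}, decommit {rest} bytes at {bottom_of_stack:#x}");
}
```

The `rest`/`size_to_memset` split mirrors the new code above: only the hottest bytes at the top of the stack are zeroed with `write_bytes`, while the remainder is handed to `decommit` so the OS can reclaim those pages.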
