Commit d5db7a6

Move memory pool over to aligned byte counts (#9668)
* Move memory pool over to aligned byte counts

  Part of work to centralize memory management within Mmap instances -- some of
  that work becomes easier if the byte counts are known to be aligned. There
  were a few overflow cases that I added checks for.

* Address review comments

* Fix unused code warnings
1 parent 91d3219 commit d5db7a6
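
The central idea is a newtype whose invariant is "this byte count is a multiple
of the host page size", so arithmetic that preserves the invariant never has to
re-check alignment. A minimal, self-contained sketch of that idea (illustrative
only; the actual type is `HostAlignedByteCount` in
crates/wasmtime/src/runtime/vm/byte_count.rs, shown in the diff below):

/// Hypothetical stand-in for an aligned byte count newtype.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct AlignedBytes(usize);

impl AlignedBytes {
    /// Construct from a raw byte count, rejecting unaligned values.
    fn new(bytes: usize, page_size: usize) -> Option<Self> {
        debug_assert!(page_size.is_power_of_two());
        (bytes % page_size == 0).then_some(Self(bytes))
    }

    /// aligned + aligned = aligned, so checked addition preserves the
    /// invariant; only overflow needs handling.
    fn checked_add(self, other: Self) -> Option<Self> {
        self.0.checked_add(other.0).map(Self)
    }
}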

File tree

5 files changed: +209 -103 lines changed

crates/wasmtime/src/runtime/vm.rs

Lines changed: 2 additions & 2 deletions
@@ -372,7 +372,7 @@ pub fn host_page_size() -> usize {
 /// Returns an error if rounding up overflows.
 ///
 /// (Deprecated: consider switching to `HostAlignedByteCount`.)
-#[cfg(feature = "signals-based-traps")]
+#[cfg(all(feature = "async", unix, not(miri)))]
 pub fn round_u64_up_to_host_pages(bytes: u64) -> Result<u64> {
     let page_size = u64::try_from(crate::runtime::vm::host_page_size()).err2anyhow()?;
     debug_assert!(page_size.is_power_of_two());
@@ -387,7 +387,7 @@ pub fn round_u64_up_to_host_pages(bytes: u64) -> Result<u64> {
 /// Same as `round_u64_up_to_host_pages` but for `usize`s.
 ///
 /// (Deprecated: consider switching to `HostAlignedByteCount`.)
-#[cfg(feature = "signals-based-traps")]
+#[cfg(all(feature = "async", unix, not(miri)))]
 pub fn round_usize_up_to_host_pages(bytes: usize) -> Result<usize> {
     let bytes = u64::try_from(bytes).err2anyhow()?;
     let rounded = round_u64_up_to_host_pages(bytes)?;
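
The hunks only show the first couple of lines of each body. For a power-of-two
page size, the usual overflow-checked round-up these helpers perform looks like
the following sketch (illustrative only, not necessarily the exact code in the
rest of the file):

/// Round `bytes` up to a multiple of `page_size` (a power of two),
/// returning `None` if the addition overflows.
fn round_up_to_pages(bytes: u64, page_size: u64) -> Option<u64> {
    debug_assert!(page_size.is_power_of_two());
    // Adding `page_size - 1` is the step that can overflow, hence checked_add;
    // the mask then clears the low bits, snapping down to a page boundary.
    bytes.checked_add(page_size - 1).map(|n| n & !(page_size - 1))
}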

crates/wasmtime/src/runtime/vm/byte_count.rs

Lines changed: 77 additions & 0 deletions
@@ -94,6 +94,9 @@ impl HostAlignedByteCount {
             .ok_or(ByteCountOutOfBounds(ByteCountOutOfBoundsKind::Add))
     }
 
+    // Note: saturating_add should not be naively added! usize::MAX is not a
+    // power of 2 so is not aligned.
+
     /// Compute `self - bytes`.
     ///
     /// Returns an error if the result underflows.
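
The point of the note is the saturation target: `usize::MAX` is all ones, which
is not a multiple of the page size, so clamping there would violate the type's
alignment invariant. A correct saturating add would have to clamp to the
largest aligned value instead, roughly as in this illustrative snippet (not
part of the commit):

/// Largest page-aligned value that fits in a usize, assuming a
/// power-of-two `page_size`.
fn max_aligned(page_size: usize) -> usize {
    debug_assert!(page_size.is_power_of_two());
    usize::MAX & !(page_size - 1)
}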
@@ -105,6 +108,13 @@ impl HostAlignedByteCount {
             .ok_or_else(|| ByteCountOutOfBounds(ByteCountOutOfBoundsKind::Sub))
     }
 
+    /// Compute `self - bytes`, returning zero if the result underflows.
+    #[inline]
+    pub fn saturating_sub(self, bytes: HostAlignedByteCount) -> Self {
+        // aligned - aligned = aligned, and 0 is always aligned.
+        Self(self.0.saturating_sub(bytes.0))
+    }
+
     /// Multiply an aligned byte count by a scalar value.
     ///
     /// Returns an error if the result overflows.
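
A quick test-style sketch of how the new method behaves (hypothetical, not part
of the commit; it assumes `HostAlignedByteCount` derives the usual `PartialEq`
and `Debug` impls, which this diff does not show): subtracting a larger aligned
count clamps to zero instead of returning an error, which suits bookkeeping
where "nothing left" is a normal answer.

#[test]
fn saturating_sub_clamps_to_zero() {
    let page = host_page_size();
    let two_pages = HostAlignedByteCount::new(2 * page).unwrap();
    let three_pages = HostAlignedByteCount::new(3 * page).unwrap();
    // Ordinary subtraction: one page remains.
    assert_eq!(
        three_pages.saturating_sub(two_pages),
        HostAlignedByteCount::new(page).unwrap()
    );
    // Underflow clamps to zero, which is itself aligned.
    assert_eq!(
        two_pages.saturating_sub(three_pages),
        HostAlignedByteCount::new(0).unwrap()
    );
}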
@@ -116,6 +126,39 @@ impl HostAlignedByteCount {
             .ok_or_else(|| ByteCountOutOfBounds(ByteCountOutOfBoundsKind::Mul))
     }
 
+    /// Divide an aligned byte count by another aligned byte count, producing a
+    /// scalar value.
+    ///
+    /// Returns an error in case the divisor is zero.
+    pub fn checked_div(self, divisor: HostAlignedByteCount) -> Result<usize, ByteCountOutOfBounds> {
+        self.0
+            .checked_div(divisor.0)
+            .ok_or_else(|| ByteCountOutOfBounds(ByteCountOutOfBoundsKind::Div))
+    }
+
+    /// Compute the remainder of an aligned byte count divided by another
+    /// aligned byte count.
+    ///
+    /// The remainder is always an aligned byte count itself.
+    ///
+    /// Returns an error in case the divisor is zero.
+    pub fn checked_rem(self, divisor: HostAlignedByteCount) -> Result<Self, ByteCountOutOfBounds> {
+        // Why is the remainder an aligned byte count? For example, if the page
+        // size is 4KiB, then the remainder of dividing (say) 40KiB by 16KiB is
+        // 8KiB, which is a multiple of 4KiB.
+        //
+        // More generally, for integers n >= 0, m > 0, k > 0:
+        //
+        //     (n * k) % (m * k) = (n % m) * k
+        //
+        // which is a multiple of k. Here, k is the host page size, so the
+        // remainder is a multiple of the host page size.
+        self.0
+            .checked_rem(divisor.0)
+            .map(Self)
+            .ok_or_else(|| ByteCountOutOfBounds(ByteCountOutOfBoundsKind::Rem))
+    }
+
     /// Unchecked multiplication by a scalar value.
     ///
     /// ## Safety
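
Together these behave like integer division with remainder over whole pages:
dividing a region by an aligned slot size says how many whole slots fit, and
the remainder is the leftover tail, which stays aligned. A hypothetical helper
built only from the methods added here (names invented for illustration):

/// Split an aligned region into whole slots plus an aligned tail.
fn split_region(
    region: HostAlignedByteCount,
    slot: HostAlignedByteCount,
) -> Result<(usize, HostAlignedByteCount), ByteCountOutOfBounds> {
    let slots = region.checked_div(slot)?; // e.g. 40KiB / 16KiB = 2 whole slots
    let tail = region.checked_rem(slot)?;  // e.g. 40KiB % 16KiB = 8KiB, still aligned
    Ok((slots, tail))
}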
@@ -214,6 +257,8 @@ enum ByteCountOutOfBoundsKind {
     Add,
     Sub,
     Mul,
+    Div,
+    Rem,
 }
 
 impl fmt::Display for ByteCountOutOfBoundsKind {
@@ -228,6 +273,38 @@ impl fmt::Display for ByteCountOutOfBoundsKind {
             ByteCountOutOfBoundsKind::Mul => {
                 f.write_str("byte count overflow during multiplication")
             }
+            ByteCountOutOfBoundsKind::Div => f.write_str("division by zero"),
+            ByteCountOutOfBoundsKind::Rem => f.write_str("remainder by zero"),
+        }
+    }
+}
+
+#[cfg(test)]
+mod proptest_impls {
+    use super::*;
+
+    use proptest::prelude::*;
+
+    impl Arbitrary for HostAlignedByteCount {
+        type Strategy = BoxedStrategy<Self>;
+        type Parameters = ();
+
+        fn arbitrary_with(_: ()) -> Self::Strategy {
+            // Compute the number of pages that fit in a usize, rounded down.
+            // For example, if:
+            //
+            // * usize::MAX is 2**64 - 1
+            // * host_page_size is 2**12 (4KiB)
+            //
+            // Then page_count = floor(usize::MAX / host_page_size) = 2**52 - 1.
+            // The range 0..=page_count, when multiplied by the page size, will
+            // produce values in the range 0..=(2**64 - 2**12), in steps of
+            // 2**12, uniformly at random. This is the desired uniform
+            // distribution of byte counts.
+            let page_count = usize::MAX / host_page_size();
+            (0..=page_count)
+                .prop_map(|n| HostAlignedByteCount::new(n * host_page_size()).unwrap())
+                .boxed()
         }
     }
 }
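
With this `Arbitrary` impl in place, properties of the new arithmetic can be
checked over uniformly distributed aligned byte counts. A hypothetical example
of the kind of property test this enables (not part of this commit; it assumes
`HostAlignedByteCount` derives `PartialOrd`, which the diff does not show):

proptest! {
    #[test]
    fn rem_is_smaller_than_divisor(
        a in any::<HostAlignedByteCount>(),
        b in any::<HostAlignedByteCount>(),
    ) {
        // checked_rem only fails when the divisor is zero.
        if let Ok(rem) = a.checked_rem(b) {
            // The remainder stays aligned by construction and must be
            // strictly smaller than the divisor.
            prop_assert!(rem < b);
        }
    }
}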
