@@ -4,8 +4,7 @@ use super::index_allocator::{SimpleIndexAllocator, SlotId};
use crate::prelude::*;
use crate::runtime::vm::sys::vm::commit_pages;
use crate::runtime::vm::{
-    mmap::AlignedLength, round_usize_up_to_host_pages, HostAlignedByteCount, Mmap,
-    PoolingInstanceAllocatorConfig,
+    mmap::AlignedLength, HostAlignedByteCount, Mmap, PoolingInstanceAllocatorConfig,
};

/// Represents a pool of execution stacks (used for the async fiber implementation).
@@ -26,7 +25,7 @@ pub struct StackPool {
    page_size: HostAlignedByteCount,
    index_allocator: SimpleIndexAllocator,
    async_stack_zeroing: bool,
-    async_stack_keep_resident: usize,
+    async_stack_keep_resident: HostAlignedByteCount,
}

impl StackPool {
@@ -80,7 +79,7 @@ impl StackPool {
            max_stacks,
            page_size,
            async_stack_zeroing: config.async_stack_zeroing,
-            async_stack_keep_resident: round_usize_up_to_host_pages(
+            async_stack_keep_resident: HostAlignedByteCount::new_rounded_up(
                config.async_stack_keep_resident,
            )?,
            index_allocator: SimpleIndexAllocator::new(config.limits.total_stacks),
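For readers unfamiliar with the rounding helpers involved: both the old `round_usize_up_to_host_pages` and the replacement `HostAlignedByteCount::new_rounded_up` round the configured byte count up to a whole number of host pages; the change moves that invariant into the type. Below is a minimal sketch of that rounding using a hypothetical `PageAligned` wrapper with an explicit `page_size` parameter; it is illustrative only, not wasmtime's actual API.

```rust
// Hypothetical stand-in for wasmtime's HostAlignedByteCount; the name,
// the explicit `page_size` parameter, and the Option return are illustrative.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct PageAligned(usize);

impl PageAligned {
    /// Round `bytes` up to the next multiple of `page_size` (a power of two),
    /// returning `None` on overflow instead of silently wrapping.
    fn new_rounded_up(bytes: usize, page_size: usize) -> Option<Self> {
        debug_assert!(page_size.is_power_of_two());
        let mask = page_size - 1;
        bytes.checked_add(mask).map(|n| PageAligned(n & !mask))
    }

    fn byte_count(self) -> usize {
        self.0
    }
}

fn main() {
    // With 4 KiB host pages, a request of 100 KiB + 1 byte rounds up to 104 KiB.
    let aligned = PageAligned::new_rounded_up(100 * 1024 + 1, 4096).unwrap();
    assert_eq!(aligned.byte_count(), 104 * 1024);
}
```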
@@ -95,7 +94,7 @@ impl StackPool {

    /// Allocate a new fiber.
    pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
-        if self.stack_size == 0 {
+        if self.stack_size.is_zero() {
            bail!("pooling allocator not configured to enable fiber stack allocation");
        }

@@ -109,20 +108,23 @@ impl StackPool {

        unsafe {
            // Remove the guard page from the size
-            let size_without_guard = self.stack_size.byte_count() - self.page_size.byte_count();
+            let size_without_guard = self.stack_size.checked_sub(self.page_size).expect(
+                "self.stack_size is host-page-aligned and is > 0,\
+                 so it must be >= self.page_size",
+            );

            let bottom_of_stack = self
                .mapping
                .as_ptr()
                .add(self.stack_size.unchecked_mul(index).byte_count())
                .cast_mut();

-            commit_pages(bottom_of_stack, size_without_guard)?;
+            commit_pages(bottom_of_stack, size_without_guard.byte_count())?;

            let stack = wasmtime_fiber::FiberStack::from_raw_parts(
                bottom_of_stack,
                self.page_size.byte_count(),
-                size_without_guard,
+                size_without_guard.byte_count(),
            )?;
            Ok(stack)
        }
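As a worked example of the offset arithmetic in this hunk (illustrative numbers, not values from any real configuration): with 4 KiB host pages and a 20 KiB per-slot `stack_size`, the usable size is 16 KiB and slot 3 starts 60 KiB into the mapping.

```rust
// Illustrative numbers only; the pool's real values are page-aligned types,
// not raw usize.
fn main() {
    let page_size: usize = 4096; // host page size
    let stack_size: usize = 5 * page_size; // per-slot size, guard page included

    // Same subtraction as `size_without_guard` in `allocate` above.
    let size_without_guard = stack_size.checked_sub(page_size).unwrap();
    assert_eq!(size_without_guard, 16 * 1024);

    // Same multiplication as the `.add(...)` slot offset above, for slot 3.
    let index: usize = 3;
    let slot_offset = stack_size.checked_mul(index).unwrap();
    assert_eq!(slot_offset, 60 * 1024);
}
```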
@@ -134,6 +136,11 @@ impl StackPool {
    /// that should be decommitted. It is the caller's responsibility to ensure
    /// that those decommits happen before this stack is reused.
    ///
+    /// # Panics
+    ///
+    /// `zero_stack` panics if the passed in `stack` was not created by
+    /// [`Self::allocate`].
+    ///
    /// # Safety
    ///
    /// The stack must no longer be in use, and ready for returning to the pool
@@ -144,6 +151,11 @@ impl StackPool {
        mut decommit: impl FnMut(*mut u8, usize),
    ) {
        assert!(stack.is_from_raw_parts());
+        assert!(
+            !self.stack_size.is_zero(),
+            "pooling allocator not configured to enable fiber stack allocation \
+             (Self::allocate should have returned an error)"
+        );

        if !self.async_stack_zeroing {
            return;
@@ -160,9 +172,12 @@ impl StackPool {
            "fiber stack top pointer not in range"
        );

-        // Remove the guard page from the size
-        let stack_size = self.stack_size.byte_count() - self.page_size.byte_count();
-        let bottom_of_stack = top - stack_size;
+        // Remove the guard page from the size.
+        let stack_size = self.stack_size.checked_sub(self.page_size).expect(
+            "self.stack_size is host-page-aligned and is > 0,\
+             so it must be >= self.page_size",
+        );
+        let bottom_of_stack = top - stack_size.byte_count();
        let start_of_stack = bottom_of_stack - self.page_size.byte_count();
        assert!(start_of_stack >= base && start_of_stack < (base + len));
        assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);
@@ -175,14 +190,17 @@ impl StackPool {
        // * madvise for the whole range incurs expensive future page faults
        // * most threads probably don't use most of the stack anyway
        let size_to_memset = stack_size.min(self.async_stack_keep_resident);
+        let rest = stack_size
+            .checked_sub(size_to_memset)
+            .expect("stack_size >= size_to_memset");
        std::ptr::write_bytes(
-            (bottom_of_stack + stack_size - size_to_memset) as *mut u8,
+            (bottom_of_stack + rest.byte_count()) as *mut u8,
            0,
-            size_to_memset,
+            size_to_memset.byte_count(),
        );

        // Use the system to reset remaining stack pages to zero.
-        decommit(bottom_of_stack as _, stack_size - size_to_memset);
+        decommit(bottom_of_stack as _, rest.byte_count());
    }

    /// Deallocate a previously-allocated fiber.
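To make the split above concrete (illustrative numbers only): the top `size_to_memset` bytes of the usable stack are zeroed in place so those pages stay resident, while the lower `rest` bytes are handed to `decommit` for the OS to reset lazily.

```rust
// Illustrative numbers only; the real code operates on page-aligned byte counts.
fn main() {
    let stack_size: usize = 16 * 4096; // usable stack, guard page already removed
    let async_stack_keep_resident: usize = 4 * 4096;

    // Top `size_to_memset` bytes: zeroed by hand, pages stay resident.
    let size_to_memset = stack_size.min(async_stack_keep_resident);
    // Lower `rest` bytes: handed to `decommit` (e.g. madvise) to be reset lazily.
    let rest = stack_size.checked_sub(size_to_memset).unwrap();

    assert_eq!(size_to_memset, 16 * 1024); // memset range: [bottom + rest, top)
    assert_eq!(rest, 48 * 1024); // decommit range: [bottom, bottom + rest)
}
```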