@@ -1,16 +1,18 @@
 use atomic::Atomic;
 
-use std::marker::PhantomData;
 use std::sync::atomic::Ordering;
+use std::sync::Arc;
 
 use crate::policy::sft::GCWorkerMutRef;
 use crate::policy::sft::SFT;
 use crate::policy::space::{CommonSpace, Space};
 use crate::util::address::Address;
 
 use crate::util::conversions;
+use crate::util::heap::gc_trigger::GCTrigger;
 use crate::util::heap::layout::vm_layout::vm_layout;
 use crate::util::heap::PageResource;
+use crate::util::heap::VMRequest;
 use crate::util::memory::MmapStrategy;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::metadata::side_metadata::SideMetadataSanity;
@@ -34,11 +36,11 @@ pub struct LockFreeImmortalSpace<VM: VMBinding> {
     /// start of this space
     start: Address,
     /// Total bytes for the space
-    extent: usize,
+    total_bytes: usize,
     /// Zero memory after slow-path allocation
     slow_path_zeroing: bool,
     metadata: SideMetadataContext,
-    phantom: PhantomData<VM>,
+    gc_trigger: Arc<GCTrigger<VM>>,
 }
 
 impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
@@ -99,12 +101,16 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
         unimplemented!()
     }
 
+    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
+        &self.gc_trigger
+    }
+
     fn release_multiple_pages(&mut self, _start: Address) {
         panic!("immortalspace only releases pages enmasse")
     }
 
     fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
-        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.extent) };
+        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
     }
 
     fn reserved_pages(&self) -> usize {
@@ -115,6 +121,7 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
     }
 
     fn acquire(&self, _tls: VMThread, pages: usize) -> Address {
+        trace!("LockFreeImmortalSpace::acquire");
         let bytes = conversions::pages_to_bytes(pages);
         let start = self
             .cursor
@@ -170,8 +177,8 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
     #[allow(dead_code)] // Only used with certain features.
     pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
         let slow_path_zeroing = args.zeroed;
-        // FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
-        // https://github.com/mmtk/mmtk-core/issues/314
+
+        // Get the total bytes for the heap.
         let total_bytes = match *args.options.gc_trigger {
             crate::util::options::GCTriggerSelector::FixedHeapSize(bytes) => bytes,
             _ => unimplemented!(),
@@ -182,21 +189,30 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
             total_bytes,
             vm_layout().available_bytes()
         );
+        // Align up to chunks
+        let aligned_total_bytes = crate::util::conversions::raw_align_up(
+            total_bytes,
+            crate::util::heap::vm_layout::BYTES_IN_CHUNK,
+        );
+
+        // Create a VM request of fixed size
+        let vmrequest = VMRequest::fixed_size(aligned_total_bytes);
+        // Reserve the space
+        let VMRequest::Extent { extent, top } = vmrequest else { unreachable!() };
+        let start = args.heap.reserve(extent, top);
 
-        // FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
-        // https://github.com/mmtk/mmtk-core/issues/314
         let space = Self {
             name: args.name,
-            cursor: Atomic::new(vm_layout().available_start()),
-            limit: vm_layout().available_start() + total_bytes,
-            start: vm_layout().available_start(),
-            extent: total_bytes,
+            cursor: Atomic::new(start),
+            limit: start + aligned_total_bytes,
+            start,
+            total_bytes: aligned_total_bytes,
             slow_path_zeroing,
             metadata: SideMetadataContext {
                 global: args.global_side_metadata_specs,
                 local: vec![],
             },
-            phantom: PhantomData,
+            gc_trigger: args.gc_trigger,
        };
 
        // Eagerly memory map the entire heap (also zero all the memory)
@@ -205,11 +221,10 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
        } else {
            MmapStrategy::Normal
        };
-        crate::util::memory::dzmmap_noreplace(vm_layout().available_start(), total_bytes, strategy)
-            .unwrap();
+        crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap();
        if space
            .metadata
-            .try_map_metadata_space(vm_layout().available_start(), total_bytes)
+            .try_map_metadata_space(start, aligned_total_bytes)
            .is_err()
        {
            // TODO(Javad): handle meta space allocation failure