Skip to content

Commit 91191be

Browse files
authored
Implement get_gc_trigger() for LockFreeImmortalSpace (#1003)
This PR fixes two issues in `LockFreeImmortalSpace` and adds a test.

* Implemented `get_gc_trigger()`. As `LockFreeImmortalSpace` does not have a `CommonSpace`, we cannot use the default implementation.
* Properly reserve space with `HeapMeta`, so the address range of `LockFreeImmortalSpace` won't clash with other spaces.

This closes #314.
1 parent 80cf0ed commit 91191be

File tree

4 files changed

+69
-16
lines changed

4 files changed

+69
-16
lines changed

src/policy/lockfreeimmortalspace.rs

Lines changed: 31 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -1,16 +1,18 @@
11
use atomic::Atomic;
22

3-
use std::marker::PhantomData;
43
use std::sync::atomic::Ordering;
4+
use std::sync::Arc;
55

66
use crate::policy::sft::GCWorkerMutRef;
77
use crate::policy::sft::SFT;
88
use crate::policy::space::{CommonSpace, Space};
99
use crate::util::address::Address;
1010

1111
use crate::util::conversions;
12+
use crate::util::heap::gc_trigger::GCTrigger;
1213
use crate::util::heap::layout::vm_layout::vm_layout;
1314
use crate::util::heap::PageResource;
15+
use crate::util::heap::VMRequest;
1416
use crate::util::memory::MmapStrategy;
1517
use crate::util::metadata::side_metadata::SideMetadataContext;
1618
use crate::util::metadata::side_metadata::SideMetadataSanity;
@@ -34,11 +36,11 @@ pub struct LockFreeImmortalSpace<VM: VMBinding> {
3436
/// start of this space
3537
start: Address,
3638
/// Total bytes for the space
37-
extent: usize,
39+
total_bytes: usize,
3840
/// Zero memory after slow-path allocation
3941
slow_path_zeroing: bool,
4042
metadata: SideMetadataContext,
41-
phantom: PhantomData<VM>,
43+
gc_trigger: Arc<GCTrigger<VM>>,
4244
}
4345

4446
impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
@@ -99,12 +101,16 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
99101
unimplemented!()
100102
}
101103

104+
fn get_gc_trigger(&self) -> &GCTrigger<VM> {
105+
&self.gc_trigger
106+
}
107+
102108
fn release_multiple_pages(&mut self, _start: Address) {
103109
panic!("immortalspace only releases pages enmasse")
104110
}
105111

106112
fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
107-
unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.extent) };
113+
unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
108114
}
109115

110116
fn reserved_pages(&self) -> usize {
@@ -115,6 +121,7 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
115121
}
116122

117123
fn acquire(&self, _tls: VMThread, pages: usize) -> Address {
124+
trace!("LockFreeImmortalSpace::acquire");
118125
let bytes = conversions::pages_to_bytes(pages);
119126
let start = self
120127
.cursor
@@ -170,8 +177,8 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
170177
#[allow(dead_code)] // Only used with certain features.
171178
pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
172179
let slow_path_zeroing = args.zeroed;
173-
// FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
174-
// https://github.com/mmtk/mmtk-core/issues/314
180+
181+
// Get the total bytes for the heap.
175182
let total_bytes = match *args.options.gc_trigger {
176183
crate::util::options::GCTriggerSelector::FixedHeapSize(bytes) => bytes,
177184
_ => unimplemented!(),
@@ -182,21 +189,30 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
182189
total_bytes,
183190
vm_layout().available_bytes()
184191
);
192+
// Align up to chunks
193+
let aligned_total_bytes = crate::util::conversions::raw_align_up(
194+
total_bytes,
195+
crate::util::heap::vm_layout::BYTES_IN_CHUNK,
196+
);
197+
198+
// Create a VM request of fixed size
199+
let vmrequest = VMRequest::fixed_size(aligned_total_bytes);
200+
// Reserve the space
201+
let VMRequest::Extent{ extent, top } = vmrequest else { unreachable!() };
202+
let start = args.heap.reserve(extent, top);
185203

186-
// FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
187-
// https://github.com/mmtk/mmtk-core/issues/314
188204
let space = Self {
189205
name: args.name,
190-
cursor: Atomic::new(vm_layout().available_start()),
191-
limit: vm_layout().available_start() + total_bytes,
192-
start: vm_layout().available_start(),
193-
extent: total_bytes,
206+
cursor: Atomic::new(start),
207+
limit: start + aligned_total_bytes,
208+
start,
209+
total_bytes: aligned_total_bytes,
194210
slow_path_zeroing,
195211
metadata: SideMetadataContext {
196212
global: args.global_side_metadata_specs,
197213
local: vec![],
198214
},
199-
phantom: PhantomData,
215+
gc_trigger: args.gc_trigger,
200216
};
201217

202218
// Eagerly memory map the entire heap (also zero all the memory)
@@ -205,11 +221,10 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
205221
} else {
206222
MmapStrategy::Normal
207223
};
208-
crate::util::memory::dzmmap_noreplace(vm_layout().available_start(), total_bytes, strategy)
209-
.unwrap();
224+
crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap();
210225
if space
211226
.metadata
212-
.try_map_metadata_space(vm_layout().available_start(), total_bytes)
227+
.try_map_metadata_space(start, aligned_total_bytes)
213228
.is_err()
214229
{
215230
// TODO(Javad): handle meta space allocation failure

vmbindings/dummyvm/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -36,6 +36,7 @@ malloc_counted_size = ["mmtk/malloc_counted_size"]
3636
malloc_mark_sweep = ["mmtk/malloc_mark_sweep"]
3737
vo_bit = ["mmtk/vo_bit"]
3838
extreme_assertions = ["mmtk/extreme_assertions"]
39+
nogc_lock_free=["mmtk/nogc_lock_free"]
3940

4041
# Feature to control which benchmarks to run. See benches/main.rs
4142
bench_sft = []

vmbindings/dummyvm/src/tests/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -25,6 +25,8 @@ mod malloc_api;
2525
#[cfg(feature = "malloc_counted_size")]
2626
mod malloc_counted;
2727
mod malloc_ms;
28+
#[cfg(feature = "nogc_lock_free")]
29+
mod nogc_lock_free;
2830
#[cfg(target_pointer_width = "64")]
2931
mod vm_layout_compressed_pointer_64;
3032
mod vm_layout_default;
Lines changed: 35 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,35 @@
1+
// GITHUB-CI: MMTK_PLAN=NoGC
2+
// GITHUB-CI: FEATURES=nogc_lock_free
3+
4+
use crate::api;
5+
use crate::test_fixtures::{MutatorFixture, SerialFixture};
6+
use crate::DummyVM;
7+
use log::info;
8+
use mmtk::plan::AllocationSemantics;
9+
use mmtk::vm::VMBinding;
10+
11+
lazy_static! {
12+
static ref MUTATOR: SerialFixture<MutatorFixture> = SerialFixture::new();
13+
}
14+
15+
#[test]
16+
pub fn nogc_lock_free_allocate() {
17+
MUTATOR.with_fixture(|fixture| {
18+
let min = DummyVM::MIN_ALIGNMENT;
19+
let max = DummyVM::MAX_ALIGNMENT;
20+
info!("Allowed alignment between {} and {}", min, max);
21+
let mut align = min;
22+
while align <= max {
23+
info!("Test allocation with alignment {}", align);
24+
let addr = api::mmtk_alloc(fixture.mutator, 8, align, 0, AllocationSemantics::Default);
25+
info!("addr = {}", addr);
26+
assert!(
27+
addr.is_aligned_to(align),
28+
"Expected allocation alignment {}, returned address is {:?}",
29+
align,
30+
addr
31+
);
32+
align *= 2;
33+
}
34+
})
35+
}

0 commit comments

Comments
 (0)