Skip to content

Commit 9786ce8

Browse files
authored
Add missing docs for some util modules (#1024)
This PR is a step towards #309. * Add missing docs for some `util` modules. * Make `util::metadata::side_metadata::helpers/helpers_32` not public. * Make `util::reference_processor` not public. * Allow adding docs for options defined by the `options!` macro.
1 parent da9fb1f commit 9786ce8

File tree

20 files changed

+234
-142
lines changed

20 files changed

+234
-142
lines changed

.github/scripts/ci-test.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ fi
1313

1414
./examples/build.py
1515

16-
ALL_PLANS=$(sed -n '/enum PlanSelector/,/}/p' src/util/options.rs | xargs | grep -o '{.*}' | grep -o '\w\+')
16+
ALL_PLANS=$(sed -n '/enum PlanSelector/,/}/p' src/util/options.rs | sed -e 's;//.*;;g' -e '/^$/d' -e 's/,//g' | xargs | grep -o '{.*}' | grep -o '\w\+')
1717

1818
# Test with DummyVM (each test in a separate run)
1919
cd vmbindings/dummyvm

src/memory_manager.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -735,7 +735,7 @@ pub fn is_mapped_address(address: Address) -> bool {
735735
/// * `mmtk`: A reference to an MMTk instance.
736736
/// * `reff`: The weak reference to add.
737737
pub fn add_weak_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
738-
mmtk.reference_processors.add_weak_candidate::<VM>(reff);
738+
mmtk.reference_processors.add_weak_candidate(reff);
739739
}
740740

741741
/// Add a reference to the list of soft references. A binding may
@@ -745,7 +745,7 @@ pub fn add_weak_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference)
745745
/// * `mmtk`: A reference to an MMTk instance.
746746
/// * `reff`: The soft reference to add.
747747
pub fn add_soft_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
748-
mmtk.reference_processors.add_soft_candidate::<VM>(reff);
748+
mmtk.reference_processors.add_soft_candidate(reff);
749749
}
750750

751751
/// Add a reference to the list of phantom references. A binding may
@@ -755,7 +755,7 @@ pub fn add_soft_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference)
755755
/// * `mmtk`: A reference to an MMTk instance.
756756
/// * `reff`: The phantom reference to add.
757757
pub fn add_phantom_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
758-
mmtk.reference_processors.add_phantom_candidate::<VM>(reff);
758+
mmtk.reference_processors.add_phantom_candidate(reff);
759759
}
760760

761761
/// Generic hook to allow benchmarks to be harnessed. We do a full heap

src/policy/marksweepspace/malloc_ms/metadata.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ fn map_active_chunk_metadata(chunk_start: Address) {
108108
/// We map the active chunk metadata (if not previously mapped), as well as the VO bit metadata
109109
/// and active page metadata here. Note that if [addr, addr + size) crosses multiple chunks, we
110110
/// will map for each chunk.
111-
pub fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize) {
111+
pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize) {
112112
// In order to prevent race conditions, we synchronize on the lock first and then
113113
// check if we need to map the active chunk metadata for `chunk_start`
114114
let _lock = CHUNK_MAP_LOCK.lock().unwrap();
@@ -197,7 +197,7 @@ pub(super) unsafe fn is_page_marked_unsafe(page_addr: Address) -> bool {
197197
ACTIVE_PAGE_METADATA_SPEC.load::<u8>(page_addr) == 1
198198
}
199199

200-
pub fn is_chunk_mapped(chunk_start: Address) -> bool {
200+
pub(super) fn is_chunk_mapped(chunk_start: Address) -> bool {
201201
// Since `address_to_meta_address` will translate a data address to a metadata address without caring
202202
// if it goes across metadata boundaries, we have to check if we have accidentally gone over the bounds
203203
// of the active chunk metadata spec before we check if the metadata has been mapped or not

src/policy/space.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -431,7 +431,7 @@ pub struct CommonSpace<VM: VMBinding> {
431431
pub vm_map: &'static dyn VMMap,
432432
pub mmapper: &'static dyn Mmapper,
433433

434-
pub metadata: SideMetadataContext,
434+
pub(crate) metadata: SideMetadataContext,
435435

436436
/// This field equals to needs_log_bit in the plan constraints.
437437
// TODO: This should be a constant for performance.

src/util/heap/layout/vm_layout.rs

Lines changed: 14 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
//! The module defines virtual memory layout parameters.
2+
13
use std::sync::atomic::AtomicBool;
24

35
use atomic::Ordering;
@@ -8,26 +10,17 @@ use crate::util::Address;
810

911
use crate::util::conversions::{chunk_align_down, chunk_align_up};
1012

11-
/**
12-
* log_2 of the coarsest unit of address space allocation.
13-
*
14-
* In the 32-bit VM layout, this determines the granularity of
15-
* allocation in a discontigouous space. In the 64-bit layout,
16-
* this determines the growth factor of the large contiguous spaces
17-
* that we provide.
18-
*/
13+
/// log_2 of the coarsest unit of address space allocation.
1914
pub const LOG_BYTES_IN_CHUNK: usize = 22;
20-
21-
/** Coarsest unit of address space allocation. */
15+
/// Coarsest unit of address space allocation.
2216
pub const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK;
17+
/// Mask for chunk size.
2318
pub const CHUNK_MASK: usize = (1 << LOG_BYTES_IN_CHUNK) - 1;
24-
25-
/** Coarsest unit of address space allocation, in pages */
19+
/// Coarsest unit of address space allocation, in pages
2620
pub const PAGES_IN_CHUNK: usize = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE as usize);
27-
28-
/** Granularity at which we map and unmap virtual address space in the heap */
21+
/// log_2 of the granularity at which we map and unmap virtual address space in the heap
2922
pub const LOG_MMAP_CHUNK_BYTES: usize = LOG_BYTES_IN_CHUNK;
30-
23+
/// Granularity at which we map and unmap virtual address space in the heap
3124
pub const MMAP_CHUNK_BYTES: usize = 1 << LOG_MMAP_CHUNK_BYTES;
3225

3326
/// Runtime-initialized virtual memory constants
@@ -50,9 +43,12 @@ pub struct VMLayout {
5043

5144
impl VMLayout {
5245
#[cfg(target_pointer_width = "32")]
46+
/// The maximum virtual memory address space that can be used on the target.
5347
pub const LOG_ARCH_ADDRESS_SPACE: usize = 32;
5448
#[cfg(target_pointer_width = "64")]
49+
/// The maximum virtual memory address space that can be used on the target.
5550
pub const LOG_ARCH_ADDRESS_SPACE: usize = 47;
51+
5652
/// An upper bound on the extent of any space in the
5753
/// current memory layout
5854
pub const fn max_space_extent(&self) -> usize {
@@ -189,6 +185,9 @@ static mut VM_LAYOUT: VMLayout = VMLayout::new_64bit();
189185

190186
static VM_LAYOUT_FETCHED: AtomicBool = AtomicBool::new(false);
191187

188+
/// Get the current virtual memory layout in use.
189+
/// If the binding would like to set a custom virtual memory layout ([`crate::mmtk::MMTKBuilder::set_vm_layout`]), they should not
190+
/// call this function before they set a custom layout.
192191
pub fn vm_layout() -> &'static VMLayout {
193192
if cfg!(debug_assertions) {
194193
VM_LAYOUT_FETCHED.store(true, Ordering::SeqCst);

src/util/malloc/mod.rs

Lines changed: 25 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,17 @@
1+
//! This module exposes a set of malloc API. They are currently implemented with
2+
//! the library malloc. This may change in the future, and will be replaced
3+
//! with a native MMTk implementation.
4+
5+
//! We have two versions for each function:
6+
//! * a normal version: it has the signature that is compatible with the standard malloc library.
7+
//! * a counted version: the allocated/freed bytes are calculated into MMTk's heap. So extra arguments
8+
//! are needed to maintain allocated bytes properly. The API is inspired by Julia's counted malloc.
9+
//! The counted version is only available with the feature `malloc_counted_size`.
10+
111
/// Malloc provided by libraries
212
pub(crate) mod library;
3-
/// Using malloc as mark sweep free-list allocator
13+
/// Using malloc as mark sweep free-list allocator.
14+
// This module is made public so we can test it from dummyvm. It should be pub(crate).
415
pub mod malloc_ms_util;
516

617
use crate::util::Address;
@@ -9,20 +20,13 @@ use crate::vm::VMBinding;
920
#[cfg(feature = "malloc_counted_size")]
1021
use crate::MMTK;
1122

12-
// The following expose a set of malloc API. They are currently implemented with
13-
// the library malloc. When we have native malloc implementation, we should change
14-
// their implementation to point to our native malloc.
15-
16-
// We have two versions for each function:
17-
// * a normal version: it has the signature that is compatible with the standard malloc library.
18-
// * a counted version: the allocated/freed bytes are calculated into MMTk's heap. So extra arguments
19-
// are needed to maintain allocated bytes properly. The API is inspired by Julia's counted malloc.
20-
// The counted version is only available with the feature `malloc_counted_size`.
21-
23+
/// Manually allocate memory. Similar to libc's malloc.
2224
pub fn malloc(size: usize) -> Address {
2325
Address::from_mut_ptr(unsafe { self::library::malloc(size) })
2426
}
2527

28+
/// Manually allocate memory. Similar to libc's malloc.
29+
/// This also counts the allocated memory into the heap size of the given MMTk instance.
2630
#[cfg(feature = "malloc_counted_size")]
2731
pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
2832
let res = malloc(size);
@@ -32,10 +36,13 @@ pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
3236
res
3337
}
3438

39+
/// Manually allocate memory and initialize the bytes in the allocated memory to zero. Similar to libc's calloc.
3540
pub fn calloc(num: usize, size: usize) -> Address {
3641
Address::from_mut_ptr(unsafe { self::library::calloc(num, size) })
3742
}
3843

44+
/// Manually allocate memory and initialize the bytes in the allocated memory to zero. Similar to libc's calloc.
45+
/// This also counts the allocated memory into the heap size of the given MMTk instance.
3946
#[cfg(feature = "malloc_counted_size")]
4047
pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
4148
let res = calloc(num, size);
@@ -45,10 +52,14 @@ pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -
4552
res
4653
}
4754

55+
/// Reallocate the given area of memory. Similar to libc's realloc.
4856
pub fn realloc(addr: Address, size: usize) -> Address {
4957
Address::from_mut_ptr(unsafe { self::library::realloc(addr.to_mut_ptr(), size) })
5058
}
5159

60+
/// Reallocate the given area of memory. Similar to libc's realloc.
61+
/// This also adjusts the allocated memory size based on the original allocation and the new allocation, and counts
62+
/// that into the heap size for the given MMTk instance.
5263
#[cfg(feature = "malloc_counted_size")]
5364
pub fn realloc_with_old_size<VM: VMBinding>(
5465
mmtk: &MMTK<VM>,
@@ -68,10 +79,13 @@ pub fn realloc_with_old_size<VM: VMBinding>(
6879
res
6980
}
7081

82+
/// Manually free the memory that is returned from other manual allocation functions in this module.
7183
pub fn free(addr: Address) {
7284
unsafe { self::library::free(addr.to_mut_ptr()) }
7385
}
7486

87+
/// Manually free the memory that is returned from other manual allocation functions in this module.
88+
/// This also reduces the allocated memory size.
7589
#[cfg(feature = "malloc_counted_size")]
7690
pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
7791
free(addr);

src/util/metadata/global.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,15 +14,19 @@ use atomic::Ordering;
1414
/// For performance reasons, objects of this struct should be constants.
1515
#[derive(Clone, Copy, Debug)]
1616
pub enum MetadataSpec {
17+
/// In-header metadata uses bits from an object header.
1718
InHeader(HeaderMetadataSpec),
19+
/// On-side metadata uses a side table.
1820
OnSide(SideMetadataSpec),
1921
}
2022

2123
impl MetadataSpec {
24+
/// Is this metadata stored in the side table?
2225
pub const fn is_on_side(&self) -> bool {
2326
matches!(self, &MetadataSpec::OnSide(_))
2427
}
2528

29+
/// Is this metadata stored in the object header?
2630
pub const fn is_in_header(&self) -> bool {
2731
matches!(self, &MetadataSpec::InHeader(_))
2832
}

src/util/metadata/log_bit.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ impl VMGlobalLogBitSpec {
2828
}
2929
}
3030

31+
/// Check if the log bit represents the unlogged state (the bit is 1).
3132
pub fn is_unlogged<VM: VMBinding>(&self, object: ObjectReference, order: Ordering) -> bool {
3233
self.load_atomic::<VM, u8>(object, None, order) == 1
3334
}

src/util/metadata/metadata_val_traits.rs

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,9 @@ use num_traits::{Unsigned, WrappingAdd, WrappingSub, Zero};
66
/// Describes bits and log2 bits for the numbers.
77
/// If num_traits has this, we do not need our own implementation: <https://github.com/rust-num/num-traits/issues/247>
88
pub trait Bits {
9+
/// The size of this atomic type in bits.
910
const BITS: u32;
11+
/// The size (in log2) of this atomic type in bits.
1012
const LOG2: u32;
1113
}
1214
macro_rules! impl_bits_trait {
@@ -26,9 +28,13 @@ impl_bits_trait!(usize);
2628
/// Describes bitwise operations.
2729
/// If num_traits has this, we do not need our own implementation: <https://github.com/rust-num/num-traits/issues/232>
2830
pub trait BitwiseOps {
31+
/// Perform bitwise and for two values.
2932
fn bitand(self, other: Self) -> Self;
33+
/// Perform bitwise or for two values.
3034
fn bitor(self, other: Self) -> Self;
35+
/// Perform bitwise xor for two values.
3136
fn bitxor(self, other: Self) -> Self;
37+
/// Perform bitwise invert (not) for the value.
3238
fn inv(self) -> Self;
3339
}
3440
macro_rules! impl_bitwise_ops_trait {

src/util/metadata/mod.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,7 @@
199199
//! When a space is created by a plan (e.g. SemiSpace::new), the plan can create its global specs by `MetadataContext::new_global_specs(&[GLOBAL_META_1, GLOBAL_META_2])`. Then,
200200
//! the global specs are passed to each space that the plan creates.
201201
//!
202-
//! Each space will then combine the global specs and its own local specs to create a [SideMetadataContext](crate::util::metadata::side_metadata::SideMetadataContext).
202+
//! Each space will then combine the global specs and its own local specs to create a SideMetadataContext.
203203
//! Allocating side metadata space and accounting its memory usage is done by `SideMetadata`. If a space uses `CommonSpace`, `CommonSpace` will create `SideMetadata` and manage
204204
//! reserving and allocating metadata space when necessary. If a space does not use `CommonSpace`, it should create `SideMetadata` itself and manage allocating metadata space
205205
//! as its own responsibility.

0 commit comments

Comments
 (0)