diff --git a/benches/mock_bench/mod.rs b/benches/mock_bench/mod.rs index 95466bfdc7..48fc1cfe0d 100644 --- a/benches/mock_bench/mod.rs +++ b/benches/mock_bench/mod.rs @@ -27,7 +27,7 @@ pub fn bench(c: &mut Criterion) { "alloc" => alloc::bench(c), "internal_pointer" => internal_pointer::bench(c), "sft" => sft::bench(c), - _ => panic!("Unknown benchmark {:?}", bench), + _ => panic!("Unknown benchmark {bench:?}"), }, Err(_) => panic!("Need to name a benchmark by the env var MMTK_BENCH"), } diff --git a/src/global_state.rs b/src/global_state.rs index b5a78d9bbe..dcf73adab4 100644 --- a/src/global_state.rs +++ b/src/global_state.rs @@ -147,9 +147,7 @@ impl GlobalState { let old = self.scanned_stacks.fetch_add(1, Ordering::SeqCst); debug_assert!( old < n_mutators, - "The number of scanned stacks ({}) is more than the number of mutators ({})", - old, - n_mutators + "The number of scanned stacks ({old}) is more than the number of mutators ({n_mutators})", ); let scanning_done = old + 1 == n_mutators; if scanning_done { diff --git a/src/mmtk.rs b/src/mmtk.rs index 2916f98f82..40d65e9f66 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -546,8 +546,7 @@ impl MMTK { let live_bytes = live_bytes_per_space[space_idx]; debug_assert!( live_bytes <= used_bytes, - "Live bytes of objects in {} ({} bytes) is larger than used pages ({} bytes), something is wrong.", - space_name, live_bytes, used_bytes + "Live bytes of objects in {space_name} ({live_bytes} bytes) is larger than used pages ({used_bytes} bytes), something is wrong." ); ret.insert(space_name, crate::LiveBytesStats { live_bytes, @@ -604,7 +603,7 @@ impl MMTK { pub fn mmtk_debug_print_object(object: crate::util::ObjectReference) { // If the address is unmapped, we cannot access its metadata. Just quit. if !object.to_raw_address().is_mapped() { - println!("{} is not mapped in MMTk", object); + println!("{object} is not mapped in MMTk"); return; } @@ -613,10 +612,7 @@ pub fn mmtk_debug_print_object(object: crate::util::ObjectReference) { .to_raw_address() .is_aligned_to(crate::util::ObjectReference::ALIGNMENT) { - println!( - "{} is not properly aligned. It is not an object reference.", - object - ); + println!("{object} is not properly aligned. It is not an object reference.",); } // Forward to the space diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 0404fc93bb..acd71d4813 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -106,12 +106,7 @@ impl CommonGenPlan { let cur_nursery = self.nursery.reserved_pages(); let max_nursery = self.common.base.gc_trigger.get_max_nursery_pages(); let nursery_full = cur_nursery >= max_nursery; - trace!( - "nursery_full = {:?} (nursery = {}, max_nursery = {})", - nursery_full, - cur_nursery, - max_nursery, - ); + trace!("nursery_full = {nursery_full:?} (nursery = {cur_nursery}, max_nursery = {max_nursery})"); if nursery_full { return true; } @@ -251,12 +246,7 @@ impl CommonGenPlan { let available = plan.get_available_pages(); let min_nursery = plan.base().gc_trigger.get_min_nursery_pages(); let next_gc_full_heap = available < min_nursery; - trace!( - "next gc will be full heap? {}, available pages = {}, min nursery = {}", - next_gc_full_heap, - available, - min_nursery - ); + trace!("next gc will be full heap? 
{next_gc_full_heap}, available pages = {available}, min nursery = {min_nursery}"); next_gc_full_heap } diff --git a/src/plan/global.rs b/src/plan/global.rs index 5a5bb38ab5..74c0e2baf8 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -233,13 +233,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { let vm_live_pages = conversions::bytes_to_pages_up(vm_live_bytes); let total = used_pages + collection_reserve + vm_live_pages; - trace!( - "Reserved pages = {}, used pages: {}, collection reserve: {}, VM live pages: {}", - total, - used_pages, - collection_reserve, - vm_live_pages, - ); + trace!("Reserved pages = {total}, used pages: {used_pages}, collection reserve: {collection_reserve}, VM live pages: {vm_live_pages}"); total } @@ -268,12 +262,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { // buffers for copy allocators). // 3. the binding disabled GC, and we end up over-allocating beyond the total pages determined by the GC trigger. let available_pages = total_pages.saturating_sub(reserved_pages); - trace!( - "Total pages = {}, reserved pages = {}, available pages = {}", - total_pages, - reserved_pages, - available_pages, - ); + trace!("Total pages = {total_pages}, reserved pages = {reserved_pages}, available pages = {available_pages}"); available_pages } diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index ee60269d72..db119ce8e9 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -104,8 +104,7 @@ impl std::fmt::Debug for MutatorConfig { None => "!!!missing space here!!!", }; f.write_fmt(format_args!( - "- {:?} = {:?} ({:?})\n", - semantic, selector, space_name + "- {semantic:?} = {selector:?} ({space_name:?})\n", ))?; } f.write_str("Space mapping:\n")?; diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index a06dee9816..643b302980 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -184,22 +184,19 @@ impl Plan for StickyImmix { if self.is_current_gc_nursery() { // Every reachable object should be logged if !VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::(object, Ordering::SeqCst) { - error!("Object {} is not unlogged (all objects that have been traced should be unlogged/mature)", object); + error!("Object {object} is not unlogged (all objects that have been traced should be unlogged/mature)"); return false; } // Every reachable object should be marked if self.immix.immix_space.in_space(object) && !self.immix.immix_space.is_marked(object) { - error!( - "Object {} is not marked (all objects that have been traced should be marked)", - object - ); + error!("Object {object} is not marked (all objects that have been traced should be marked)"); return false; } else if self.immix.common.los.in_space(object) && !self.immix.common.los.is_live(object) { - error!("LOS Object {} is not marked", object); + error!("LOS Object {object} is not marked"); return false; } } @@ -253,15 +250,12 @@ impl crate::plan::generational::global::GenerationalPlanExt f if self.immix.immix_space.in_space(object) { if !self.is_object_in_nursery(object) { // Mature object - trace!("Immix mature object {}, skip", object); + trace!("Immix mature object {object}, skip"); return object; } else { // Nursery object let object = if KIND == TRACE_KIND_TRANSITIVE_PIN || KIND == TRACE_KIND_FAST { - trace!( - "Immix nursery object {} is being traced without moving", - object - ); + trace!("Immix nursery object {object} is being traced without moving"); self.immix .immix_space 
.trace_object_without_moving(queue, object) @@ -281,15 +275,12 @@ impl crate::plan::generational::global::GenerationalPlanExt f if ret == object { "".to_string() } else { - format!("-> new object {}", ret) + format!("-> new object {ret}") } ); ret } else { - trace!( - "Immix nursery object {} is being traced without moving", - object - ); + trace!("Immix nursery object {object} is being traced without moving"); self.immix .immix_space .trace_object_without_moving(queue, object) diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 7067f65d43..a49f983f4a 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -229,7 +229,7 @@ impl CopySpace { semantics: Option, worker: &mut GCWorker, ) -> ObjectReference { - trace!("copyspace.trace_object(, {:?}, {:?})", object, semantics,); + trace!("copyspace.trace_object({object:?}, {semantics:?})"); // If this is not from space, we do not need to trace it (the object has been copied to the tosapce) if !self.is_from_space() { @@ -243,8 +243,7 @@ impl CopySpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::metadata::vo_bit::is_vo_bit_set(object), - "{:x}: VO bit not set", - object + "{object:x}: VO bit not set", ); trace!("attempting to forward"); @@ -271,7 +270,7 @@ impl CopySpace { trace!("Forwarding pointer"); queue.enqueue(new_object); - trace!("Copied [{:?} -> {:?}]", object, new_object); + trace!("Copied [{object:?} -> {new_object:?}]"); new_object } } diff --git a/src/policy/immix/defrag.rs b/src/policy/immix/defrag.rs index 4f56a5e3c1..ec08d9e7ba 100644 --- a/src/policy/immix/defrag.rs +++ b/src/policy/immix/defrag.rs @@ -80,7 +80,7 @@ impl Defrag { || !exhausted_reusable_space || stress_defrag || (collect_whole_heap && user_triggered && full_heap_system_gc)); - info!("Defrag: {}", in_defrag); + info!("Defrag: {in_defrag}"); probe!(mmtk, immix_defrag, in_defrag); self.in_defrag_collection .store(in_defrag, Ordering::Release) diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index a8bcd54fda..08fb9be998 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -664,8 +664,7 @@ impl ImmixSpace { if new_object == object { debug_assert!( self.is_marked(object) || self.defrag.space_exhausted() || self.is_pinned(object), - "Forwarded object is the same as original object {} even though it should have been copied", - object, + "Forwarded object is the same as original object {object} even though it should have been copied", ); } else { // new_object != object diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index c08ffeeaaa..04561dcd19 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -209,8 +209,7 @@ impl ImmortalSpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::metadata::vo_bit::is_vo_bit_set(object), - "{:x}: VO bit not set", - object + "{object:x}: VO bit not set", ); if self.mark_state.test_and_mark::(object) { // Set the unlog bit if required diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index a9346e010c..cf48845015 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -260,8 +260,7 @@ impl LargeObjectSpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::metadata::vo_bit::is_vo_bit_set(object), - "{:x}: VO bit not set", - object + "{object:x}: VO bit not set", ); let nursery_object = self.is_in_nursery(object); trace!( @@ -273,7 +272,7 @@ impl LargeObjectSpace { // Note that test_and_mark() has side effects of // clearing 
nursery bit/moving objects out of logical nursery if self.test_and_mark(object, self.mark_state) { - trace!("LOS object {} is being marked now", object); + trace!("LOS object {object} is being marked now"); self.treadmill.copy(object, nursery_object); // We just moved the object out of the logical nursery, mark it as unlogged. // We also unlog mature objects as their unlog bit may have been unset before the @@ -284,10 +283,7 @@ impl LargeObjectSpace { } queue.enqueue(object); } else { - trace!( - "LOS object {} is not being marked now, it was marked before", - object - ); + trace!("LOS object {object} is not being marked now, it was marked before"); } } object diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 2f3689f70b..c8d81f510a 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -251,8 +251,7 @@ impl MarkCompactSpace { ) -> ObjectReference { debug_assert!( crate::util::metadata::vo_bit::is_vo_bit_set(object), - "{:x}: VO bit not set", - object + "{object:x}: VO bit not set", ); if MarkCompactSpace::::test_and_mark(object) { queue.enqueue(object); @@ -267,8 +266,7 @@ impl MarkCompactSpace { ) -> ObjectReference { debug_assert!( crate::util::metadata::vo_bit::is_vo_bit_set(object), - "{:x}: VO bit not set", - object + "{object:x}: VO bit not set", ); // from this stage and onwards, mark bit is no longer needed // therefore, it can be reused to save one extra bit in metadata @@ -416,12 +414,12 @@ impl MarkCompactSpace { let maybe_forwarding_pointer = Self::get_header_forwarding_pointer(obj); if let Some(forwarding_pointer) = maybe_forwarding_pointer { - trace!("Compact {} to {}", obj, forwarding_pointer); + trace!("Compact {obj} to {forwarding_pointer}"); let new_object = forwarding_pointer; Self::clear_header_forwarding_pointer(new_object); // copy object - trace!(" copy from {} to {}", obj, new_object); + trace!(" copy from {obj} to {new_object}"); let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); // update VO bit, @@ -429,12 +427,12 @@ impl MarkCompactSpace { to = new_object.to_object_start::() + copied_size; debug_assert_eq!(end_of_new_object, to); } else { - trace!("Skipping dead object {}", obj); + trace!("Skipping dead object {obj}"); } } } - debug!("Compact end: to = {}", to); + debug!("Compact end: to = {to}"); // reset the bump pointer self.pr.reset_cursor(to); diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index fd473dd0ee..3343375682 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -126,7 +126,7 @@ impl SFT for MallocSpace { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - trace!("initialize_object_metadata for object {}", object); + trace!("initialize_object_metadata for object {object}"); set_vo_bit(object); } @@ -186,9 +186,7 @@ impl Space for MallocSpace { // The VO bit tells that the object is in space. debug_assert!( *active_mem.get(&addr).unwrap() != 0, - "active mem check failed for {} (object {}) - was freed", - addr, - object + "active mem check failed for {addr} (object {object}) - was freed", ); } else { // The VO bit tells that the object is not in space. It could never be allocated, or have been freed. 
@@ -455,12 +453,12 @@ impl MallocSpace { // indirect call instructions in the generated assembly fn free_internal(&self, addr: Address, bytes: usize, offset_malloc_bit: bool) { if offset_malloc_bit { - trace!("Free memory {:x}", addr); + trace!("Free memory {addr:x}"); offset_free(addr); unsafe { unset_offset_malloc_bit_unsafe(addr) }; } else { let ptr = addr.to_mut_ptr(); - trace!("Free memory {:?}", ptr); + trace!("Free memory {ptr:?}"); unsafe { free(ptr); } @@ -481,8 +479,7 @@ impl MallocSpace { ) -> ObjectReference { assert!( self.in_space(object), - "Cannot mark an object {} that was not alloced by malloc.", - object, + "Cannot mark an object {object} that was not alloced by malloc.", ); if !is_marked::(object, Ordering::Relaxed) { @@ -509,12 +506,11 @@ impl MallocSpace { .try_map_metadata_space(start, BYTES_IN_CHUNK, self.get_name()); debug_assert!( mmap_metadata_result.is_ok(), - "mmap sidemetadata failed for chunk_start ({})", - start + "mmap sidemetadata failed for chunk_start ({start})", ); // Set the chunk mark at the end. So if we have chunk mark set, we know we have mapped side metadata // for the chunk. - trace!("set chunk mark bit for {}", start); + trace!("set chunk mark bit for {start}"); self.chunk_map .set_allocated(Chunk::from_aligned_address(start), true); }; @@ -610,11 +606,11 @@ impl MallocSpace { // We are the only thread that is dealing with the object. We can use non-atomic methods for the metadata. if !unsafe { is_marked_unsafe::(object) } { // Dead object - trace!("Object {} has been allocated but not marked", object); + trace!("Object {object} has been allocated but not marked"); // Free object self.free_internal(obj_start, bytes, offset_malloc); - trace!("free object {}", object); + trace!("free object {object}"); unsafe { unset_vo_bit_unsafe(object) }; true @@ -687,7 +683,7 @@ impl MallocSpace { #[cfg(debug_assertions)] let mut live_bytes = 0; - debug!("Check active chunk {:?}", chunk_start); + debug!("Check active chunk {chunk_start:?}"); let mut address = chunk_start; let chunk_end = chunk_start + BYTES_IN_CHUNK; @@ -758,21 +754,18 @@ impl MallocSpace { if ASSERT_ALLOCATION { debug_assert!( self.active_mem.lock().unwrap().contains_key(&obj_start), - "Address {} with VO bit is not in active_mem", - obj_start + "Address {obj_start} with VO bit is not in active_mem", ); debug_assert_eq!( self.active_mem.lock().unwrap().get(&obj_start), Some(&bytes), - "Address {} size in active_mem does not match the size from malloc_usable_size", - obj_start + "Address {obj_start} size in active_mem does not match the size from malloc_usable_size", ); } debug_assert!( unsafe { is_marked_unsafe::(object) }, - "Dead object = {} found after sweep", - object + "Dead object = {object} found after sweep", ); live_bytes += bytes; @@ -807,7 +800,7 @@ impl MallocSpace { #[cfg(debug_assertions)] let mut live_bytes = 0; - debug!("Check active chunk {:?}", chunk_start); + debug!("Check active chunk {chunk_start:?}"); // The start of a possibly empty page. This will be updated during the sweeping, and always points to the next page of last live objects. 
let mut empty_page_start = Address::ZERO; @@ -824,14 +817,12 @@ impl MallocSpace { let (obj_start, _, bytes) = Self::get_malloc_addr_size(object); debug_assert!( self.active_mem.lock().unwrap().contains_key(&obj_start), - "Address {} with VO bit is not in active_mem", - obj_start + "Address {obj_start} with VO bit is not in active_mem", ); debug_assert_eq!( self.active_mem.lock().unwrap().get(&obj_start), Some(&bytes), - "Address {} size in active_mem does not match the size from malloc_usable_size", - obj_start + "Address {obj_start} size in active_mem does not match the size from malloc_usable_size", ); } diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 5bef9a3e52..49b4c859f4 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -328,7 +328,7 @@ impl Block { // Current cursor let mut cursor = cell; - debug!("Sweep block {:?}, cell size {}", self, cell_size); + debug!("Sweep block {self:?}, cell size {cell_size}"); while cell + cell_size <= self.end() { // possible object ref @@ -338,19 +338,12 @@ impl Block { cursor + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND, ) }; - trace!( - "{:?}: cell = {}, last cell in free list = {}, cursor = {}, potential object = {}", - self, - cell, - last, - cursor, - potential_object_ref - ); + trace!("{self:?}: cell = {cell}, last cell in free list = {last}, cursor = {cursor}, potential object = {potential_object_ref}"); if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC .is_marked::(potential_object_ref, Ordering::SeqCst) { - debug!("{:?} Live cell: {}", self, cell); + debug!("{self:?} Live cell: {cell}"); // If the mark bit is set, the cell is alive. // We directly jump to the end of the cell. cell += cell_size; @@ -361,10 +354,7 @@ impl Block { if cursor >= cell + cell_size { // We now stepped to the next cell. This means we did not find mark bit in the current cell, and we can add this cell to free list. - debug!( - "{:?} Free cell: {}, last cell in freelist is {}", - self, cell, last - ); + debug!("{self:?} Free cell: {cell}, last cell in freelist is {last}"); // Clear VO bit: we don't know where the object reference actually is, so we bulk zero the cell. 
#[cfg(feature = "vo_bit")] diff --git a/src/policy/marksweepspace/native_ms/block_list.rs b/src/policy/marksweepspace/native_ms/block_list.rs index 693d078e02..27e823a665 100644 --- a/src/policy/marksweepspace/native_ms/block_list.rs +++ b/src/policy/marksweepspace/native_ms/block_list.rs @@ -356,7 +356,7 @@ mod tests { for size in 0..=MAX_BIN_SIZE { let bin = mi_bin_from_size(size); let bin_range = get_bin_size_range(bin, &block_lists); - assert!(bin_range.is_some(), "Invalid bin {} for size {}", bin, size); + assert!(bin_range.is_some(), "Invalid bin {bin} for size {size}"); assert!( size >= bin_range.unwrap().0 && bin < bin_range.unwrap().1, "Assigning size={} to bin={} ({:?}) incorrect", diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 1dc87cd04c..574703bf69 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -390,8 +390,7 @@ impl MarkSweepSpace { ) -> ObjectReference { debug_assert!( self.in_space(object), - "Cannot mark an object {} that was not alloced by free list allocator.", - object, + "Cannot mark an object {object} that was not alloced by free list allocator.", ); if self.attempt_mark(object) { let block = Block::containing(object); diff --git a/src/policy/sft.rs b/src/policy/sft.rs index 052a145bf3..004cff87aa 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -130,10 +130,7 @@ impl SFT for EmptySpaceSFT { EMPTY_SFT_NAME } fn is_live(&self, object: ObjectReference) -> bool { - panic!( - "Called is_live() on {:x}, which maps to an empty space", - object - ) + panic!("Called is_live() on {object:x}, which maps to an empty space") } #[cfg(feature = "sanity")] fn is_sane(&self) -> bool { @@ -180,10 +177,7 @@ impl SFT for EmptySpaceSFT { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - panic!( - "Called initialize_object_metadata() on {:x}, which maps to an empty space", - object - ) + panic!("Called initialize_object_metadata() on {object:x}, which maps to an empty space") } fn sft_trace_object( @@ -194,8 +188,7 @@ impl SFT for EmptySpaceSFT { ) -> ObjectReference { // We do not have the `VM` type parameter here, so we cannot forward the call to the VM. panic!( - "Call trace_object() on {}, which maps to an empty space. SFTProcessEdges does not support the fallback to vm_trace_object().", - object, + "Call trace_object() on {object}, which maps to an empty space. SFTProcessEdges does not support the fallback to vm_trace_object()." 
) } } diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs index c9db21830e..f8051d821b 100644 --- a/src/policy/sft_map.rs +++ b/src/policy/sft_map.rs @@ -278,7 +278,7 @@ mod space_map { let assert_for_index = |i: usize| { let (start, end) = SFTSpaceMap::index_to_space_range(i); - println!("Space: Index#{} = [{}, {})", i, start, end); + println!("Space: Index#{i} = [{start}, {end})"); assert_eq!(SFTSpaceMap::addr_to_index(start), i); assert_eq!(SFTSpaceMap::addr_to_index(end - 1), i); }; @@ -290,7 +290,7 @@ mod space_map { // assert space end let (_, last_space_end) = SFTSpaceMap::index_to_space_range(MAX_SPACES); - println!("Space end = {}", last_space_end); + println!("Space end = {last_space_end}"); println!("Heap end = {}", vm_layout().heap_end); assert_eq!(last_space_end, vm_layout().heap_end); @@ -407,7 +407,7 @@ mod dense_chunk_map { last_chunk ); while chunk < last_chunk { - trace!("Update {} to index {}", chunk, index); + trace!("Update {chunk} to index {index}"); SFT_DENSE_CHUNK_MAP_INDEX.store_atomic::(chunk, index, Ordering::SeqCst); chunk += BYTES_IN_CHUNK; } @@ -529,10 +529,7 @@ mod sparse_chunk_map { debug!("Update SFT for Chunk {} as {}", start, space.name(),); let first = start.chunk_index(); let start_chunk = chunk_index_to_address(first); - debug!( - "Update SFT for {} bytes of Chunk {} #{}", - bytes, start_chunk, first - ); + debug!("Update SFT for {bytes} bytes of Chunk {start_chunk} #{first}"); } fn trace_sft_map(&self) { diff --git a/src/policy/space.rs b/src/policy/space.rs index e44874fe5b..03b904def3 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -96,11 +96,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { } fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address { - trace!( - "Space.acquire, tls={:?}, alloc_options={:?}", - tls, - alloc_options - ); + trace!("Space.acquire, tls={tls:?}, alloc_options={alloc_options:?}",); debug_assert!( !self.will_oom_on_acquire(pages << LOG_BYTES_IN_PAGE), @@ -304,12 +300,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { * @param new_chunk {@code true} if the new space encroached upon or started a new chunk or chunks. */ fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) { - trace!( - "Grow space from {} for {} bytes (new chunk = {})", - start, - bytes, - new_chunk - ); + trace!("Grow space from {start} for {bytes} bytes (new chunk = {new_chunk})",); // If this is not a new chunk, the SFT for [start, start + bytes) should alreayd be initialized. #[cfg(debug_assertions)] @@ -465,10 +456,10 @@ pub(crate) fn print_vm_map( )?; match common.vmrequest { VMRequest::Extent { extent, .. } => { - write!(out, " E {}", extent)?; + write!(out, " E {extent}")?; } VMRequest::Fraction { frac, .. 
} => { - write!(out, " F {}", frac)?; + write!(out, " F {frac}")?; } _ => {} } @@ -748,9 +739,9 @@ fn get_frac_available(frac: f32) -> usize { trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); let mb = bytes >> LOG_BYTES_IN_MBYTE; let rtn = mb << LOG_BYTES_IN_MBYTE; - trace!("rtn={}", rtn); + trace!("rtn={rtn}"); let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK); - trace!("aligned_rtn={}", aligned_rtn); + trace!("aligned_rtn={aligned_rtn}"); aligned_rtn } diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index ab1101aaf5..16d64284cd 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -126,7 +126,7 @@ impl Space for VMSpace { crate::policy::sft::EMPTY_SFT_NAME ); // Set SFT - assert!(sft_map.has_sft_entry(start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", start); + assert!(sft_map.has_sft_entry(start), "The VM space start (aligned to {start}) does not have a valid SFT entry. Possibly the address range is not in the address range we use."); unsafe { sft_map.eager_initialize(self.as_sft(), start, size); } @@ -220,10 +220,7 @@ impl VMSpace { ) .is_empty()); - debug!( - "Align VM space ({}, {}) to chunk ({}, {})", - start, end, chunk_start, chunk_end - ); + debug!("Align VM space ({start}, {end}) to chunk ({chunk_start}, {chunk_end})"); // Mark as mapped in mmapper self.common.mmapper.mark_as_mapped(chunk_start, chunk_size); @@ -236,7 +233,7 @@ impl VMSpace { // self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor); // Set SFT if we should if set_sft { - assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start); + assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {chunk_start}) does not have a valid SFT entry. Possibly the address range is not in the address range we use."); unsafe { SFT_MAP.update(self.as_sft(), chunk_start, chunk_size); } @@ -279,8 +276,7 @@ impl VMSpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::metadata::vo_bit::is_vo_bit_set(object), - "{:x}: VO bit not set", - object + "{object:x}: VO bit not set", ); debug_assert!(self.in_space(object)); if self.mark_state.test_and_mark::(object) { diff --git a/src/scheduler/affinity.rs b/src/scheduler/affinity.rs index d76a517d15..774b903454 100644 --- a/src/scheduler/affinity.rs +++ b/src/scheduler/affinity.rs @@ -35,7 +35,7 @@ impl AffinityKind { AffinityKind::OsDefault => {} AffinityKind::RoundRobin(cpuset) => { let cpu = cpuset[thread % cpuset.len()]; - debug!("Set affinity for thread {} to core {}", thread, cpu); + debug!("Set affinity for thread {thread} to core {cpu}"); bind_current_thread_to_core(cpu); } } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 7e50c86aa3..42c96ea23a 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -837,7 +837,7 @@ pub trait ScanObjectsWork: GCWork + Sized { for object in objects_to_scan.iter().copied() { if ::VMScanning::support_slot_enqueuing(tls, object) { - trace!("Scan object (slot) {}", object); + trace!("Scan object (slot) {object}"); // If an object supports slot-enqueuing, we enqueue its slots. 
::VMScanning::scan_object(tls, object, &mut closure); self.post_scan_object(object); @@ -867,7 +867,7 @@ pub trait ScanObjectsWork: GCWork + Sized { object_tracer_context.with_tracer(worker, |object_tracer| { // Scan objects and trace their outgoing edges at the same time. for object in scan_later.iter().copied() { - trace!("Scan object (node) {}", object); + trace!("Scan object (node) {object}"); ::VMScanning::scan_object_and_trace_edges( tls, object, @@ -1129,8 +1129,7 @@ impl, O2OPE: ProcessEdgesWork {}", - object, new_object + "Object moved while tracing root unmovable root object: {object} -> {new_object}", ); } diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index e849f9df07..00b279b4c6 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -242,7 +242,7 @@ impl GCWorkScheduler { let mut new_packets = false; for (id, work_bucket) in self.work_buckets.iter() { if work_bucket.is_activated() && work_bucket.maybe_schedule_sentinel() { - trace!("Scheduled sentinel packet into {:?}", id); + trace!("Scheduled sentinel packet into {id:?}"); new_packets = true; } } @@ -271,13 +271,13 @@ impl GCWorkScheduler { new_packets = new_packets || !bucket.is_drained(); if new_packets { // Quit the loop. There are already new packets in the newly opened buckets. - trace!("Found new packets at stage {:?}. Break.", id); + trace!("Found new packets at stage {id:?}. Break."); break; } new_packets = new_packets || bucket.maybe_schedule_sentinel(); if new_packets { // Quit the loop. A sentinel packet is added to the newly opened buckets. - trace!("Sentinel is scheduled at stage {:?}. Break.", id); + trace!("Sentinel is scheduled at stage {id:?}. Break."); break; } } @@ -317,7 +317,7 @@ impl GCWorkScheduler { let mut error_example = None; for (id, bucket) in self.work_buckets.iter() { if bucket.is_activated() && !bucket.is_empty() { - error!("Work bucket {:?} is active but not empty!", id); + error!("Work bucket {id:?} is active but not empty!"); // This error can be hard to reproduce. 
// If an error happens in the release build where logs are turned off, // we should show at least one abnormal bucket in the panic message @@ -326,7 +326,7 @@ impl GCWorkScheduler { } } if let Some(id) = error_example { - panic!("Some active buckets (such as {:?}) are not empty.", id); + panic!("Some active buckets (such as {id:?}) are not empty."); } } diff --git a/src/scheduler/stat.rs b/src/scheduler/stat.rs index 027d3c490c..e1652cb64b 100644 --- a/src/scheduler/stat.rs +++ b/src/scheduler/stat.rs @@ -60,7 +60,7 @@ impl SchedulerStat { let val = counts.entry(pkt).or_default(); *val += c; } - stat.insert("total-work.count".to_owned(), format!("{}", total_count)); + stat.insert("total-work.count".to_owned(), format!("{total_count}")); // Work execution times let mut duration_overall: WorkCounterBase = Default::default(); for (t, vs) in &self.work_counters { @@ -109,7 +109,7 @@ impl SchedulerStat { ); for (pkt, count) in counts { - stat.insert(pkt, format!("{}", count)); + stat.insert(pkt, format!("{count}")); } for (pkt, time) in times { diff --git a/src/scheduler/worker.rs b/src/scheduler/worker.rs index 40a54e4046..19eb7feae0 100644 --- a/src/scheduler/worker.rs +++ b/src/scheduler/worker.rs @@ -75,9 +75,7 @@ impl GCWorkerShared { let space_index = space_descriptor.get_index(); debug_assert!( space_index < MAX_SPACES, - "Space index {} is not in the range of [0, {})", - space_index, - MAX_SPACES + "Space index {space_index} is not in the range of [0, {MAX_SPACES})", ); // Accumulate the live bytes for the index live_bytes_per_space[space_index] += bytes; diff --git a/src/scheduler/worker_monitor.rs b/src/scheduler/worker_monitor.rs index c84dedfcd3..74e09a6548 100644 --- a/src/scheduler/worker_monitor.rs +++ b/src/scheduler/worker_monitor.rs @@ -147,7 +147,7 @@ impl WorkerMonitor { let mut should_wait = false; if all_parked { - trace!("Worker {} is the last worker parked.", ordinal); + trace!("Worker {ordinal} is the last worker parked."); let result = on_last_parked(&mut sync.goals); match result { LastParkedResult::ParkSelf => { @@ -272,16 +272,16 @@ mod tests { // This emulates the use pattern in the scheduler, i.e. checking the condition // ("Is there any work packets available") without holding a mutex. while !should_unpark.load(Ordering::SeqCst) { - println!("Thread {} parking...", ordinal); + println!("Thread {ordinal} parking..."); worker_monitor .park_and_wait(ordinal, |_goals| { - println!("Thread {} is the last thread parked.", ordinal); + println!("Thread {ordinal} is the last thread parked."); on_last_parked_called.fetch_add(1, Ordering::SeqCst); should_unpark.store(true, Ordering::SeqCst); super::LastParkedResult::WakeAll }) .unwrap(); - println!("Thread {} unparked.", ordinal); + println!("Thread {ordinal} unparked."); } }); } @@ -312,17 +312,17 @@ mod tests { // Record the number of threads entering the following `while` loop. 
threads_running.fetch_add(1, Ordering::SeqCst); while !should_unpark.load(Ordering::SeqCst) { - println!("Thread {} parking...", ordinal); + println!("Thread {ordinal} parking..."); worker_monitor .park_and_wait(ordinal, |_goals| { - println!("Thread {} is the last thread parked.", ordinal); + println!("Thread {ordinal} is the last thread parked."); on_last_parked_called.fetch_add(1, Ordering::SeqCst); should_unpark.store(true, Ordering::SeqCst); i_am_the_last_parked_worker = true; super::LastParkedResult::WakeSelf }) .unwrap(); - println!("Thread {} unparked.", ordinal); + println!("Thread {ordinal} unparked."); } threads_running.fetch_sub(1, Ordering::SeqCst); diff --git a/src/util/address.rs b/src/util/address.rs index c87a5d3abb..53a32e80ab 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -74,9 +74,7 @@ impl Sub
for Address { fn sub(self, other: Address) -> ByteSize { debug_assert!( self.0 >= other.0, - "for (addr_a - addr_b), a({}) needs to be larger than b({})", - self, - other + "for (addr_a - addr_b), a({self}) needs to be larger than b({other})", ); self.0 - other.0 } @@ -624,7 +622,7 @@ impl ObjectReference { pub fn to_object_start(self) -> Address { use crate::vm::ObjectModel; let object_start = VM::VMObjectModel::ref_to_object_start(self); - debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || object_start == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, ref_to_object_start() returns {}", self, object_start); + debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || object_start == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {self}, ref_to_object_start() returns {object_start}"); debug_assert!( self.to_raw_address() >= object_start + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND, diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index 4e53ab953d..c935706925 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -454,7 +454,7 @@ pub trait Allocator: Downcast { .state .allocation_success .swap(true, Ordering::SeqCst); - trace!("fail with oom={}", fail_with_oom); + trace!("fail with oom={fail_with_oom}"); if fail_with_oom { // Note that we throw a `HeapOutOfMemory` error here and return a null ptr back to the VM trace!("Throw HeapOutOfMemory!"); @@ -480,7 +480,7 @@ pub trait Allocator: Downcast { // Record whether last collection was an Emergency collection. If so, we make one more // attempt to allocate before we signal an OOM. emergency_collection = self.get_context().state.is_emergency_collection(); - trace!("Got emergency collection as {}", emergency_collection); + trace!("Got emergency collection as {emergency_collection}"); previous_result_zero = true; } } diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 9c70fba5c1..af9eb90c7a 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -212,11 +212,7 @@ impl BumpAllocator { trace!("Failed to acquire a new block"); acquired_start } else { - trace!( - "Acquired a new block of size {} with start address {}", - block_size, - acquired_start - ); + trace!("Acquired a new block of size {block_size} with start address {acquired_start}"); if !stress_test { self.set_limit(acquired_start, acquired_start + block_size); self.alloc(size, align, offset) diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index b443fe0c6a..e17c5a7fa6 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -50,8 +50,7 @@ impl Allocator for FreeListAllocator { fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address { debug_assert!( size <= MAX_BIN_SIZE, - "Alloc request for {} bytes is too big.", - size + "Alloc request for {size} bytes is too big.", ); debug_assert!(align <= VM::MAX_ALIGNMENT); debug_assert!(align >= VM::MIN_ALIGNMENT); @@ -108,7 +107,7 @@ impl Allocator for FreeListAllocator { offset: usize, need_poll: bool, ) -> Address { - trace!("allow slow precise stress s={}", size); + trace!("allow slow precise stress s={size}"); if need_poll { self.acquire_global_block(0, 0, true); } @@ -157,9 +156,7 @@ impl FreeListAllocator { unsafe { cell.store::
(Address::ZERO) }; debug_assert!( next_cell.is_zero() || block.includes_address(next_cell), - "next_cell {} is not in {:?}", - next_cell, - block + "next_cell {next_cell} is not in {block:?}", ); block.store_free_list(next_cell); @@ -304,7 +301,7 @@ impl FreeListAllocator { } crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => { - debug!("Acquire global block: Fresh {:?}", block); + debug!("Acquire global block: Fresh {block:?}"); self.add_to_available_blocks(bin, block, stress_test); self.init_block(block, self.available_blocks[bin].size); @@ -312,7 +309,7 @@ impl FreeListAllocator { } crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedAvailable(block) => { - debug!("Acquire global block: AbandonedAvailable {:?}", block); + debug!("Acquire global block: AbandonedAvailable {block:?}"); block.store_tls(self.tls); if block.has_free_cells() { self.add_to_available_blocks(bin, block, stress_test); @@ -323,7 +320,7 @@ impl FreeListAllocator { } crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedUnswept(block) => { - debug!("Acquire global block: AbandonedUnswep {:?}", block); + debug!("Acquire global block: AbandonedUnswep {block:?}"); block.store_tls(self.tls); block.sweep::(); if block.has_free_cells() { diff --git a/src/util/finalizable_processor.rs b/src/util/finalizable_processor.rs index 70f97bfd52..5fd6cf8d3a 100644 --- a/src/util/finalizable_processor.rs +++ b/src/util/finalizable_processor.rs @@ -53,10 +53,10 @@ impl FinalizableProcessor { for mut f in self.candidates.drain(start..).collect::>() { let reff = f.get_reference(); - trace!("Pop {:?} for finalization", reff); + trace!("Pop {reff:?} for finalization"); if reff.is_live() { FinalizableProcessor::::forward_finalizable_reference(e, &mut f); - trace!("{:?} is live, push {:?} back to candidates", reff, f); + trace!("{reff:?} is live, push {f:?} back to candidates"); self.candidates.push(f); continue; } diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs index 69caf12b12..064d76466c 100644 --- a/src/util/heap/chunk_map.rs +++ b/src/util/heap/chunk_map.rs @@ -137,11 +137,8 @@ impl ChunkMap { let old_state = self.get_internal(chunk); // If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state. assert!( - old_state.is_free() || old_state.get_space_index() == self.space_index, - "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.", - chunk, - old_state, - state + old_state.is_free() || old_state.get_space_index() == state.get_space_index(), + "Chunk {chunk:?}: old state {old_state:?}, new state {state:?}. Cannot set to new state.", ); } // Update alloc byte diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index aa66fe232d..b9f9c1f194 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -153,10 +153,7 @@ impl FreeListPageResource { // If it is RawMemoryFreeList, it will occupy `space_displacement` bytes at the start of // the space. We add it to the start address. let actual_start = start + space_displacement; - debug!( - " in new_contiguous: space_displacement = {:?}, actual_start = {}", - space_displacement, actual_start - ); + debug!(" in new_contiguous: space_displacement = {space_displacement:?}, actual_start = {actual_start}"); let growable = cfg!(target_pointer_width = "64"); FreeListPageResource { @@ -222,10 +219,7 @@ impl FreeListPageResource { // We are not using mmapper.protect(). 
mmapper.protect() protects the whole chunk and // may protect memory that is still in use. if let Err(e) = memory::mprotect(start, conversions::pages_to_bytes(pages)) { - panic!( - "Failed at protecting memory (starting at {}): {:?}", - start, e - ); + panic!("Failed at protecting memory (starting at {start}): {e:?}",); } } @@ -237,10 +231,7 @@ impl FreeListPageResource { conversions::pages_to_bytes(pages), self.protect_memory_on_release.unwrap(), ) { - panic!( - "Failed at unprotecting memory (starting at {}): {:?}", - start, e - ); + panic!("Failed at unprotecting memory (starting at {start}): {e:?}",); } } diff --git a/src/util/heap/gc_trigger.rs b/src/util/heap/gc_trigger.rs index 9e1912f1cf..d2b204a966 100644 --- a/src/util/heap/gc_trigger.rs +++ b/src/util/heap/gc_trigger.rs @@ -130,7 +130,7 @@ impl GCTrigger { let max_bytes = heap_size_bytes as f64 * max; let max_bytes = conversions::raw_align_up(max_bytes as usize, BYTES_IN_PAGE); if max_bytes > DEFAULT_MAX_NURSERY { - warn!("Proportional nursery with max size {} ({}) is larger than DEFAULT_MAX_NURSERY ({}). Use DEFAULT_MAX_NURSERY instead.", max, max_bytes, DEFAULT_MAX_NURSERY); + warn!("Proportional nursery with max size {max} ({max_bytes}) is larger than DEFAULT_MAX_NURSERY ({DEFAULT_MAX_NURSERY}). Use DEFAULT_MAX_NURSERY instead."); DEFAULT_MAX_NURSERY } else { max_bytes @@ -153,7 +153,7 @@ impl GCTrigger { * min; let min_bytes = conversions::raw_align_up(min_bytes as usize, BYTES_IN_PAGE); if min_bytes < DEFAULT_MIN_NURSERY { - warn!("Proportional nursery with min size {} ({}) is smaller than DEFAULT_MIN_NURSERY ({}). Use DEFAULT_MIN_NURSERY instead.", min, min_bytes, DEFAULT_MIN_NURSERY); + warn!("Proportional nursery with min size {min} ({min_bytes}) is smaller than DEFAULT_MIN_NURSERY ({DEFAULT_MIN_NURSERY}). Use DEFAULT_MIN_NURSERY instead."); DEFAULT_MIN_NURSERY } else { min_bytes @@ -561,7 +561,7 @@ impl MemBalancerTrigger { extra_reserve: usize, stats: &mut MemBalancerStats, ) { - trace!("compute new heap limit: {:?}", stats); + trace!("compute new heap limit: {stats:?}"); // Constants from the original paper const ALLOCATION_SMOOTH_FACTOR: f64 = 0.95; @@ -593,16 +593,8 @@ impl MemBalancerTrigger { stats.collection_time, COLLECTION_SMOOTH_FACTOR, ); - trace!( - "after smoothing, alloc mem = {}, alloc_time = {}", - alloc_mem, - alloc_time - ); - trace!( - "after smoothing, gc mem = {}, gc_time = {}", - gc_mem, - gc_time - ); + trace!("after smoothing, alloc mem = {alloc_mem}, alloc_time = {alloc_time}"); + trace!("after smoothing, gc mem = {gc_mem}, gc_time = {gc_time}"); // We got the smoothed stats. Now save the current stats as previous stats stats.allocation_pages_prev = Some(stats.allocation_pages); @@ -632,12 +624,7 @@ impl MemBalancerTrigger { // This is the optimal heap limit due to mem balancer. We will need to clamp the value to the defined min/max range. let optimal_heap = live + e as usize + extra_reserve + pending_pages; - trace!( - "optimal = live {} + sqrt(live) {} + extra {}", - live, - e, - extra_reserve - ); + trace!("optimal = live {live} + sqrt(live) {e} + extra {extra_reserve}"); // The new heap size must be within min/max. 
let new_heap = optimal_heap.clamp(self.min_heap_pages, self.max_heap_pages); diff --git a/src/util/heap/layout/byte_map_mmapper.rs b/src/util/heap/layout/byte_map_mmapper.rs index 412e53ddcb..f3c43d61dd 100644 --- a/src/util/heap/layout/byte_map_mmapper.rs +++ b/src/util/heap/layout/byte_map_mmapper.rs @@ -28,7 +28,7 @@ pub struct ByteMapMmapper { impl fmt::Debug for ByteMapMmapper { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ByteMapMmapper({})", MMAP_NUM_CHUNKS) + write!(f, "ByteMapMmapper({MMAP_NUM_CHUNKS})") } } diff --git a/src/util/heap/layout/fragmented_mapper.rs b/src/util/heap/layout/fragmented_mapper.rs index 0ebec07b83..8950aa4b32 100644 --- a/src/util/heap/layout/fragmented_mapper.rs +++ b/src/util/heap/layout/fragmented_mapper.rs @@ -61,7 +61,7 @@ struct InnerFragmentedMapper { impl fmt::Debug for FragmentedMapper { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "FragmentedMapper({})", MMAP_NUM_CHUNKS) + write!(f, "FragmentedMapper({MMAP_NUM_CHUNKS})") } } diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index 90a7236a66..633743cbc1 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -160,7 +160,7 @@ impl VMMap for Map32 { } #[allow(clippy::while_immutable_condition)] fn free_all_chunks(&self, any_chunk: Address) { - debug!("free_all_chunks: {}", any_chunk); + debug!("free_all_chunks: {any_chunk}"); let (_sync, self_mut) = self.mut_self_with_sync(); debug_assert!(any_chunk == conversions::chunk_align_down(any_chunk)); if !any_chunk.is_zero() { @@ -178,7 +178,7 @@ impl VMMap for Map32 { } unsafe fn free_contiguous_chunks(&self, start: Address) -> usize { - debug!("free_contiguous_chunks: {}", start); + debug!("free_contiguous_chunks: {start}"); let (_sync, _) = self.mut_self_with_sync(); debug_assert!(start == conversions::chunk_align_down(start)); let chunk = start.chunk_index(); @@ -224,9 +224,7 @@ impl VMMap for Map32 { let alloced_chunk = self_mut.region_map.alloc(trailing_chunks as _); debug_assert!( alloced_chunk == unavail_start_chunk as i32, - "{} != {}", - alloced_chunk, - unavail_start_chunk + "{alloced_chunk} != {unavail_start_chunk}", ); /* set up the global page map and place chunks on free list */ let mut first_page = 0; @@ -265,6 +263,9 @@ impl Map32 { &mut *self.inner.get() } + /// Get a mutable reference to the inner Map32Inner with a lock. + /// The caller should only use the mutable reference when the lock is held. 
+ #[allow(clippy::mut_from_ref)] fn mut_self_with_sync(&self) -> (MutexGuard<()>, &mut Map32Inner) { let guard = self.sync.lock().unwrap(); (guard, unsafe { self.mut_self() }) @@ -287,7 +288,7 @@ impl Map32 { for offset in 0..chunks { let index = (chunk + offset) as usize; let chunk_start = conversions::chunk_index_to_address(index); - debug!("Clear descriptor for Chunk {}", chunk_start); + debug!("Clear descriptor for Chunk {chunk_start}"); self.mut_self().descriptor_map[index] = SpaceDescriptor::UNINITIALIZED; SFT_MAP.clear(chunk_start); } diff --git a/src/util/heap/layout/mmapper.rs b/src/util/heap/layout/mmapper.rs index 364cc0bc0d..ddde50e382 100644 --- a/src/util/heap/layout/mmapper.rs +++ b/src/util/heap/layout/mmapper.rs @@ -197,7 +197,7 @@ impl MapState { match group.key { MapState::Unmapped => { - trace!("Trying to quarantine {} - {}", start_addr, end_addr); + trace!("Trying to quarantine {start_addr} - {end_addr}"); mmap_noreserve(start_addr, end_addr - start_addr, strategy, anno)?; for state in group { @@ -205,10 +205,10 @@ impl MapState { } } MapState::Quarantined => { - trace!("Already quarantine {} - {}", start_addr, end_addr); + trace!("Already quarantine {start_addr} - {end_addr}"); } MapState::Mapped => { - trace!("Already mapped {} - {}", start_addr, end_addr); + trace!("Already mapped {start_addr} - {end_addr}"); } MapState::Protected => { panic!("Cannot quarantine protected memory") @@ -233,7 +233,7 @@ impl MapState { state.store(MapState::Protected, Ordering::Relaxed); } MapState::Protected => {} - _ => panic!("Cannot transition {:?} to protected", mmap_start), + _ => panic!("Cannot transition {mmap_start:?} to protected"), } Ok(()) } diff --git a/src/util/heap/monotonepageresource.rs b/src/util/heap/monotonepageresource.rs index c30cb010cc..b30753cbaf 100644 --- a/src/util/heap/monotonepageresource.rs +++ b/src/util/heap/monotonepageresource.rs @@ -73,10 +73,7 @@ impl PageResource for MonotonePageResource { required_pages: usize, tls: VMThread, ) -> Result { - debug!( - "In MonotonePageResource, reserved_pages = {}, required_pages = {}", - reserved_pages, required_pages - ); + debug!("In MonotonePageResource, reserved_pages = {reserved_pages}, required_pages = {required_pages}"); let mut new_chunk = false; let mut sync = self.sync.lock().unwrap(); let mut rtn = sync.cursor; @@ -106,9 +103,9 @@ impl PageResource for MonotonePageResource { } let bytes = pages_to_bytes(required_pages); - debug!("bytes={}", bytes); + debug!("bytes={bytes}"); let mut tmp = sync.cursor + bytes; - debug!("tmp={:?}", tmp); + debug!("tmp={tmp:?}"); if !self.common().contiguous && tmp > sync.sentinel { /* we're out of virtual memory within our discontiguous region, so ask for more */ @@ -136,7 +133,7 @@ impl PageResource for MonotonePageResource { } else { //debug!("tmp={:?} <= sync.sentinel={:?}", tmp, sync.sentinel); sync.cursor = tmp; - debug!("update cursor = {}", tmp); + debug!("update cursor = {tmp}"); /* In a contiguous space we can bump along into the next chunk, so preserve the currentChunk invariant */ if self.common().contiguous && chunk_align_down(sync.cursor) != sync.current_chunk { diff --git a/src/util/malloc/malloc_ms_util.rs b/src/util/malloc/malloc_ms_util.rs index 5d27cf79cb..3e11eca02c 100644 --- a/src/util/malloc/malloc_ms_util.rs +++ b/src/util/malloc/malloc_ms_util.rs @@ -79,19 +79,14 @@ pub fn alloc(size: usize, align: usize, offset: usize) -> (Addres address = align_alloc(size, align); debug_assert!( address.is_aligned_to(align), - "Address: {:x} is not aligned to the 
given alignment: {}", - address, - align + "Address: {address:x} is not aligned to the given alignment: {align}", ); } else { address = align_offset_alloc::(size, align, offset); is_offset_malloc = true; debug_assert!( (address + offset).is_aligned_to(align), - "Address: {:x} is not aligned to the given alignment: {} at offset: {}", - address, - align, - offset + "Address: {address:x} is not aligned to the given alignment: {align} at offset: {offset}", ); } (address, is_offset_malloc) diff --git a/src/util/memory.rs b/src/util/memory.rs index 9a9d4d16dc..7f718d258b 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -312,7 +312,7 @@ pub fn handle_mmap_error( ) -> ! { use std::io::ErrorKind; - eprintln!("Failed to mmap {}, size {}", addr, bytes); + eprintln!("Failed to mmap {addr}, size {bytes}"); eprintln!("{}", get_process_memory_maps()); match error.kind() { @@ -342,7 +342,7 @@ pub fn handle_mmap_error( } _ => {} } - panic!("Unexpected mmap failure: {:?}", error) + panic!("Unexpected mmap failure: {error:?}") } /// Checks if the memory has already been mapped. If not, we panic. @@ -440,7 +440,7 @@ pub fn get_process_memory_maps() -> String { // Handle the error case let error_message = std::str::from_utf8(&output.stderr).expect("Failed to convert error message to string"); - panic!("Failed to get process memory map: {}", error_message) + panic!("Failed to get process memory map: {error_message}") } } @@ -667,6 +667,6 @@ mod tests { #[test] fn test_get_system_total_memory() { let total = get_system_total_memory(); - println!("Total memory: {:?}", total); + println!("Total memory: {total:?}"); } } diff --git a/src/util/metadata/header_metadata.rs b/src/util/metadata/header_metadata.rs index 176a508fbb..2a0f83b898 100644 --- a/src/util/metadata/header_metadata.rs +++ b/src/util/metadata/header_metadata.rs @@ -47,7 +47,7 @@ impl HeaderMetadataSpec { /// spec should be used with a mask to make sure that we exclude the forwarding bits. #[cfg(debug_assertions)] fn assert_mask(&self, mask: Option) { - debug_assert!(mask.is_none() || self.num_of_bits >= 8, "optional_mask is only supported for 8X-bits in-header metadata. Problematic MetadataSpec: ({:?})", self); + debug_assert!(mask.is_none() || self.num_of_bits >= 8, "optional_mask is only supported for 8X-bits in-header metadata. Problematic MetadataSpec: ({self:?})"); } /// Assert if this is a valid spec. 
@@ -59,8 +59,7 @@ impl HeaderMetadataSpec { debug_assert!( (self.bit_offset >> LOG_BITS_IN_BYTE) == ((self.bit_offset + self.num_of_bits as isize - 1) >> LOG_BITS_IN_BYTE), - "Metadata << 8-bits: ({:?}) stretches over two bytes!", - self + "Metadata << 8-bits: ({self:?}) stretches over two bytes!", ); } else if self.num_of_bits >= 8 && self.num_of_bits <= 64 { debug_assert!( diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index a8c22be242..e0903f752f 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -116,11 +116,7 @@ impl SideMetadataSpec { pub(crate) fn assert_metadata_mapped(&self, data_addr: Address) { let meta_start = address_to_meta_address(self, data_addr).align_down(BYTES_IN_PAGE); - trace!( - "ensure_metadata_is_mapped({}).meta_start({})", - data_addr, - meta_start - ); + trace!("ensure_metadata_is_mapped({data_addr}).meta_start({meta_start:?})"); memory::panic_if_unmapped( meta_start, @@ -143,9 +139,7 @@ impl SideMetadataSpec { if let Some(v) = val { assert!( v.to_u8().unwrap() < (1 << (1 << log_b)), - "Input value {:?} is invalid for the spec {:?}", - v, - self + "Input value {v:?} is invalid for the spec {self:?}", ); } } @@ -1515,7 +1509,7 @@ impl SideMetadataContext { /// the actual unmapped space will be bigger than what you specify. #[cfg(test)] pub fn ensure_unmap_metadata_space(&self, start: Address, size: usize) { - trace!("ensure_unmap_metadata_space({}, 0x{:x})", start, size); + trace!("ensure_unmap_metadata_space({start}, 0x{size:x})"); debug_assert!(start.is_aligned_to(BYTES_IN_PAGE)); debug_assert!(size % BYTES_IN_PAGE == 0); diff --git a/src/util/metadata/side_metadata/helpers.rs b/src/util/metadata/side_metadata/helpers.rs index adb56a28e1..07bfcc517c 100644 --- a/src/util/metadata/side_metadata/helpers.rs +++ b/src/util/metadata/side_metadata/helpers.rs @@ -94,7 +94,7 @@ pub(super) fn align_metadata_address( #[cfg(test)] pub(crate) fn ensure_munmap_metadata(start: Address, size: usize) { use crate::util::memory; - trace!("ensure_munmap_metadata({}, 0x{:x})", start, size); + trace!("ensure_munmap_metadata({start}, 0x{size:x})"); assert!(memory::munmap(start, size).is_ok()) } @@ -176,12 +176,7 @@ pub(crate) fn address_to_meta_address( #[cfg(target_pointer_width = "64")] let res = { address_to_contiguous_meta_address(metadata_spec, data_addr) }; - trace!( - "address_to_meta_address({:?}, addr: {}) -> 0x{:x}", - metadata_spec, - data_addr, - res - ); + trace!("address_to_meta_address({metadata_spec:?}, addr: {data_addr}) -> 0x{res:x}"); res } @@ -270,10 +265,7 @@ pub fn find_last_non_zero_bit_in_metadata_bytes( // The value we check has to be in the range. debug_assert!( cur >= meta_start && cur < meta_end, - "Check metadata value at meta address {}, which is not in the range of [{}, {})", - cur, - meta_start, - meta_end + "Check metadata value at meta address {cur}, which is not in the range of [{meta_start}, {meta_end})", ); // If we are looking at an address that is not in a mapped chunk, we need to check if the chunk if mapped. 
diff --git a/src/util/metadata/side_metadata/sanity.rs b/src/util/metadata/side_metadata/sanity.rs index 48d3c55de9..c76ddc2b15 100644 --- a/src/util/metadata/side_metadata/sanity.rs +++ b/src/util/metadata/side_metadata/sanity.rs @@ -72,7 +72,7 @@ fn verify_global_specs_total_size(g_specs: &[SideMetadataSpec]) -> Result<()> { } else { Err(Error::new( ErrorKind::InvalidInput, - format!("Not enough global metadata space for: \n{:?}", g_specs), + format!("Not enough global metadata space for: \n{g_specs:?}"), )) } } @@ -93,7 +93,7 @@ fn verify_local_specs_size(l_specs: &[SideMetadataSpec]) -> Result<()> { { return Err(Error::new( ErrorKind::InvalidInput, - format!("Local metadata is too big: \n{:?}", spec), + format!("Local metadata is too big: \n{spec:?}"), )); } } @@ -147,10 +147,7 @@ fn verify_no_overlap_contiguous( if !(spec_1.get_absolute_offset() >= end_2 || spec_2.get_absolute_offset() >= end_1) { return Err(Error::new( ErrorKind::InvalidInput, - format!( - "Overlapping metadata specs detected:\nTHIS:\n{:#?}\nAND:\n{:#?}", - spec_1, spec_2 - ), + format!("Overlapping metadata specs detected:\nTHIS:\n{spec_1:#?}\nAND:\n{spec_2:#?}",), )); } Ok(()) @@ -588,14 +585,10 @@ pub fn verify_load( }; assert!( expected_val == actual_val, - "verify_load({:#?}, {}) -> Expected (0x{:x}) but found (0x{:x})", - metadata_spec, - data_addr, - expected_val, - actual_val + "verify_load({metadata_spec:#?}, {data_addr}) -> Expected (0x{expected_val:x}) but found (0x{actual_val:x})", ); } - None => panic!("Invalid Metadata Spec: {:#?}", metadata_spec), + None => panic!("Invalid Metadata Spec: {metadata_spec:#?}"), } } @@ -624,7 +617,7 @@ pub fn verify_store( let content = spec_sanity_map.entry(data_addr).or_insert(0); *content = new_val_wrapped; } - None => panic!("Invalid Metadata Spec: {:#?}", metadata_spec), + None => panic!("Invalid Metadata Spec: {metadata_spec:#?}"), } } @@ -659,13 +652,11 @@ pub fn verify_update( assert_eq!( old_val.to_u64().unwrap(), *cur_val, - "Expected old value: {} but found {}", - old_val, - cur_val + "Expected old value: {old_val} but found {cur_val}", ); *cur_val = new_val_wrapped; } - None => panic!("Invalid metadata spec: {:#?}", metadata_spec), + None => panic!("Invalid metadata spec: {metadata_spec:#?}"), } } diff --git a/src/util/metadata/vo_bit/helper.rs b/src/util/metadata/vo_bit/helper.rs index 4dd06459de..1beb1e8530 100644 --- a/src/util/metadata/vo_bit/helper.rs +++ b/src/util/metadata/vo_bit/helper.rs @@ -141,11 +141,7 @@ pub(crate) fn on_trace_object(object: ObjectReference) { if strategy::().vo_bit_available_during_tracing() { // If the VO bits are available during tracing, // we validate the objects we trace using the VO bits. - debug_assert!( - vo_bit::is_vo_bit_set(object), - "{:x}: VO bit not set", - object - ); + debug_assert!(vo_bit::is_vo_bit_set(object), "{object:x}: VO bit not set",); } } diff --git a/src/util/metadata/vo_bit/mod.rs b/src/util/metadata/vo_bit/mod.rs index b5794e4075..f4c6e81d50 100644 --- a/src/util/metadata/vo_bit/mod.rs +++ b/src/util/metadata/vo_bit/mod.rs @@ -74,13 +74,13 @@ pub const VO_BIT_SIDE_METADATA_ADDR: Address = VO_BIT_SIDE_METADATA_SPEC.get_abs /// Atomically set the VO bit for an object. pub(crate) fn set_vo_bit(object: ObjectReference) { - debug_assert!(!is_vo_bit_set(object), "{:x}: VO bit already set", object); + debug_assert!(!is_vo_bit_set(object), "{object:x}: VO bit already set"); VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_raw_address(), 1, Ordering::SeqCst); } /// Atomically unset the VO bit for an object. 
pub(crate) fn unset_vo_bit(object: ObjectReference) { - debug_assert!(is_vo_bit_set(object), "{:x}: VO bit not set", object); + debug_assert!(is_vo_bit_set(object), "{object:x}: VO bit not set"); VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_raw_address(), 0, Ordering::SeqCst); } @@ -96,7 +96,7 @@ pub(crate) fn unset_vo_bit_nocheck(object: ObjectReference) { /// /// This is unsafe: check the comment on `side_metadata::store` pub(crate) unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { - debug_assert!(is_vo_bit_set(object), "{:x}: VO bit not set", object); + debug_assert!(is_vo_bit_set(object), "{object:x}: VO bit not set"); VO_BIT_SIDE_METADATA_SPEC.store::<u8>(object.to_raw_address(), 0); } diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs index 76c0e93715..8352a32030 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -66,9 +66,7 @@ pub fn spin_and_get_forwarded_object( // See: https://github.com/mmtk/mmtk-core/issues/579 debug_assert!( forwarding_bits == FORWARDING_NOT_TRIGGERED_YET, - "Invalid/Corrupted forwarding word {:x} for object {}", - forwarding_bits, - object, + "Invalid/Corrupted forwarding word {forwarding_bits:x} for object {object}", ); object } @@ -162,8 +160,7 @@ pub fn clear_forwarding_bits(object: ObjectReference) { pub fn read_forwarding_pointer<VM: VMBinding>(object: ObjectReference) -> ObjectReference { debug_assert!( is_forwarded_or_being_forwarded::<VM>(object), - "read_forwarding_pointer called for object {:?} that has not started forwarding!", - object, + "read_forwarding_pointer called for object {object:?} that has not started forwarding!", ); // We write the forwarding pointer. We know it is an object reference. @@ -193,7 +190,7 @@ pub fn write_forwarding_pointer( get_forwarding_status::<VM>(object), ); - trace!("write_forwarding_pointer({}, {})", object, new_object); + trace!("write_forwarding_pointer({object}, {new_object})"); VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::<VM, usize>( object, new_object.to_raw_address().as_usize(), diff --git a/src/util/options.rs b/src/util/options.rs index abfada69c8..90395445fd 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -288,13 +288,13 @@ impl Options { if let Err(e) = self.set_from_string_inner(key, val) { match e { SetOptionByStringError::InvalidKey => { - panic!("Invalid Options key: {}", key); + panic!("Invalid Options key: {key}"); } SetOptionByStringError::ValueParseError => { - eprintln!("Warn: unable to set {}={:?}. Can't parse value. Default value will be used.", key, val); + eprintln!("Warn: unable to set {key}={val:?}. Can't parse value. Default value will be used."); } SetOptionByStringError::ValueValidationError => { - eprintln!("Warn: unable to set {}={:?}. Invalid value. Default value will be used.", key, val); + eprintln!("Warn: unable to set {key}={val:?}. Invalid value. Default value will be used."); } } return false; @@ -320,10 +320,10 @@ impl Options { /* Silently skip unrecognized keys. */ } SetOptionByStringError::ValueParseError => { - eprintln!("Warn: unable to set {}={:?}. Can't parse value. Default value will be used.", key, val); + eprintln!("Warn: unable to set {key}={val:?}. Can't parse value. Default value will be used."); } SetOptionByStringError::ValueValidationError => { - eprintln!("Warn: unable to set {}={:?}. Invalid value. Default value will be used.", key, val); + eprintln!("Warn: unable to set {key}={val:?}. Invalid value.
Default value will be used."); } } } @@ -538,7 +538,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, 1); assert_eq!(max, 2); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } // Default min @@ -547,7 +547,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, DEFAULT_MIN_NURSERY); assert_eq!(max, 2); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } // Default max @@ -556,7 +556,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, 1); assert_eq!(max, DEFAULT_MAX_NURSERY); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } // Default both @@ -565,7 +565,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, DEFAULT_MIN_NURSERY); assert_eq!(max, DEFAULT_MAX_NURSERY); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } } @@ -579,7 +579,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, 0.1); assert_eq!(max, 0.8); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } // Default min @@ -588,7 +588,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, DEFAULT_PROPORTIONAL_MIN_NURSERY); assert_eq!(max, 0.8); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } // Default max @@ -597,7 +597,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, 0.1); assert_eq!(max, DEFAULT_PROPORTIONAL_MAX_NURSERY); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } // Default both @@ -606,7 +606,7 @@ mod nursery_size_parsing_tests { assert_eq!(min, DEFAULT_PROPORTIONAL_MIN_NURSERY); assert_eq!(max, DEFAULT_PROPORTIONAL_MAX_NURSERY); } else { - panic!("Failed: {:?}", result); + panic!("Failed: {result:?}"); } } } @@ -664,9 +664,9 @@ impl GCTriggerSelector { if let Some(size) = size { size.try_into() - .map_err(|_| format!("size overflow: {}", size)) + .map_err(|_| format!("size overflow: {size}")) } else { - Err(format!("size overflow: {}", s)) + Err(format!("size overflow: {s}")) } } else { s.parse::().map_err(|e| e.to_string()) } } @@ -710,7 +710,7 @@ impl FromStr for GCTriggerSelector { return Ok(Self::Delegated); } - Err(format!("Failed to parse the GC trigger option: {:?}", s)) + Err(format!("Failed to parse the GC trigger option: {s:?}")) } } diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index f997c16742..37e8920c40 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -39,17 +39,17 @@ impl ReferenceProcessors { } pub fn add_soft_candidate(&self, reff: ObjectReference) { - trace!("Add soft candidate: {}", reff); + trace!("Add soft candidate: {reff}"); self.soft.add_candidate(reff); } pub fn add_weak_candidate(&self, reff: ObjectReference) { - trace!("Add weak candidate: {}", reff); + trace!("Add weak candidate: {reff}"); self.weak.add_candidate(reff); } pub fn add_phantom_candidate(&self, reff: ObjectReference) { - trace!("Add phantom candidate: {}", reff); + trace!("Add phantom candidate: {reff}"); self.phantom.add_candidate(reff); } @@ -263,9 +263,7 @@ impl ReferenceProcessor { if let Some(referent) = VM::VMReferenceGlue::get_referent(*reff) { debug_assert!( referent.is_in_any_space(), - "Referent {:?} (of reference {:?}) is not in any space", - referent, - reff + "Referent {referent:?} (of reference {reff:?}) is not in any space", ); } }); @@ -313,15 +311,11 @@ impl ReferenceProcessor { let new_referent = ReferenceProcessor::trace_forward_object(trace, old_referent); <E::VM as VMBinding>::VMReferenceGlue::set_referent(reference, new_referent); - trace!( " referent: {} (forwarded to
{})", - old_referent, - new_referent - ); + trace!(" referent: {old_referent} (forwarded to {new_referent})",); } let new_reference = ReferenceProcessor::trace_forward_object(trace, reference); - trace!(" reference: forwarded to {}", new_reference); + trace!(" reference: forwarded to {new_reference}"); new_reference } @@ -401,7 +395,7 @@ impl ReferenceProcessor { ); for reference in sync.references.iter() { - trace!("Processing reference: {:?}", reference); + trace!("Processing reference: {reference:?}"); if !reference.is_live() { // Reference is currently unreachable but may get reachable by the @@ -412,7 +406,7 @@ impl ReferenceProcessor { if let Some(referent) = ::VMReferenceGlue::get_referent(*reference) { Self::keep_referent_alive(trace, referent); - trace!(" ~> {:?} (retained)", referent); + trace!(" ~> {referent:?} (retained)"); } } @@ -431,23 +425,23 @@ impl ReferenceProcessor { reference: ObjectReference, enqueued_references: &mut Vec, ) -> Option { - trace!("Process reference: {}", reference); + trace!("Process reference: {reference}"); // If the reference is dead, we're done with it. Let it (and // possibly its referent) be garbage-collected. if !reference.is_live() { VM::VMReferenceGlue::clear_referent(reference); - trace!(" UNREACHABLE reference: {}", reference); + trace!(" UNREACHABLE reference: {reference}"); return None; } // The reference object is live. let new_reference = Self::get_forwarded_reference(reference); - trace!(" forwarded to: {}", new_reference); + trace!(" forwarded to: {new_reference}"); // Get the old referent. let maybe_old_referent = VM::VMReferenceGlue::get_referent(reference); - trace!(" referent: {:?}", maybe_old_referent); + trace!(" referent: {maybe_old_referent:?}"); // If the application has cleared the referent the Java spec says // this does not cause the Reference object to be enqueued. We @@ -463,7 +457,7 @@ impl ReferenceProcessor { // or stronger than the current reference level. let new_referent = Self::get_forwarded_referent(old_referent); debug_assert!(new_referent.is_live()); - trace!(" forwarded referent to: {}", new_referent); + trace!(" forwarded referent to: {new_referent}"); // The reference object stays on the waiting list, and the // referent is untouched. The only thing we must do is @@ -476,7 +470,7 @@ impl ReferenceProcessor { Some(new_reference) } else { // Referent is unreachable. Clear the referent and enqueue the reference object. - trace!(" UNREACHABLE referent: {}", old_referent); + trace!(" UNREACHABLE referent: {old_referent}"); VM::VMReferenceGlue::clear_referent(new_reference); enqueued_references.push(new_reference); diff --git a/src/util/rust_util/mod.rs b/src/util/rust_util/mod.rs index f632fdb607..bcc6eadf58 100644 --- a/src/util/rust_util/mod.rs +++ b/src/util/rust_util/mod.rs @@ -112,13 +112,13 @@ pub fn debug_process_thread_id() -> String { { // `gettid()` is Linux-specific. let tid = unsafe { libc::gettid() }; - format!("PID: {}, TID: {}", pid, tid) + format!("PID: {pid}, TID: {tid}") } #[cfg(not(target_os = "linux"))] { // TODO: When we support other platforms, use platform-specific methods to get thread // identifiers. 
- format!("PID: {}", pid) + format!("PID: {pid}") } } diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index 16f8884b07..e359dd86a7 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -192,25 +192,23 @@ impl ProcessEdgesWork for SanityGCProcessEdges { let mut sanity_checker = self.mmtk().sanity_checker.lock().unwrap(); if !sanity_checker.refs.contains(&object) { // FIXME steveb consider VM-specific integrity check on reference. - assert!(object.is_sane(), "Invalid reference {:?}", object); + assert!(object.is_sane(), "Invalid reference {object:?}"); // Let plan check object assert!( self.mmtk().get_plan().sanity_check_object(object), - "Invalid reference {:?}", - object + "Invalid reference {object:?}", ); // Let VM check object assert!( VM::VMObjectModel::is_object_sane(object), - "Invalid reference {:?}", - object + "Invalid reference {object:?}", ); // Object is not "marked" sanity_checker.refs.insert(object); // "Mark" it - trace!("Sanity mark object {}", object); + trace!("Sanity mark object {object}"); self.nodes.enqueue(object); } @@ -218,7 +216,7 @@ impl ProcessEdgesWork for SanityGCProcessEdges { // bit set when sanity GC starts. #[cfg(feature = "vo_bit")] if !crate::util::metadata::vo_bit::is_vo_bit_set(object) { - panic!("VO bit is not set: {}", object); + panic!("VO bit is not set: {object}"); } object diff --git a/src/util/slot_logger.rs b/src/util/slot_logger.rs index 7cb231d8dc..a557de9d75 100644 --- a/src/util/slot_logger.rs +++ b/src/util/slot_logger.rs @@ -32,13 +32,9 @@ impl SlotLogger { /// * `slot` - The slot to log. /// pub fn log_slot(&self, slot: SL) { - trace!("log_slot({:?})", slot); + trace!("log_slot({slot:?})"); let mut slot_log = self.slot_log.write().unwrap(); - assert!( - slot_log.insert(slot), - "duplicate slot ({:?}) detected", - slot - ); + assert!(slot_log.insert(slot), "duplicate slot ({slot:?}) detected",); } /// Reset the slot logger by clearing the hash-set of slots. 
diff --git a/src/util/statistics/counter/event_counter.rs b/src/util/statistics/counter/event_counter.rs index d417cf4a01..876a093a4b 100644 --- a/src/util/statistics/counter/event_counter.rs +++ b/src/util/statistics/counter/event_counter.rs @@ -57,7 +57,7 @@ impl EventCounter { } fn print_value(&self, value: u64) { - print!("{}", value); + print!("{value}"); } } diff --git a/src/util/statistics/stats.rs b/src/util/statistics/stats.rs index e3a12a730a..da0004e213 100644 --- a/src/util/statistics/stats.rs +++ b/src/util/statistics/stats.rs @@ -125,7 +125,7 @@ impl Stats { merge_phases: bool, ) -> Mutex<SizeCounter> { let u = self.new_event_counter(name, implicit_start, merge_phases); - let v = self.new_event_counter(&format!("{}.volume", name), implicit_start, merge_phases); + let v = self.new_event_counter(&format!("{name}.volume"), implicit_start, merge_phases); Mutex::new(SizeCounter::new(u, v)) } @@ -190,7 +190,7 @@ impl Stats { print!("\t"); } for value in scheduler_stat.values() { - print!("{}\t", value); + print!("{value}\t"); } println!(); print!("Total time: "); @@ -211,7 +211,7 @@ impl Stats { } } for name in scheduler_stat.keys() { - print!("{}\t", name); + print!("{name}\t"); } println!(); } diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs index cc687f4b1f..74e61e21c0 100644 --- a/src/util/test_util/mock_vm.rs +++ b/src/util/test_util/mock_vm.rs @@ -271,7 +271,7 @@ impl Default for MockVM { mutator: MockMethod::new_unimplemented(), mutators: MockMethod::new_unimplemented(), vm_trace_object: MockMethod::new_fixed(Box::new(|(_, object, _)| { - panic!("MMTk cannot trace object {:?} as it does not belong to any MMTk space. If the object is known to the VM, the binding can override this method and handle its tracing.", object) + panic!("MMTk cannot trace object {object:?} as it does not belong to any MMTk space.
If the object is known to the VM, the binding can override this method and handle its tracing.") })), stop_all_mutators: MockMethod::new_unimplemented(), @@ -279,7 +279,7 @@ impl Default for MockVM { block_for_gc: MockMethod::new_unimplemented(), spawn_gc_thread: MockMethod::new_default(), out_of_memory: MockMethod::new_fixed(Box::new(|(_, err)| { - panic!("Out of memory with {:?}!", err) + panic!("Out of memory with {err:?}!") })), schedule_finalization: MockMethod::new_default(), post_forwarding: MockMethod::new_default(), diff --git a/src/util/test_util/mod.rs b/src/util/test_util/mod.rs index 15214fb499..81e5754cbf 100644 --- a/src/util/test_util/mod.rs +++ b/src/util/test_util/mod.rs @@ -83,7 +83,7 @@ where match done_rx.recv_timeout(Duration::from_millis(millis)) { Ok(_) => handle.join().expect("Thread panicked"), - Err(e) => panic!("Thread took too long: {}", e), + Err(e) => panic!("Thread took too long: {e}"), } } diff --git a/src/util/treadmill.rs b/src/util/treadmill.rs index 96b5eb9e7d..c11bcfea40 100644 --- a/src/util/treadmill.rs +++ b/src/util/treadmill.rs @@ -36,10 +36,10 @@ impl TreadMill { pub fn add_to_treadmill(&self, object: ObjectReference, nursery: bool) { if nursery { - trace!("Adding {} to nursery", object); + trace!("Adding {object} to nursery"); self.alloc_nursery.lock().unwrap().insert(object); } else { - trace!("Adding {} to to_space", object); + trace!("Adding {object} to to_space"); self.to_space.lock().unwrap().insert(object); } } @@ -65,16 +65,14 @@ impl TreadMill { let mut guard = self.collect_nursery.lock().unwrap(); debug_assert!( guard.contains(&object), - "copy source object ({}) must be in collect_nursery", - object + "copy source object ({object}) must be in collect_nursery", ); guard.remove(&object); } else { let mut guard = self.from_space.lock().unwrap(); debug_assert!( guard.contains(&object), - "copy source object ({}) must be in from_space", - object + "copy source object ({object}) must be in from_space", ); guard.remove(&object); } diff --git a/src/vm/active_plan.rs b/src/vm/active_plan.rs index 7fb52cfab3..0a600ab43e 100644 --- a/src/vm/active_plan.rs +++ b/src/vm/active_plan.rs @@ -56,6 +56,6 @@ pub trait ActivePlan { object: ObjectReference, _worker: &mut GCWorker<VM>, ) -> ObjectReference { - panic!("MMTk cannot trace object {:?} as it does not belong to any MMTk space. If the object is known to the VM, the binding can override this method and handle its tracing.", object) + panic!("MMTk cannot trace object {object:?} as it does not belong to any MMTk space. If the object is known to the VM, the binding can override this method and handle its tracing.") } } diff --git a/src/vm/collection.rs b/src/vm/collection.rs index 16e87eebe0..6ac04db0d8 100644 --- a/src/vm/collection.rs +++ b/src/vm/collection.rs @@ -72,7 +72,7 @@ pub trait Collection { /// * `tls`: The thread pointer for the mutator which failed the allocation and triggered the OOM. /// * `err_kind`: The type of OOM error that was encountered. fn out_of_memory(_tls: VMThread, err_kind: AllocationError) { - panic!("Out of memory with {:?}!", err_kind); + panic!("Out of memory with {err_kind:?}!"); } /// Inform the VM to schedule finalization threads.
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs b/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs index bef39dcab0..48a10f2c0a 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs @@ -17,10 +17,10 @@ pub fn allocate_alignment() { MUTATOR.with_fixture_mut(|fixture| { let min = MockVM::MIN_ALIGNMENT; let max = MockVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); + info!("Allowed alignment between {min} and {max}"); let mut align = min; while align <= max { - info!("Test allocation with alignment {}", align); + info!("Test allocation with alignment {align}"); let addr = memory_manager::alloc( &mut fixture.mutator, 8, @@ -30,9 +30,7 @@ pub fn allocate_alignment() { ); assert!( addr.is_aligned_to(align), - "Expected allocation alignment {}, returned address is {:?}", - align, - addr + "Expected allocation alignment {align}, returned address is {addr:?}", ); align *= 2; } @@ -51,13 +49,10 @@ pub fn allocate_offset() { const OFFSET: usize = 4; let min = MockVM::MIN_ALIGNMENT; let max = MockVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); + info!("Allowed alignment between {min} and {max}"); let mut align = min; while align <= max { - info!( - "Test allocation with alignment {} and offset {}", - align, OFFSET - ); + info!("Test allocation with alignment {align} and offset {OFFSET}"); let addr = memory_manager::alloc( &mut fixture.mutator, 8, @@ -67,9 +62,7 @@ pub fn allocate_offset() { ); assert!( (addr + OFFSET).is_aligned_to(align), - "Expected allocation alignment {}, returned address is {:?}", - align, - addr + "Expected allocation alignment {align}, returned address is {addr:?}", ); align *= 2; } diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs index 7c7df436fe..eaf7f8c7e1 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs @@ -21,7 +21,7 @@ pub fn allocate_nonmoving() { let addr = memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default); assert!(!addr.is_zero()); - info!("Allocated default at: {:#x}", addr); + info!("Allocated default at: {addr:#x}"); // Non moving alloc let addr = memory_manager::alloc( @@ -32,7 +32,7 @@ pub fn allocate_nonmoving() { AllocationSemantics::NonMoving, ); assert!(!addr.is_zero()); - info!("Allocated nonmoving at: {:#x}", addr); + info!("Allocated nonmoving at: {addr:#x}"); }, no_cleanup, ) diff --git a/src/vm/tests/mock_tests/mock_test_slots.rs b/src/vm/tests/mock_tests/mock_test_slots.rs index 397e0f1caf..b062f8cfce 100644 --- a/src/vm/tests/mock_tests/mock_test_slots.rs +++ b/src/vm/tests/mock_tests/mock_test_slots.rs @@ -411,8 +411,7 @@ mod mixed { assert_eq!( objref, Some(fixture.objref1), - "Slot {} is not properly loaded", - i + "Slot {i} is not properly loaded", ); } @@ -423,8 +422,7 @@ mod mixed { assert_eq!( objref, Some(fixture.objref2), - "Slot {} is not properly loaded after store", - i + "Slot {i} is not properly loaded after store", ); }
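A note on the pattern applied throughout this patch (an illustrative sketch, not part of the diff): Rust's inlined format arguments let a format string capture identifiers that are in scope, which is what collapses the multi-line `format!`/`panic!`/`trace!` calls above into single strings. The variable names below (`object`, `count`) are hypothetical and chosen only for the example.

    fn main() {
        let object = "0x7f00dead";
        let count = 3;
        // Positional style: arguments are listed after the format string.
        println!("object {} was traced {} times", object, count);
        // Inlined style: identifiers are captured directly in the string.
        println!("object {object} was traced {count} times");
        // Format specifiers (width, Debug, ...) still work with captured identifiers.
        println!("{count:>4} | {object:?}");
    }

Only plain identifiers can be captured this way; field accesses or method calls such as `self.count` or `list.len()` must still be passed as explicit positional or named arguments.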