use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Mutex;

/// This stores some global states for an MMTk instance.
/// Some MMTk components, such as plans and allocators, may keep a reference to this struct and access it through that reference.
// This used to be a part of the `BasePlan`. In that case, any component that accessed
// the states needed a reference to the plan, which made it harder for us to reason about the
// access pattern for the plan, as many components held references to the plan. Besides, the states
// are not actually related to a plan; they are just global states for MMTk. So we refactored
// those fields into this separate struct. Components that access the states only need
// a reference to this struct and are no longer dependent on the plan.
// We may consider further breaking down the fields into smaller structs.
pub struct GlobalState {
    /// Whether MMTk is now ready for collection. This is set to true when `initialize_collection()` is called.
    pub(crate) initialized: AtomicBool,
    /// Should we trigger a GC when the heap is full? It seems this should always be true. However, we allow
    /// bindings to temporarily disable GC, in which case we do not trigger a GC even if the heap is full.
    pub(crate) trigger_gc_when_heap_is_full: AtomicBool,
    /// The current GC status.
    pub(crate) gc_status: Mutex<GcStatus>,
    /// Is the current GC an emergency collection? Emergency means we may run out of memory soon, and we should
    /// attempt to collect as much as we can.
    pub(crate) emergency_collection: AtomicBool,
    /// Is the current GC triggered by the user?
    pub(crate) user_triggered_collection: AtomicBool,
    /// Is the current GC triggered internally by MMTk? This is unused for now. We may have internally triggered GCs
    /// for a concurrent plan.
    pub(crate) internal_triggered_collection: AtomicBool,
    /// Was the last GC internally triggered?
    pub(crate) last_internal_triggered_collection: AtomicBool,
    // Has an allocation succeeded since the emergency collection?
    pub(crate) allocation_success: AtomicBool,
    // Maximum number of failed attempts by a single thread
    pub(crate) max_collection_attempts: AtomicUsize,
    // Current collection attempt
    pub(crate) cur_collection_attempts: AtomicUsize,
    /// A counter for per-mutator stack scanning
    pub(crate) scanned_stacks: AtomicUsize,
    /// Have we scanned all the stacks?
    pub(crate) stacks_prepared: AtomicBool,
    /// A counter that keeps track of the number of bytes allocated since the last stress test
    pub(crate) allocation_bytes: AtomicUsize,
    /// A counter that keeps track of the number of bytes allocated by malloc
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) malloc_bytes: AtomicUsize,
    /// This stores the size in bytes of all the live objects in the last GC. This counter is only updated in the GC release phase.
    #[cfg(feature = "count_live_bytes_in_gc")]
    pub(crate) live_bytes_in_last_gc: AtomicUsize,
}
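
// A minimal sketch (not part of this module) of how a component might hold a reference to
// `GlobalState` directly instead of going through a plan. The `HeapMonitor` type and the
// `Arc` wrapping are assumptions for illustration only; actual components may embed the
// reference differently.
//
//     use std::sync::Arc;
//
//     struct HeapMonitor {
//         state: Arc<GlobalState>,
//     }
//
//     impl HeapMonitor {
//         fn should_poll_for_gc(&self) -> bool {
//             // Only consider triggering a GC once MMTk is initialized and the binding
//             // has not disabled GC.
//             self.state.is_initialized() && self.state.should_trigger_gc_when_heap_is_full()
//         }
//     }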

impl GlobalState {
    /// Is MMTk initialized?
    pub fn is_initialized(&self) -> bool {
        self.initialized.load(Ordering::SeqCst)
    }

    /// Should MMTk trigger a GC when the heap is full? If GC is disabled, we won't trigger a GC even if the heap is full.
    pub fn should_trigger_gc_when_heap_is_full(&self) -> bool {
        self.trigger_gc_when_heap_is_full.load(Ordering::SeqCst)
    }

    /// Set the collection kind for the current GC. This is called before
    /// scheduling the collection to determine what kind of collection it will be.
    pub fn set_collection_kind(
        &self,
        last_collection_was_exhaustive: bool,
        heap_can_grow: bool,
    ) -> bool {
        self.cur_collection_attempts.store(
            if self.user_triggered_collection.load(Ordering::Relaxed) {
                1
            } else {
                self.determine_collection_attempts()
            },
            Ordering::Relaxed,
        );

        let emergency_collection = !self.is_internal_triggered_collection()
            && last_collection_was_exhaustive
            && self.cur_collection_attempts.load(Ordering::Relaxed) > 1
            && !heap_can_grow;
        self.emergency_collection
            .store(emergency_collection, Ordering::Relaxed);

        emergency_collection
    }

    fn determine_collection_attempts(&self) -> usize {
        if !self.allocation_success.load(Ordering::Relaxed) {
            self.max_collection_attempts.fetch_add(1, Ordering::Relaxed);
        } else {
            self.allocation_success.store(false, Ordering::Relaxed);
            self.max_collection_attempts.store(1, Ordering::Relaxed);
        }

        self.max_collection_attempts.load(Ordering::Relaxed)
    }

    fn is_internal_triggered_collection(&self) -> bool {
        let is_internal_triggered = self
            .last_internal_triggered_collection
            .load(Ordering::SeqCst);
        // Remove this assertion when we have concurrent GC.
        assert!(
            !is_internal_triggered,
            "We have no concurrent GC implemented. We should not have internally triggered GC"
        );
        is_internal_triggered
    }

    /// Return true if the current GC is an emergency collection.
    pub fn is_emergency_collection(&self) -> bool {
        self.emergency_collection.load(Ordering::Relaxed)
    }

    /// Return true if this collection was triggered by application code.
    pub fn is_user_triggered_collection(&self) -> bool {
        self.user_triggered_collection.load(Ordering::Relaxed)
    }

    /// Reset collection state information.
    pub fn reset_collection_trigger(&self) {
        self.last_internal_triggered_collection.store(
            self.internal_triggered_collection.load(Ordering::SeqCst),
            Ordering::Relaxed,
        );
        self.internal_triggered_collection
            .store(false, Ordering::SeqCst);
        self.user_triggered_collection
            .store(false, Ordering::Relaxed);
    }

    /// Are the stacks scanned?
    pub fn stacks_prepared(&self) -> bool {
        self.stacks_prepared.load(Ordering::SeqCst)
    }

    /// Prepare for stack scanning. This is usually used with `inform_stack_scanned()`.
    /// This should be called before doing stack scanning.
    pub fn prepare_for_stack_scanning(&self) {
        self.scanned_stacks.store(0, Ordering::SeqCst);
        self.stacks_prepared.store(false, Ordering::SeqCst);
    }

    /// Inform that one stack has been scanned. The argument `n_mutators` indicates the
    /// total number of stacks we should scan. This method returns true if the number of scanned
    /// stacks equals the total mutator count. Otherwise it returns false. This method
    /// is thread-safe, and we guarantee that only one thread will return true.
    pub fn inform_stack_scanned(&self, n_mutators: usize) -> bool {
        let old = self.scanned_stacks.fetch_add(1, Ordering::SeqCst);
        debug_assert!(
            old < n_mutators,
            "The number of scanned stacks ({}) is more than the number of mutators ({})",
            old,
            n_mutators
        );
        let scanning_done = old + 1 == n_mutators;
        if scanning_done {
            self.stacks_prepared.store(true, Ordering::SeqCst);
        }
        scanning_done
    }
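
    // A minimal sketch of the intended calling protocol, with hypothetical names
    // (`state`, `mutators`, `scan_stack`); the actual stack-scanning work is done by
    // GC work packets elsewhere in the crate.
    //
    //     state.prepare_for_stack_scanning();
    //     for mutator in &mutators {
    //         scan_stack(mutator);
    //         if state.inform_stack_scanned(mutators.len()) {
    //             // Exactly one thread observes the count reaching the total and can
    //             // proceed with work that requires all stacks to be scanned.
    //         }
    //     }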

    /// Increase the allocation bytes and return the current allocation bytes after the increase.
    pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
        let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
        trace!(
            "Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
            old_allocation_bytes,
            size,
            self.allocation_bytes.load(Ordering::Relaxed),
        );
        old_allocation_bytes + size
    }
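
    // A minimal sketch of how an allocator slow path might use this counter for stress GC,
    // assuming a hypothetical `stress_factor` threshold; the actual stress-test logic lives
    // with the allocator and plan code.
    //
    //     let total = state.increase_allocation_bytes_by(size);
    //     if total > stress_factor {
    //         // Request a stress GC; the counter is reset elsewhere.
    //     }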

    /// Get the number of bytes allocated by malloc, in pages.
    #[cfg(feature = "malloc_counted_size")]
    pub fn get_malloc_bytes_in_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.malloc_bytes.load(Ordering::Relaxed))
    }

    /// Increase the number of bytes allocated by malloc.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
    }

    /// Decrease the number of bytes allocated by malloc.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
    }

    /// Get the size in bytes of all the live objects in the last GC.
    #[cfg(feature = "count_live_bytes_in_gc")]
    pub fn get_live_bytes_in_last_gc(&self) -> usize {
        self.live_bytes_in_last_gc.load(Ordering::SeqCst)
    }

    /// Set the size in bytes of all the live objects in the last GC.
    #[cfg(feature = "count_live_bytes_in_gc")]
    pub fn set_live_bytes_in_last_gc(&self, size: usize) {
        self.live_bytes_in_last_gc.store(size, Ordering::SeqCst);
    }
}

impl Default for GlobalState {
    fn default() -> Self {
        Self {
            initialized: AtomicBool::new(false),
            trigger_gc_when_heap_is_full: AtomicBool::new(true),
            gc_status: Mutex::new(GcStatus::NotInGC),
            stacks_prepared: AtomicBool::new(false),
            emergency_collection: AtomicBool::new(false),
            user_triggered_collection: AtomicBool::new(false),
            internal_triggered_collection: AtomicBool::new(false),
            last_internal_triggered_collection: AtomicBool::new(false),
            allocation_success: AtomicBool::new(false),
            max_collection_attempts: AtomicUsize::new(0),
            cur_collection_attempts: AtomicUsize::new(0),
            scanned_stacks: AtomicUsize::new(0),
            allocation_bytes: AtomicUsize::new(0),
            #[cfg(feature = "malloc_counted_size")]
            malloc_bytes: AtomicUsize::new(0),
            #[cfg(feature = "count_live_bytes_in_gc")]
            live_bytes_in_last_gc: AtomicUsize::new(0),
        }
    }
}

/// The current GC status of an MMTk instance.
#[derive(PartialEq)]
pub enum GcStatus {
    NotInGC,
    GcPrepare,
    GcProper,
}
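
// A few unit-test sketches exercising the counters above. They rely only on the items
// defined in this file, using `GlobalState::default()` as the constructor.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn stack_scanning_counter() {
        let state = GlobalState::default();
        state.prepare_for_stack_scanning();
        assert!(!state.stacks_prepared());
        // With two mutators, only the second (last) call reports that scanning is done.
        assert!(!state.inform_stack_scanned(2));
        assert!(state.inform_stack_scanned(2));
        assert!(state.stacks_prepared());
    }

    #[test]
    fn allocation_bytes_counter() {
        let state = GlobalState::default();
        // `increase_allocation_bytes_by` returns the counter value after the increase.
        assert_eq!(state.increase_allocation_bytes_by(8), 8);
        assert_eq!(state.increase_allocation_bytes_by(8), 16);
    }

    #[test]
    fn non_emergency_collection_kind() {
        let state = GlobalState::default();
        // Not user-triggered, the last GC was not exhaustive, and the heap can grow:
        // this should not be an emergency collection.
        assert!(!state.set_collection_kind(false, true));
        assert!(!state.is_emergency_collection());
    }
}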