|
| 1 | +use std::mem::ManuallyDrop; |
| 2 | +use std::ffi::c_void; |
| 3 | +use std::alloc::Layout; |
| 4 | +use std::mem; |
| 5 | +use std::ptr::{Pointee, DynMetadata, NonNull}; |
| 6 | + |
| 7 | +use zerogc::GcSafe; |
| 8 | +use zerogc_context::field_offset; |
| 9 | +use zerogc_context::utils::AtomicCell; |
| 10 | +use crate::{RawMarkState, CollectorId, DynTrace, MarkVisitor}; |
| 11 | +use std::num::NonZeroUsize; |
| 12 | + |
| 13 | + |
| 14 | +/// A link in the chain of `BigGcObject`s |
| 15 | +type BigObjectLinkItem = Option<NonNull<BigGcObject<DynamicObj>>>; |
| 16 | +/// An atomic link in the linked-list of BigObjects |
| 17 | +/// |
| 18 | +/// This is thread-safe |
| 19 | +// TODO: Replace with a Vec |
| 20 | +#[derive(Default)] |
| 21 | +pub(crate) struct BigObjectLink(AtomicCell<BigObjectLinkItem>); |
| 22 | +impl BigObjectLink { |
| 23 | + #[inline] |
| 24 | + pub const fn new(item: BigObjectLinkItem) -> Self { |
| 25 | + BigObjectLink(AtomicCell::new(item)) |
| 26 | + } |
| 27 | + #[inline] |
| 28 | + pub(crate) fn item(&self) -> BigObjectLinkItem { |
| 29 | + self.0.load() |
| 30 | + } |
| 31 | + #[inline] |
| 32 | + pub(crate) unsafe fn set_item_forced(&self, val: BigObjectLinkItem) { |
| 33 | + self.0.store(val) |
| 34 | + } |
| 35 | + #[inline] |
| 36 | + pub(crate) fn append_item(&self, big_obj: Box<BigGcObject>) { |
| 37 | + // Must use CAS loop in case another thread updates |
| 38 | + let mut expected_prev = big_obj.prev.item(); |
| 39 | + let mut updated_item = unsafe { |
| 40 | + NonNull::new_unchecked(Box::into_raw(big_obj)) |
| 41 | + }; |
| 42 | + loop { |
| 43 | + match self.0.compare_exchange( |
| 44 | + expected_prev, Some(updated_item) |
| 45 | + ) { |
| 46 | + Ok(_) => break, |
| 47 | + Err(actual_prev) => { |
| 48 | + unsafe { |
| 49 | + /* |
| 50 | + * We have exclusive access to `updated_item` |
| 51 | + * here so we don't need to worry about CAS. |
| 52 | + * We just need to update its `prev` |
| 53 | + * link to point to the new value. |
| 54 | + */ |
| 55 | + updated_item.as_mut().prev.0.store(actual_prev); |
| 56 | + expected_prev = actual_prev; |
| 57 | + } |
| 58 | + } |
| 59 | + } |
| 60 | + } |
| 61 | + } |
| 62 | +} |
| 63 | + |
/// Layout information for a garbage-collected allocation.
pub enum LayoutInfo {
    /// A `Sized` value whose complete layout is known up front.
    Fixed(Layout),
    /// An array-like value: the element layout is fixed,
    /// but the total size depends on the runtime length.
    Array {
        element_layout: Layout
    }
}
| 70 | +trait StaticGcType<Metadata> { |
| 71 | + const GC_TYPE_INFO: StaticTypeInfo; |
| 72 | +} |
| 73 | +/// The limited subset of type information that is available at compile-time |
| 74 | +/// |
| 75 | +/// For `Sized` types, we know all the type information. |
| 76 | +/// However for `dyn` types and slices, we may not. |
| 77 | +pub enum StaticTypeInfo { |
| 78 | + /// Indicates that the type and size are fixed |
| 79 | + Fixed { |
| 80 | + value_offset: usize, |
| 81 | + static_type: &'static GcType |
| 82 | + }, |
| 83 | + /// Indicates that the type is dynamically dispatched, |
| 84 | + /// and its type (and size at runtime) can be determined from a [GcType] |
| 85 | + /// pointer at the specified field offset. |
| 86 | + /// |
| 87 | + /// This is the case for `dyn Trait` pointers. |
| 88 | + TraitObject, |
| 89 | + /// Indicates that the object is an array, |
| 90 | + /// with a fixed element type but unknown size. |
| 91 | + /// |
| 92 | + /// This is also the case for `str`. |
| 93 | + Array { |
| 94 | + value_offset: usize, |
| 95 | + size_offset: usize, |
| 96 | + static_type: &'static GcType, |
| 97 | + element_type: &'static GcType |
| 98 | + } |
| 99 | +} |
| 100 | + |
| 101 | +impl StaticTypeInfo { |
| 102 | + #[inline] // NOTE: We expect this to be constant folded |
| 103 | + pub(crate) const fn resolve_type<T: GcSafe + ?Sized>(&self, val: &T) -> &GcType { |
| 104 | + match *self { |
| 105 | + StaticTypeInfo::Fixed { static_type, .. } | |
| 106 | + StaticTypeInfo::Array { static_type, .. } => static_type, |
| 107 | + StaticTypeInfo::TraitObject => { |
| 108 | + unsafe { &(*self.resolve_header(val)).type_info } |
| 109 | + } |
| 110 | + } |
| 111 | + } |
| 112 | + #[inline] |
| 113 | + pub(crate) const fn resolve_header<T: GcSafe + ?Sized>(&self, val: &T) -> &'_ GcHeader { |
| 114 | + match *self { |
| 115 | + StaticTypeInfo::Fixed { value_offset, .. } | |
| 116 | + StaticTypeInfo::Array { value_offset, .. } => { |
| 117 | + unsafe { &*(val as *const T).sub(value_offset) } |
| 118 | + }, |
| 119 | + StaticTypeInfo::TraitObject => unsafe { |
| 120 | + unsafe { |
| 121 | + &*(val as *mut T).sub(GcHeader::value_offset( |
| 122 | + std::mem::align_of_val(val) |
| 123 | + )) as *const GcHeader |
| 124 | + } |
| 125 | + }, |
| 126 | + } |
| 127 | + } |
| 128 | +} |
| 129 | + |
| 130 | +impl StaticTypeInfo { |
| 131 | + pub const fn for_type<T: GcSafe + ?Sized>() -> &'static StaticTypeInfo { |
| 132 | + &<T as StaticGcType<<T as Pointee>::Metadata>>::GC_TYPE_INFO |
| 133 | + } |
| 134 | + #[inline] |
| 135 | + pub const fn resolve_total_size<T: GcSafe + ?Sized>(&self, val: &T) -> usize { |
| 136 | + Self::for_type::<T>() |
| 137 | + } |
| 138 | +} |
| 139 | +impl<T: GcSafe + Sized + Pointee<Metadata=()>> StaticGcType<()> for T { |
| 140 | + const GC_TYPE_INFO: StaticTypeInfo = StaticTypeInfo::Fixed { |
| 141 | + value_offset: GcType::value_offset_for_sized::<T>(), |
| 142 | + static_type: &GcType::type_for_sized::<T>() |
| 143 | + }; |
| 144 | +} |
| 145 | +impl<T: GcSafe + Sized> StaticGcType<usize> for [T] { |
| 146 | + const GC_TYPE_INFO: StaticTypeInfo = StaticTypeInfo::Array { |
| 147 | + element_type: &GcType::type_for_sized::<T>(), |
| 148 | + size_offset: field_offset!(GcHeader, static_type) |
| 149 | + }; |
| 150 | +} |
| 151 | +impl StaticGcType<usize> for str { |
| 152 | + /// A `str` has exactly the same runtime layout as `[u8]` |
| 153 | + const GC_TYPE_INFO: StaticTypeInfo = <[u8] as StaticGcType<usize>>::GC_TYPE_INFO; |
| 154 | +} |
| 155 | +impl<Dyn: GcSafe + Pointee<Metadata=DynMetadata<Dyn>> + ?Sized> StaticGcType<DynMetadata<Dyn>> for Dyn { |
| 156 | + const GC_TYPE_INFO: StaticTypeInfo = StaticTypeInfo::TraitObject { |
| 157 | + runtime_type_offset: field_offset!(GcHeader, static_type) |
| 158 | + }; |
| 159 | +} |
| 160 | + |
| 161 | + |
| 162 | +/// A header for a GC object |
| 163 | +/// |
| 164 | +/// This is uniform for all objects |
| 165 | +#[repr(C)] |
| 166 | +pub(crate) struct GcHeader { |
| 167 | + /// The type of this object, or `None` if it is an array |
| 168 | + pub(crate) type_info: &'static GcType, |
| 169 | + /* |
| 170 | + * NOTE: State byte should come last |
| 171 | + * If the value is small `(u32)`, we could reduce |
| 172 | + * the padding to a 3 bytes and fit everything in a word. |
| 173 | + * |
| 174 | + * Do we really need to use atomic stores? |
| 175 | + */ |
| 176 | + pub(crate) raw_state: AtomicCell<RawMarkState>, |
| 177 | + pub(crate) collector_id: CollectorId, |
| 178 | +} |
| 179 | +impl GcHeader { |
| 180 | + #[inline] |
| 181 | + pub fn new(type_info: &'static GcType, raw_state: RawMarkState, collector_id: CollectorId) -> Self { |
| 182 | + GcHeader { type_info, raw_state: AtomicCell::new(raw_state), collector_id, prev: BigObjectLink::new() } |
| 183 | + } |
| 184 | + #[inline] |
| 185 | + pub fn value(&self) -> *mut c_void { |
| 186 | + unsafe { |
| 187 | + (self as *const GcHeader as *mut GcHeader as *mut u8) |
| 188 | + // NOTE: This takes into account the alignment and possible padding |
| 189 | + .add(self.type_info.value_offset) |
| 190 | + .cast::<c_void>() |
| 191 | + } |
| 192 | + } |
| 193 | + #[inline] |
| 194 | + pub unsafe fn from_value_ptr<T>(ptr: *mut T, static_type_info: &StaticTypeInfo) -> *mut GcHeader { |
| 195 | + (ptr as *mut u8).sub(static_type.value_offset).cast() |
| 196 | + } |
| 197 | + #[inline] |
| 198 | + pub(crate) fn raw_state(&self) -> RawMarkState { |
| 199 | + // TODO: Is this safe? Couldn't it be accessed concurrently? |
| 200 | + self.raw_state.load() |
| 201 | + } |
| 202 | + #[inline] |
| 203 | + pub(crate) fn update_raw_state(&self, raw_state: RawMarkState) { |
| 204 | + self.raw_state.store(raw_state); |
| 205 | + } |
| 206 | + #[inline] |
| 207 | + pub const fn value_offset(align: usize) -> usize { |
| 208 | + // Big object |
| 209 | + let layout = Layout::new::<BigGcObject<()>>(); |
| 210 | + layout.size() + layout.padding_needed_for(align) |
| 211 | + } |
| 212 | +} |
| 213 | + |
/// Zero-sized marker standing in for a GC object
/// whose concrete type is only known at runtime.
struct DynamicObj;
| 216 | + |
| 217 | + |
| 218 | +#[repr(C)] |
| 219 | +struct ArrayGcObject<T = DynamicObj> { |
| 220 | + header: GcHeader, |
| 221 | + /// This is dropped using dynamic type info |
| 222 | + static_value: ManuallyDrop<[T]> |
| 223 | +} |
| 224 | +impl ArrayGcObject { |
| 225 | + #[inline] |
| 226 | + fn size(&self) -> usize { |
| 227 | + self.static_value.as_ref().len() |
| 228 | + } |
| 229 | +} |
| 230 | +#[repr(C)] |
| 231 | +pub(crate) struct BigGcObject<T = DynamicObj> { |
| 232 | + pub(crate) header: GcHeader, |
| 233 | + /// This is dropped using dynamic type info |
| 234 | + pub(crate) static_value: ManuallyDrop<T> |
| 235 | +} |
| 236 | +impl<T> BigGcObject<T> { |
| 237 | + #[inline] |
| 238 | + pub(crate) unsafe fn into_dynamic_box(val: Box<Self>) -> Box<BigGcObject<DynamicObj>> { |
| 239 | + std::mem::transmute::<Box<BigGcObject<T>>, Box<BigGcObject<DynamicObj>>>(val) |
| 240 | + } |
| 241 | +} |
| 242 | +impl<T> Drop for BigGcObject<T> { |
| 243 | + fn drop(&mut self) { |
| 244 | + unsafe { |
| 245 | + if let Some(drop) = self.header.type_info.drop_func { |
| 246 | + drop(&mut *self.static_value as *mut T as *mut c_void); |
| 247 | + } |
| 248 | + } |
| 249 | + } |
| 250 | +} |
0 commit comments