|
| 1 | +//! Sampler cache for Vulkan backend. |
| 2 | +//! |
| 3 | +//! Nearly identical to the DX12 sampler cache, without descriptor heap management. |
| 4 | +
|
| 5 | +use std::collections::{hash_map::Entry, HashMap}; |
| 6 | + |
| 7 | +use ash::vk; |
| 8 | +use ordered_float::OrderedFloat; |
| 9 | + |
/// If the allowed sampler count is above this value, the sampler cache is disabled.
///
/// The cache exists only to stay under tight device limits on unique samplers
/// (as low as 4000 on some hardware — see [`SamplerCache`]). With 2^20 allowed
/// samplers, exhaustion is not a practical concern, so the bookkeeping overhead
/// is skipped entirely.
const ENABLE_SAMPLER_CACHE_CUTOFF: u32 = 1 << 20;
| 12 | + |
/// [`vk::SamplerCreateInfo`] is not hashable, so we wrap it in a newtype that is.
///
/// We use [`OrderedFloat`] to allow for floating point values to be compared and
/// hashed in a defined way.
///
/// NOTE(review): the `PartialEq`/`Hash` impls below consider only the plain value
/// fields — the `p_next` extension chain is ignored, so two create-infos differing
/// only by chained extension structs would be treated as equal. Confirm callers
/// never pass extended create-infos through the cache.
#[derive(Copy, Clone)]
struct HashableSamplerCreateInfo(vk::SamplerCreateInfo<'static>);
| 19 | + |
| 20 | +impl PartialEq for HashableSamplerCreateInfo { |
| 21 | + fn eq(&self, other: &Self) -> bool { |
| 22 | + self.0.flags == other.0.flags |
| 23 | + && self.0.mag_filter == other.0.mag_filter |
| 24 | + && self.0.min_filter == other.0.min_filter |
| 25 | + && self.0.mipmap_mode == other.0.mipmap_mode |
| 26 | + && self.0.address_mode_u == other.0.address_mode_u |
| 27 | + && self.0.address_mode_v == other.0.address_mode_v |
| 28 | + && self.0.address_mode_w == other.0.address_mode_w |
| 29 | + && OrderedFloat(self.0.mip_lod_bias) == OrderedFloat(other.0.mip_lod_bias) |
| 30 | + && self.0.anisotropy_enable == other.0.anisotropy_enable |
| 31 | + && OrderedFloat(self.0.max_anisotropy) == OrderedFloat(other.0.max_anisotropy) |
| 32 | + && self.0.compare_enable == other.0.compare_enable |
| 33 | + && self.0.compare_op == other.0.compare_op |
| 34 | + && OrderedFloat(self.0.min_lod) == OrderedFloat(other.0.min_lod) |
| 35 | + && OrderedFloat(self.0.max_lod) == OrderedFloat(other.0.max_lod) |
| 36 | + && self.0.border_color == other.0.border_color |
| 37 | + && self.0.unnormalized_coordinates == other.0.unnormalized_coordinates |
| 38 | + } |
| 39 | +} |
| 40 | + |
| 41 | +impl Eq for HashableSamplerCreateInfo {} |
| 42 | + |
| 43 | +impl std::hash::Hash for HashableSamplerCreateInfo { |
| 44 | + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { |
| 45 | + self.0.flags.hash(state); |
| 46 | + self.0.mag_filter.hash(state); |
| 47 | + self.0.min_filter.hash(state); |
| 48 | + self.0.mipmap_mode.hash(state); |
| 49 | + self.0.address_mode_u.hash(state); |
| 50 | + self.0.address_mode_v.hash(state); |
| 51 | + self.0.address_mode_w.hash(state); |
| 52 | + OrderedFloat(self.0.mip_lod_bias).hash(state); |
| 53 | + self.0.anisotropy_enable.hash(state); |
| 54 | + OrderedFloat(self.0.max_anisotropy).hash(state); |
| 55 | + self.0.compare_enable.hash(state); |
| 56 | + self.0.compare_op.hash(state); |
| 57 | + OrderedFloat(self.0.min_lod).hash(state); |
| 58 | + OrderedFloat(self.0.max_lod).hash(state); |
| 59 | + self.0.border_color.hash(state); |
| 60 | + self.0.unnormalized_coordinates.hash(state); |
| 61 | + } |
| 62 | +} |
| 63 | + |
/// Entry in the sampler cache.
struct CacheEntry {
    // The cached Vulkan sampler handle, shared by every user whose create-info
    // hashes/compares equal.
    sampler: vk::Sampler,
    // Number of outstanding users of this sampler; the sampler is destroyed
    // when this drops to zero (see `SamplerCache::destroy_sampler`).
    ref_count: u32,
}
| 69 | + |
/// Global sampler cache.
///
/// As some devices have a low limit (4000) on the number of unique samplers that can be created,
/// we need to cache samplers to avoid running out if people eagerly create duplicate samplers.
pub(crate) struct SamplerCache {
    /// Mapping from the sampler description to sampler and reference count.
    samplers: HashMap<HashableSamplerCreateInfo, CacheEntry>,
    /// Maximum number of unique samplers that can be created.
    total_capacity: u32,
    /// If true, the sampler cache is disabled and all samplers are created on demand.
    /// Set when `total_capacity` is so large that exhaustion is not a practical risk.
    passthrough: bool,
}
| 82 | + |
| 83 | +impl SamplerCache { |
| 84 | + pub fn new(total_capacity: u32) -> Self { |
| 85 | + let passthrough = total_capacity >= ENABLE_SAMPLER_CACHE_CUTOFF; |
| 86 | + Self { |
| 87 | + samplers: HashMap::new(), |
| 88 | + total_capacity, |
| 89 | + passthrough, |
| 90 | + } |
| 91 | + } |
| 92 | + |
| 93 | + /// Create a sampler, or return an existing one if it already exists. |
| 94 | + /// |
| 95 | + /// If the sampler already exists, the reference count is incremented. |
| 96 | + /// |
| 97 | + /// If the sampler does not exist, a new sampler is created and inserted into the cache. |
| 98 | + /// |
| 99 | + /// If the cache is full, an error is returned. |
| 100 | + pub fn create_sampler( |
| 101 | + &mut self, |
| 102 | + device: &ash::Device, |
| 103 | + create_info: vk::SamplerCreateInfo<'static>, |
| 104 | + ) -> Result<vk::Sampler, crate::DeviceError> { |
| 105 | + if self.passthrough { |
| 106 | + return unsafe { device.create_sampler(&create_info, None) } |
| 107 | + .map_err(super::map_host_device_oom_and_ioca_err); |
| 108 | + }; |
| 109 | + |
| 110 | + // Get the number of used samplers. Needs to be done before to appease the borrow checker. |
| 111 | + let used_samplers = self.samplers.len(); |
| 112 | + |
| 113 | + match self.samplers.entry(HashableSamplerCreateInfo(create_info)) { |
| 114 | + Entry::Occupied(occupied_entry) => { |
| 115 | + // We have found a match, so increment the refcount and return the index. |
| 116 | + let value = occupied_entry.into_mut(); |
| 117 | + value.ref_count += 1; |
| 118 | + Ok(value.sampler) |
| 119 | + } |
| 120 | + Entry::Vacant(vacant_entry) => { |
| 121 | + // We need to create a new sampler. |
| 122 | + |
| 123 | + // We need to check if we can create more samplers. |
| 124 | + if used_samplers >= self.total_capacity as usize { |
| 125 | + log::error!("There is no more room in the global sampler heap for more unique samplers. Your device supports a maximum of {} unique samplers.", self.samplers.len()); |
| 126 | + return Err(crate::DeviceError::OutOfMemory); |
| 127 | + } |
| 128 | + |
| 129 | + // Create the sampler. |
| 130 | + let sampler = unsafe { device.create_sampler(&create_info, None) } |
| 131 | + .map_err(super::map_host_device_oom_and_ioca_err)?; |
| 132 | + |
| 133 | + // Insert the new sampler into the mapping. |
| 134 | + vacant_entry.insert(CacheEntry { |
| 135 | + sampler, |
| 136 | + ref_count: 1, |
| 137 | + }); |
| 138 | + |
| 139 | + Ok(sampler) |
| 140 | + } |
| 141 | + } |
| 142 | + } |
| 143 | + |
| 144 | + /// Decrease the reference count of a sampler and destroy it if the reference count reaches 0. |
| 145 | + /// |
| 146 | + /// The provided sampler is checked against the sampler in the cache to ensure there is no clerical error. |
| 147 | + pub fn destroy_sampler( |
| 148 | + &mut self, |
| 149 | + device: &ash::Device, |
| 150 | + create_info: vk::SamplerCreateInfo<'static>, |
| 151 | + provided_sampler: vk::Sampler, |
| 152 | + ) { |
| 153 | + if self.passthrough { |
| 154 | + unsafe { device.destroy_sampler(provided_sampler, None) }; |
| 155 | + return; |
| 156 | + }; |
| 157 | + |
| 158 | + let Entry::Occupied(mut hash_map_entry) = |
| 159 | + self.samplers.entry(HashableSamplerCreateInfo(create_info)) |
| 160 | + else { |
| 161 | + log::error!("Trying to destroy a sampler that does not exist."); |
| 162 | + return; |
| 163 | + }; |
| 164 | + let cache_entry = hash_map_entry.get_mut(); |
| 165 | + |
| 166 | + assert_eq!( |
| 167 | + cache_entry.sampler, provided_sampler, |
| 168 | + "Provided sampler does not match the sampler in the cache." |
| 169 | + ); |
| 170 | + |
| 171 | + cache_entry.ref_count -= 1; |
| 172 | + |
| 173 | + if cache_entry.ref_count == 0 { |
| 174 | + unsafe { device.destroy_sampler(cache_entry.sampler, None) }; |
| 175 | + hash_map_entry.remove(); |
| 176 | + } |
| 177 | + } |
| 178 | +} |
0 commit comments