+use crate::arch::barrier;
 use crate::float::Float;
 use crate::integer::{Integer, SignedInteger, UnsignedInteger};
-use crate::memory::Scope;
+use crate::memory::{Scope, Semantics};
 use crate::scalar::VectorOrScalar;
 #[cfg(target_arch = "spirv")]
 use core::arch::asm;
@@ -49,7 +50,116 @@ pub enum GroupOperation {
 /// function, it was removed from the [`GroupOperation`] enum and instead resides individually.
 pub const GROUP_OPERATION_CLUSTERED_REDUCE: u32 = 3;
 
-// TODO barriers
+/// Only usable if the extension GL_KHR_shader_subgroup_basic is enabled.
+///
+/// The function subgroupBarrier() enforces that all active invocations within a
+/// subgroup must execute this function before any are allowed to continue their
+/// execution, and the results of any memory stores performed using coherent
+/// variables performed prior to the call will be visible to any future
+/// coherent access to the same memory performed by any other shader invocation
+/// within the same subgroup.
+///
+/// Requires Capability `GroupNonUniform`.
+#[spirv_std_macros::gpu_only]
+#[doc(alias = "subgroupBarrier")]
+#[inline]
+pub unsafe fn subgroup_barrier() {
+    unsafe {
+        barrier::control_barrier::<
+            SUBGROUP,
+            SUBGROUP,
+            {
+                Semantics::ACQUIRE_RELEASE.bits()
+                    | Semantics::UNIFORM_MEMORY.bits()
+                    | Semantics::WORKGROUP_MEMORY.bits()
+                    | Semantics::IMAGE_MEMORY.bits()
+            },
+        >();
+    }
+}
+
+/// Only usable if the extension GL_KHR_shader_subgroup_basic is enabled.
+///
+/// The function subgroupMemoryBarrier() enforces the ordering of all memory
+/// transactions issued within a single shader invocation, as viewed by other
+/// invocations in the same subgroup.
+///
+/// Requires Capability `GroupNonUniform`.
+#[spirv_std_macros::gpu_only]
+#[doc(alias = "subgroupMemoryBarrier")]
+#[inline]
+pub unsafe fn subgroup_memory_barrier() {
+    unsafe {
+        barrier::memory_barrier::<
+            SUBGROUP,
+            {
+                Semantics::ACQUIRE_RELEASE.bits()
+                    | Semantics::UNIFORM_MEMORY.bits()
+                    | Semantics::WORKGROUP_MEMORY.bits()
+                    | Semantics::IMAGE_MEMORY.bits()
+            },
+        >();
+    }
+}
+
+/// Only usable if the extension GL_KHR_shader_subgroup_basic is enabled.
+///
+/// The function subgroupMemoryBarrierBuffer() enforces the ordering of all
+/// memory transactions to buffer variables issued within a single shader
+/// invocation, as viewed by other invocations in the same subgroup.
+///
+/// Requires Capability `GroupNonUniform`.
+#[spirv_std_macros::gpu_only]
+#[doc(alias = "subgroupMemoryBarrierBuffer")]
+#[inline]
+pub unsafe fn subgroup_memory_barrier_buffer() {
+    unsafe {
+        barrier::memory_barrier::<
+            SUBGROUP,
+            { Semantics::ACQUIRE_RELEASE.bits() | Semantics::UNIFORM_MEMORY.bits() },
+        >();
+    }
+}
+
+/// Only usable if the extension GL_KHR_shader_subgroup_basic is enabled.
+///
+/// The function subgroupMemoryBarrierShared() enforces the ordering of all
+/// memory transactions to shared variables issued within a single shader
+/// invocation, as viewed by other invocations in the same subgroup.
+///
+/// Only available in compute shaders.
+///
+/// Requires Capability `GroupNonUniform`.
+#[spirv_std_macros::gpu_only]
+#[doc(alias = "subgroupMemoryBarrierShared")]
+#[inline]
+pub unsafe fn subgroup_memory_barrier_shared() {
+    unsafe {
+        barrier::memory_barrier::<
+            SUBGROUP,
+            { Semantics::ACQUIRE_RELEASE.bits() | Semantics::WORKGROUP_MEMORY.bits() },
+        >();
+    }
+}
+
+/// Only usable if the extension GL_KHR_shader_subgroup_basic is enabled.
+///
+/// The function subgroupMemoryBarrierImage() enforces the ordering of all
+/// memory transactions to images issued within a single shader invocation, as
+/// viewed by other invocations in the same subgroup.
+///
+/// Requires Capability `GroupNonUniform`.
+#[spirv_std_macros::gpu_only]
+#[doc(alias = "subgroupMemoryBarrierImage")]
+#[inline]
+pub unsafe fn subgroup_memory_barrier_image() {
+    unsafe {
+        barrier::memory_barrier::<
+            SUBGROUP,
+            { Semantics::ACQUIRE_RELEASE.bits() | Semantics::IMAGE_MEMORY.bits() },
+        >();
+    }
+}
 
 /// Result is true only in the active invocation with the lowest id in the group, otherwise result is false.
 ///
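
A minimal usage sketch of the new execution barrier, not part of this diff: the entry-point attributes, the 64-thread workgroup size, the `spirv_std::arch` re-export path, and the assumption that the whole workgroup fits in one subgroup are all illustrative.

use spirv_std::glam::UVec3;
use spirv_std::spirv;

// Hypothetical kernel: each invocation publishes a value to workgroup-shared
// memory, then waits on its subgroup before reading a neighbouring lane's slot.
#[spirv(compute(threads(64)))]
pub fn main(
    #[spirv(local_invocation_id)] local_id: UVec3,
    #[spirv(workgroup)] shared: &mut [u32; 64],
    #[spirv(storage_buffer, descriptor_set = 0, binding = 0)] out: &mut [u32; 64],
) {
    let i = local_id.x as usize;
    shared[i] = local_id.x * 2;

    // Execution + memory barrier at Subgroup scope: every active invocation in
    // the subgroup reaches this point, and its prior stores (uniform, workgroup
    // and image memory) become visible to the rest of the subgroup, before any
    // invocation continues.
    unsafe { spirv_std::arch::subgroup_barrier() };

    // Only sound if lane i + 1 is in the same subgroup -- an assumption of this
    // sketch (e.g. a subgroup size of 64), not something the barrier enforces.
    out[i] = shared[(i + 1) % 64];
}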
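The memory-only variants order the calling invocation's memory transactions as seen by the rest of its subgroup but do not make other invocations wait, so they are the lighter choice when no execution synchronization is needed. A hedged sketch of picking the narrowest barrier (paths again assumed to be re-exported from `spirv_std::arch`):

// After writing to a coherent storage buffer, order those writes for the rest
// of the subgroup without also flushing shared or image memory:
unsafe { spirv_std::arch::subgroup_memory_barrier_buffer() };

// Compute shaders only: the same idea restricted to workgroup-shared memory.
unsafe { spirv_std::arch::subgroup_memory_barrier_shared() };

// When several address spaces are involved, the catch-all variant covers
// uniform, workgroup and image memory in one call:
unsafe { spirv_std::arch::subgroup_memory_barrier() };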