
Commit d291571

[wgpu] Move Arcs to dispatch (#6850)
1 parent 1509512 commit d291571

27 files changed: +390, -400 lines
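The same pattern repeats across every file below: the public handle types (Adapter, BindGroup, Buffer, and so on) stop wrapping their dispatch::Dispatch* value in an Arc and store it directly. A minimal, hypothetical sketch of the before/after shape, assuming the dispatch layer now carries the reference count internally (that side of the change is not shown in the files excerpted here); the type names are stand-ins, not the real wgpu code:

// Hypothetical sketch of "move Arcs to dispatch" (stand-in types, not wgpu itself).
use std::sync::Arc;

// Stand-in for a backend resource (e.g. a wgpu-core adapter).
#[derive(Debug)]
pub struct CoreAdapter;

mod before {
    use super::*;

    // The dispatch value was plain, so the public handle supplied the Arc.
    #[derive(Debug)]
    pub struct DispatchAdapter {
        pub core: CoreAdapter,
    }

    #[derive(Debug, Clone)]
    pub struct Adapter {
        pub inner: Arc<DispatchAdapter>, // Arc lives in the handle
    }
}

mod after {
    use super::*;

    // The Arc moves into the dispatch layer, so the handle stores the
    // dispatch value directly and Clone just bumps the inner count.
    #[derive(Debug, Clone)]
    pub struct DispatchAdapter {
        pub core: Arc<CoreAdapter>, // Arc lives in dispatch
    }

    #[derive(Debug, Clone)]
    pub struct Adapter {
        pub inner: DispatchAdapter,
    }
}

fn main() {
    // Old shape: handle wraps the dispatch value in an Arc.
    let _legacy = before::Adapter {
        inner: Arc::new(before::DispatchAdapter { core: CoreAdapter }),
    };

    // New shape: handle stores the dispatch value; the Arc is inside it.
    let a = after::Adapter {
        inner: after::DispatchAdapter {
            core: Arc::new(CoreAdapter),
        },
    };
    let b = a.clone();
    // Cloning the handle still only increments a reference count.
    assert_eq!(Arc::strong_count(&b.inner.core), 2);
    println!("{b:?}");
}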

wgpu/src/api/adapter.rs

Lines changed: 7 additions & 14 deletions
@@ -1,4 +1,4 @@
-use std::{future::Future, sync::Arc};
+use std::future::Future;
 
 use crate::*;
 
@@ -15,7 +15,7 @@ use crate::*;
 /// Corresponds to [WebGPU `GPUAdapter`](https://gpuweb.github.io/gpuweb/#gpu-adapter).
 #[derive(Debug, Clone)]
 pub struct Adapter {
-    pub(crate) inner: Arc<dispatch::DispatchAdapter>,
+    pub(crate) inner: dispatch::DispatchAdapter,
 }
 #[cfg(send_sync)]
 static_assertions::assert_impl_all!(Adapter: Send, Sync);
@@ -65,16 +65,9 @@ impl Adapter {
     ) -> impl Future<Output = Result<(Device, Queue), RequestDeviceError>> + WasmNotSend {
         let device = self.inner.request_device(desc, trace_path);
         async move {
-            device.await.map(|(device, queue)| {
-                (
-                    Device {
-                        inner: Arc::new(device),
-                    },
-                    Queue {
-                        inner: Arc::new(queue),
-                    },
-                )
-            })
+            device
+                .await
+                .map(|(device, queue)| (Device { inner: device }, Queue { inner: queue }))
         }
     }
 
@@ -100,10 +93,10 @@ impl Adapter {
 
         Ok((
             Device {
-                inner: Arc::new(device.into()),
+                inner: device.into(),
             },
            Queue {
-                inner: Arc::new(queue.into()),
+                inner: queue.into(),
             },
         ))
     }

wgpu/src/api/bind_group.rs

Lines changed: 1 addition & 3 deletions
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use crate::*;
 
 /// Handle to a binding group.
@@ -12,7 +10,7 @@ use crate::*;
 /// Corresponds to [WebGPU `GPUBindGroup`](https://gpuweb.github.io/gpuweb/#gpubindgroup).
 #[derive(Debug, Clone)]
 pub struct BindGroup {
-    pub(crate) inner: Arc<dispatch::DispatchBindGroup>,
+    pub(crate) inner: dispatch::DispatchBindGroup,
 }
 #[cfg(send_sync)]
 static_assertions::assert_impl_all!(BindGroup: Send, Sync);

wgpu/src/api/bind_group_layout.rs

Lines changed: 1 addition & 3 deletions
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use crate::*;
 
 /// Handle to a binding group layout.
@@ -15,7 +13,7 @@ use crate::*;
 /// https://gpuweb.github.io/gpuweb/#gpubindgrouplayout).
 #[derive(Debug, Clone)]
 pub struct BindGroupLayout {
-    pub(crate) inner: Arc<dispatch::DispatchBindGroupLayout>,
+    pub(crate) inner: dispatch::DispatchBindGroupLayout,
 }
 #[cfg(send_sync)]
 static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync);

wgpu/src/api/blas.rs

Lines changed: 2 additions & 3 deletions
@@ -1,6 +1,5 @@
 use crate::dispatch;
 use crate::{Buffer, Label};
-use std::sync::Arc;
 use wgt::WasmNotSendSync;
 
 /// Descriptor for the size defining attributes of a triangle geometry, for a bottom level acceleration structure.
@@ -44,7 +43,7 @@ static_assertions::assert_impl_all!(CreateBlasDescriptor<'_>: Send, Sync);
 /// [TlasPackage]: crate::TlasPackage
 #[derive(Debug, Clone)]
 pub struct TlasInstance {
-    pub(crate) blas: Arc<dispatch::DispatchBlas>,
+    pub(crate) blas: dispatch::DispatchBlas,
     /// Affine transform matrix 3x4 (rows x columns, row major order).
     pub transform: [f32; 12],
     /// Custom index for the instance used inside the shader.
@@ -138,7 +137,7 @@ static_assertions::assert_impl_all!(BlasBuildEntry<'_>: WasmNotSendSync);
 /// [Tlas]: crate::Tlas
 pub struct Blas {
     pub(crate) handle: Option<u64>,
-    pub(crate) inner: Arc<dispatch::DispatchBlas>,
+    pub(crate) inner: dispatch::DispatchBlas,
 }
 static_assertions::assert_impl_all!(Blas: WasmNotSendSync);

wgpu/src/api/buffer.rs

Lines changed: 20 additions & 44 deletions
@@ -8,15 +8,6 @@ use parking_lot::Mutex;
 
 use crate::*;
 
-#[derive(Debug)]
-pub(crate) struct BufferShared {
-    pub inner: dispatch::DispatchBuffer,
-    pub map_context: Mutex<MapContext>,
-    pub size: wgt::BufferAddress,
-    pub usage: BufferUsages,
-    // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
-}
-
 /// Handle to a GPU-accessible buffer.
 ///
 /// Created with [`Device::create_buffer`] or
@@ -179,12 +170,16 @@ pub(crate) struct BufferShared {
 /// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
 #[derive(Debug, Clone)]
 pub struct Buffer {
-    pub(crate) shared: Arc<BufferShared>,
+    pub(crate) inner: dispatch::DispatchBuffer,
+    pub(crate) map_context: Arc<Mutex<MapContext>>,
+    pub(crate) size: wgt::BufferAddress,
+    pub(crate) usage: BufferUsages,
+    // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
 }
 #[cfg(send_sync)]
 static_assertions::assert_impl_all!(Buffer: Send, Sync);
 
-crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .shared.inner);
+crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);
 
 impl Buffer {
     /// Return the binding view of the entire buffer.
@@ -212,7 +207,7 @@ impl Buffer {
         &self,
         hal_buffer_callback: F,
     ) -> R {
-        if let Some(buffer) = self.shared.inner.as_core_opt() {
+        if let Some(buffer) = self.inner.as_core_opt() {
             unsafe {
                 buffer
                     .context
@@ -239,7 +234,7 @@ impl Buffer {
     /// end of the buffer.
     pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
         let (offset, size) = range_to_offset_size(bounds);
-        check_buffer_bounds(self.shared.size, offset, size);
+        check_buffer_bounds(self.size, offset, size);
         BufferSlice {
             buffer: self,
             offset,
@@ -249,27 +244,27 @@ impl Buffer {
 
     /// Flushes any pending write operations and unmaps the buffer from host memory.
     pub fn unmap(&self) {
-        self.shared.map_context.lock().reset();
-        self.shared.inner.unmap();
+        self.map_context.lock().reset();
+        self.inner.unmap();
     }
 
     /// Destroy the associated native resources as soon as possible.
     pub fn destroy(&self) {
-        self.shared.inner.destroy();
+        self.inner.destroy();
     }
 
     /// Returns the length of the buffer allocation in bytes.
     ///
     /// This is always equal to the `size` that was specified when creating the buffer.
     pub fn size(&self) -> BufferAddress {
-        self.shared.size
+        self.size
     }
 
     /// Returns the allowed usages for this `Buffer`.
     ///
     /// This is always equal to the `usage` that was specified when creating the buffer.
     pub fn usage(&self) -> BufferUsages {
-        self.shared.usage
+        self.usage
     }
 }
 
@@ -336,7 +331,7 @@ impl<'a> BufferSlice<'a> {
         mode: MapMode,
         callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
     ) {
-        let mut mc = self.buffer.shared.map_context.lock();
+        let mut mc = self.buffer.map_context.lock();
         assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped");
         let end = match self.size {
             Some(s) => self.offset + s.get(),
@@ -345,7 +340,6 @@ impl<'a> BufferSlice<'a> {
         mc.initial_range = self.offset..end;
 
         self.buffer
-            .shared
             .inner
             .map_async(mode, self.offset..end, Box::new(callback));
     }
@@ -365,13 +359,8 @@ impl<'a> BufferSlice<'a> {
     ///
     /// [mapped]: Buffer#mapping-buffers
     pub fn get_mapped_range(&self) -> BufferView<'a> {
-        let end = self
-            .buffer
-            .shared
-            .map_context
-            .lock()
-            .add(self.offset, self.size);
-        let range = self.buffer.shared.inner.get_mapped_range(self.offset..end);
+        let end = self.buffer.map_context.lock().add(self.offset, self.size);
+        let range = self.buffer.inner.get_mapped_range(self.offset..end);
         BufferView {
             slice: *self,
             inner: range,
@@ -388,15 +377,9 @@ impl<'a> BufferSlice<'a> {
     /// This is only available on WebGPU, on any other backends this will return `None`.
     #[cfg(webgpu)]
     pub fn get_mapped_range_as_array_buffer(&self) -> Option<js_sys::ArrayBuffer> {
-        let end = self
-            .buffer
-            .shared
-            .map_context
-            .lock()
-            .add(self.offset, self.size);
+        let end = self.buffer.map_context.lock().add(self.offset, self.size);
 
         self.buffer
-            .shared
             .inner
             .get_mapped_range_as_array_buffer(self.offset..end)
     }
@@ -416,17 +399,12 @@ impl<'a> BufferSlice<'a> {
     ///
     /// [mapped]: Buffer#mapping-buffers
     pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> {
-        let end = self
-            .buffer
-            .shared
-            .map_context
-            .lock()
-            .add(self.offset, self.size);
-        let range = self.buffer.shared.inner.get_mapped_range(self.offset..end);
+        let end = self.buffer.map_context.lock().add(self.offset, self.size);
+        let range = self.buffer.inner.get_mapped_range(self.offset..end);
         BufferViewMut {
             slice: *self,
             inner: range,
-            readable: self.buffer.shared.usage.contains(BufferUsages::MAP_READ),
+            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
         }
     }
 }
@@ -651,7 +629,6 @@ impl Drop for BufferView<'_> {
     fn drop(&mut self) {
         self.slice
             .buffer
-            .shared
            .map_context
             .lock()
             .remove(self.slice.offset, self.slice.size);
@@ -662,7 +639,6 @@ impl Drop for BufferViewMut<'_> {
     fn drop(&mut self) {
         self.slice
            .buffer
-            .shared
            .map_context
            .lock()
            .remove(self.slice.offset, self.slice.size);
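For Buffer, the former BufferShared indirection goes away: size and usage are plain values each clone carries, while the mapping bookkeeping moves behind Arc<Mutex<..>> so every clone of the handle still observes the same map state. A small sketch of that sharing, using hypothetical stand-in types (std's Mutex here rather than the parking_lot one used by wgpu):

// Sketch of the new Buffer layout: copyable metadata inline, shared map state behind Arc.
// Stand-in types only, not the real wgpu definitions.
use std::ops::Range;
use std::sync::{Arc, Mutex};

// Stand-in for wgpu's MapContext bookkeeping.
#[derive(Debug)]
struct MapContext {
    initial_range: Range<u64>,
}

#[derive(Debug, Clone)]
struct Buffer {
    map_context: Arc<Mutex<MapContext>>, // shared across clones
    size: u64,                           // plain per-clone copy
}

fn main() {
    let a = Buffer {
        map_context: Arc::new(Mutex::new(MapContext { initial_range: 0..0 })),
        size: 256,
    };
    let b = a.clone();

    // Recording a mapping through one handle...
    a.map_context.lock().unwrap().initial_range = 0..a.size;

    // ...is visible through the other, because the Mutex is shared, not copied.
    assert_eq!(b.map_context.lock().unwrap().initial_range, 0..256);
    println!("{b:?}");
}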

wgpu/src/api/command_encoder.rs

Lines changed: 5 additions & 6 deletions
@@ -122,9 +122,9 @@ impl CommandEncoder {
         copy_size: BufferAddress,
     ) {
         self.inner.copy_buffer_to_buffer(
-            &source.shared.inner,
+            &source.inner,
             source_offset,
-            &destination.shared.inner,
+            &destination.inner,
             destination_offset,
             copy_size,
         );
@@ -183,8 +183,7 @@ impl CommandEncoder {
     /// - `CLEAR_TEXTURE` extension not enabled
     /// - Range is out of bounds
     pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) {
-        self.inner
-            .clear_texture(&texture.shared.inner, subresource_range);
+        self.inner.clear_texture(&texture.inner, subresource_range);
     }
 
     /// Clears buffer to zero.
@@ -199,7 +198,7 @@ impl CommandEncoder {
         offset: BufferAddress,
         size: Option<BufferAddress>,
     ) {
-        self.inner.clear_buffer(&buffer.shared.inner, offset, size);
+        self.inner.clear_buffer(&buffer.inner, offset, size);
     }
 
     /// Inserts debug marker.
@@ -232,7 +231,7 @@ impl CommandEncoder {
             &query_set.inner,
             query_range.start,
             query_range.end - query_range.start,
-            &destination.shared.inner,
+            &destination.inner,
             destination_offset,
         );
     }

wgpu/src/api/compute_pass.rs

Lines changed: 2 additions & 2 deletions
@@ -52,7 +52,7 @@ impl ComputePass<'_> {
         Option<&'a BindGroup>: From<BG>,
     {
         let bg: Option<&BindGroup> = bind_group.into();
-        let bg = bg.map(|bg| &*bg.inner);
+        let bg = bg.map(|bg| &bg.inner);
         self.inner.set_bind_group(index, bg, offsets);
     }
 
@@ -92,7 +92,7 @@ impl ComputePass<'_> {
         indirect_offset: BufferAddress,
     ) {
         self.inner
-            .dispatch_workgroups_indirect(&indirect_buffer.shared.inner, indirect_offset);
+            .dispatch_workgroups_indirect(&indirect_buffer.inner, indirect_offset);
     }
 }

wgpu/src/api/compute_pipeline.rs

Lines changed: 2 additions & 6 deletions
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use crate::*;
 
 /// Handle to a compute pipeline.
@@ -10,7 +8,7 @@ use crate::*;
 /// Corresponds to [WebGPU `GPUComputePipeline`](https://gpuweb.github.io/gpuweb/#compute-pipeline).
 #[derive(Debug, Clone)]
 pub struct ComputePipeline {
-    pub(crate) inner: Arc<dispatch::DispatchComputePipeline>,
+    pub(crate) inner: dispatch::DispatchComputePipeline,
 }
 #[cfg(send_sync)]
 static_assertions::assert_impl_all!(ComputePipeline: Send, Sync);
@@ -27,9 +25,7 @@ impl ComputePipeline {
     /// This method will raise a validation error if there is no bind group layout at `index`.
     pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout {
         let bind_group = self.inner.get_bind_group_layout(index);
-        BindGroupLayout {
-            inner: Arc::new(bind_group),
-        }
+        BindGroupLayout { inner: bind_group }
     }
 }
