Skip to content

Commit 1504542

Browse files
bors[bot] and swiftcoder
committed
Merge #59
59: Map buffers async r=kvark a=swiftcoder This is not ready to merge. It works well enough for the example, but looking for some early feedback. In particular: - It's not really async, since gfx-hal's mapping APIs seem to all be immediate. - Async callbacks in Rust are really unpleasant due to lifetimes, so maybe it's just as well they aren't async. - There is no validation and no real error handling here yet. Co-authored-by: Tristam MacDonald <swiftcoder@gmail.com>
2 parents b2f58d0 + 8431da8 commit 1504542

File tree

6 files changed

+268
-29
lines changed

6 files changed

+268
-29
lines changed

Cargo.lock

Lines changed: 2 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

examples/hello_compute_rust/main.rs

Lines changed: 11 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -95,7 +95,17 @@ fn main() {
9595
}
9696
encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size);
9797

98-
// TODO: read the results back out of the staging buffer
98+
99+
staging_buffer.map_read_async(0, size, |result: wgpu::BufferMapAsyncResult<&[u8]>| {
100+
if let wgpu::BufferMapAsyncResult::Success(data) = result {
101+
let results = unsafe { ::std::slice::from_raw_parts(data.as_ptr() as *const u32, data.len() / std::mem::size_of::<u32>()) };
102+
println!("Times: {:?}", results);
103+
}
104+
105+
});
99106

100107
device.get_queue().submit(&[encoder.finish()]);
108+
109+
// TODO: why does calling unmap() inside the callback prevent the program from exiting?
110+
staging_buffer.unmap();
101111
}

gfx-examples/src/cube.rs

Lines changed: 41 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -115,16 +115,30 @@ impl framework::Example for Example {
115115
// Create the vertex and index buffers
116116
let vertex_size = mem::size_of::<Vertex>();
117117
let (vertex_data, index_data) = create_vertices();
118+
let vertex_buffer_length = vertex_data.len() * vertex_size;
119+
let index_buffer_length = index_data.len() * mem::size_of::<u16>();
118120
let vertex_buf = device.create_buffer(&wgpu::BufferDescriptor {
119-
size: (vertex_data.len() * vertex_size) as u32,
120-
usage: wgpu::BufferUsageFlags::VERTEX | wgpu::BufferUsageFlags::TRANSFER_DST,
121+
size: vertex_buffer_length as u32,
122+
usage: wgpu::BufferUsageFlags::VERTEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
121123
});
122-
vertex_buf.set_sub_data(0, framework::cast_slice(&vertex_data));
124+
125+
//vertex_buf.set_sub_data(0, framework::cast_slice(&vertex_data));
126+
vertex_buf.map_write_async(0, vertex_buffer_length as u32, |result: wgpu::BufferMapAsyncResult<&mut [u8]>| {
127+
if let wgpu::BufferMapAsyncResult::Success(data) = result {
128+
unsafe { std::ptr::copy_nonoverlapping(vertex_data.as_ptr() as *const u8, data.as_mut_ptr(), vertex_buffer_length) };
129+
}
130+
});
131+
123132
let index_buf = device.create_buffer(&wgpu::BufferDescriptor {
124-
size: (index_data.len() * 2) as u32,
125-
usage: wgpu::BufferUsageFlags::INDEX | wgpu::BufferUsageFlags::TRANSFER_DST,
133+
size: index_buffer_length as u32,
134+
usage: wgpu::BufferUsageFlags::INDEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
135+
});
136+
// index_buf.set_sub_data(0, framework::cast_slice(&index_data));
137+
index_buf.map_write_async(0, index_buffer_length as u32, |result: wgpu::BufferMapAsyncResult<&mut [u8]>| {
138+
if let wgpu::BufferMapAsyncResult::Success(data) = result {
139+
unsafe { std::ptr::copy_nonoverlapping(index_data.as_ptr() as *const u8, data.as_mut_ptr(), index_buffer_length) };
140+
}
126141
});
127-
index_buf.set_sub_data(0, framework::cast_slice(&index_data));
128142

129143
// Create pipeline layout
130144
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
@@ -163,14 +177,19 @@ impl framework::Example for Example {
163177
array_size: 1,
164178
dimension: wgpu::TextureDimension::D2,
165179
format: wgpu::TextureFormat::R8g8b8a8Unorm,
166-
usage: wgpu::TextureUsageFlags::SAMPLED | wgpu::TextureUsageFlags::TRANSFER_DST
180+
usage: wgpu::TextureUsageFlags::SAMPLED | wgpu::TextureUsageFlags::TRANSFER_DST,
167181
});
168182
let texture_view = texture.create_default_view();
169183
let temp_buf = device.create_buffer(&wgpu::BufferDescriptor {
170184
size: texels.len() as u32,
171-
usage: wgpu::BufferUsageFlags::TRANSFER_SRC | wgpu::BufferUsageFlags::TRANSFER_DST
185+
usage: wgpu::BufferUsageFlags::TRANSFER_SRC | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
186+
});
187+
// temp_buf.set_sub_data(0, &texels);
188+
temp_buf.map_write_async(0, texels.len() as u32, |result: wgpu::BufferMapAsyncResult<&mut [u8]>| {
189+
if let wgpu::BufferMapAsyncResult::Success(data) = result {
190+
unsafe { std::ptr::copy_nonoverlapping(texels.as_ptr() as *const u8, data.as_mut_ptr(), texels.len()) };
191+
}
172192
});
173-
temp_buf.set_sub_data(0, &texels);
174193
init_encoder.copy_buffer_to_texture(
175194
wgpu::BufferCopyView {
176195
buffer: &temp_buf,
@@ -207,11 +226,16 @@ impl framework::Example for Example {
207226
});
208227
let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
209228
size: 64,
210-
usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_DST,
229+
usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
211230
});
212231
let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
213232
let mx_ref: &[f32; 16] = mx_total.as_ref();
214-
uniform_buf.set_sub_data(0, framework::cast_slice(&mx_ref[..]));
233+
// uniform_buf.set_sub_data(0, framework::cast_slice(&mx_ref[..]));
234+
uniform_buf.map_write_async(0, 64, |result: wgpu::BufferMapAsyncResult<&mut [u8]>| {
235+
if let wgpu::BufferMapAsyncResult::Success(data) = result {
236+
unsafe { std::ptr::copy_nonoverlapping(mx_ref.as_ptr() as *const u8, data.as_mut_ptr(), 64) };
237+
}
238+
});
215239

216240
// Create bind group
217241
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
@@ -310,7 +334,12 @@ impl framework::Example for Example {
310334
fn resize(&mut self, sc_desc: &wgpu::SwapChainDescriptor, _device: &mut wgpu::Device) {
311335
let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
312336
let mx_ref: &[f32; 16] = mx_total.as_ref();
313-
self.uniform_buf.set_sub_data(0, framework::cast_slice(&mx_ref[..]));
337+
// self.uniform_buf.set_sub_data(0, framework::cast_slice(&mx_ref[..]));
338+
self.uniform_buf.map_write_async(0, 64, |result: wgpu::BufferMapAsyncResult<&mut [u8]>| {
339+
if let wgpu::BufferMapAsyncResult::Success(data) = result {
340+
unsafe { std::ptr::copy_nonoverlapping(mx_ref.as_ptr() as *const u8, data.as_mut_ptr(), 64) };
341+
}
342+
});
314343
}
315344

316345
fn render(&mut self, frame: &wgpu::SwapChainOutput, device: &mut wgpu::Device) {

0 commit comments

Comments (0)