Skip to content

Commit

Permalink
Address review feedback
Browse files Browse the repository at this point in the history
Mostly minor changes in response to review. The main behavior change is to preserve use_cpu when hot-reloading (just the GPU) shaders.
  • Loading branch information
raphlinus committed Sep 18, 2023
1 parent d0798f3 commit 81f46fa
Show file tree
Hide file tree
Showing 3 changed files with 21 additions and 5 deletions.
4 changes: 2 additions & 2 deletions examples/with_winit/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ fn run(
let mut render_cx = render_cx;
#[cfg(not(target_arch = "wasm32"))]
let mut render_state = None::<RenderState>;
let use_cpu = args.use_cpu;
// The design of `RenderContext` forces delayed renderer initialisation to
// not work on wasm, as WASM futures effectively must be 'static.
// Otherwise, this could work by sending the result to event_loop.proxy
Expand All @@ -87,7 +88,7 @@ fn run(
&RendererOptions {
surface_format: Some(render_state.surface.format),
timestamp_period: render_cx.devices[id].queue.get_timestamp_period(),
use_cpu: false,
use_cpu: use_cpu,
},
)
.expect("Could create renderer"),
Expand Down Expand Up @@ -129,7 +130,6 @@ fn run(
let mut profile_stored = None;
let mut prev_scene_ix = scene_ix - 1;
let mut profile_taken = Instant::now();
let use_cpu = args.use_cpu;
// _event_loop is used on non-wasm platforms to create new windows
event_loop.run(move |event, _event_loop, control_flow| match event {
Event::WindowEvent {
Expand Down
3 changes: 3 additions & 0 deletions src/cpu_dispatch.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
// Copyright 2023 The Vello authors
// SPDX-License-Identifier: Apache-2.0 OR MIT

//! Support for CPU implementations of compute shaders.

use std::{
Expand Down
19 changes: 16 additions & 3 deletions src/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -409,6 +409,14 @@ impl Engine {
// println!("dispatching {:?} with {} bindings", wg_size, bindings.len());
let shader = &self.shaders[shader_id.0];
if let Some(cpu_shader) = shader.cpu_shader {
// The current strategy is to run the CPU shader synchronously. This
// works because there is currently the added constraint that data
// can only flow from CPU to GPU, not the other way around. If and
// when we implement that, we will need to defer the execution. Of
course, we will also need to wire up more async synchronization
// mechanisms, as the CPU dispatch can't run until the preceding
// command buffer submission completes (and, in WebGPU, the async
mapping operations on the buffers complete).
let resources =
transient_map.create_cpu_resources(&mut self.bind_map, bindings);
cpu_shader(wg_size.0, &resources);
Expand All @@ -435,6 +443,7 @@ impl Engine {
Command::DispatchIndirect(shader_id, proxy, offset, bindings) => {
let shader = &self.shaders[shader_id.0];
if let Some(cpu_shader) = shader.cpu_shader {
// Same consideration as above about running the CPU shader synchronously.
let n_wg;
if let CpuBinding::BufferRW(b) = self.bind_map.get_cpu_buf(proxy.id) {
let slice = b.borrow();
Expand Down Expand Up @@ -496,9 +505,7 @@ impl Engine {
if let Some(size) = size {
slice = &mut slice[..size.get() as usize];
}
for x in slice {
*x = 0;
}
slice.fill(0);
}
}
} else {
Expand Down Expand Up @@ -860,6 +867,11 @@ impl ResourcePool {
}

impl BindMapBuffer {
// Upload a buffer from CPU to GPU if needed.
//
// Note data flow is one way only, from CPU to GPU. Once this method is
// called, the buffer is no longer materialized on CPU, and cannot be
// accessed from a CPU shader.
fn upload_if_needed(
&mut self,
proxy: &BufProxy,
Expand Down Expand Up @@ -1031,6 +1043,7 @@ impl<'a> TransientBindMap<'a> {
match resource {
ResourceProxy::Buf(buf) => match self.bufs.get(&buf.id) {
Some(TransientBuf::Cpu(_)) => (),
Some(TransientBuf::Gpu(_)) => panic!("buffer was already materialized on GPU"),
_ => bind_map.materialize_cpu_buf(buf),
},
ResourceProxy::Image(_) => todo!(),
Expand Down

0 comments on commit 81f46fa

Please sign in to comment.