From 4e139ed1991905474fa2cac862486533c4d35bb5 Mon Sep 17 00:00:00 2001 From: Connor Fitzgerald Date: Wed, 4 Dec 2024 11:00:14 -0600 Subject: [PATCH] Rework wgpu-rs Context (#6619) --- Cargo.lock | 60 +- Cargo.toml | 1 + tests/Cargo.toml | 10 +- tests/compile_tests/fail/cpass_lifetime.rs | 18 + .../compile_tests/fail/cpass_lifetime.stderr | 13 + tests/compile_tests/fail/rpass_lifetime.rs | 18 + .../compile_tests/fail/rpass_lifetime.stderr | 13 + tests/compile_tests/root.rs | 7 + tests/tests/external_texture.rs | 2 +- wgpu-core/src/global.rs | 8 +- wgpu-types/Cargo.toml | 2 +- wgpu/build.rs | 2 +- wgpu/src/api/adapter.rs | 115 +- wgpu/src/api/bind_group.rs | 15 +- wgpu/src/api/bind_group_layout.rs | 15 +- wgpu/src/api/blas.rs | 74 +- wgpu/src/api/buffer.rs | 88 +- wgpu/src/api/command_buffer.rs | 15 +- wgpu/src/api/command_encoder.rs | 270 +- wgpu/src/api/compute_pass.rs | 116 +- wgpu/src/api/compute_pipeline.rs | 22 +- wgpu/src/api/device.rs | 250 +- wgpu/src/api/instance.rs | 92 +- wgpu/src/api/mod.rs | 44 +- wgpu/src/api/pipeline_cache.rs | 17 +- wgpu/src/api/pipeline_layout.rs | 15 +- wgpu/src/api/query_set.rs | 15 +- wgpu/src/api/queue.rs | 87 +- wgpu/src/api/render_bundle.rs | 15 +- wgpu/src/api/render_bundle_encoder.rs | 107 +- wgpu/src/api/render_pass.rs | 231 +- wgpu/src/api/render_pipeline.rs | 22 +- wgpu/src/api/sampler.rs | 15 +- wgpu/src/api/shader_module.rs | 17 +- wgpu/src/api/surface.rs | 63 +- wgpu/src/api/surface_texture.rs | 19 +- wgpu/src/api/texture.rs | 38 +- wgpu/src/api/texture_view.rs | 28 +- wgpu/src/api/tlas.rs | 50 +- wgpu/src/backend/mod.rs | 4 +- wgpu/src/backend/webgpu.rs | 3023 +++++++------- .../webgpu/defined_non_null_js_value.rs | 2 +- wgpu/src/backend/wgpu_core.rs | 3582 +++++++++-------- wgpu/src/cmp.rs | 107 + wgpu/src/context.rs | 2914 -------------- wgpu/src/dispatch.rs | 735 ++++ wgpu/src/lib.rs | 15 +- wgpu/src/send_sync.rs | 27 - wgpu/src/util/belt.rs | 1 - wgpu/src/util/mod.rs | 11 +- 50 files changed, 4936 insertions(+), 7494 deletions(-) create mode 100644 tests/compile_tests/fail/cpass_lifetime.rs create mode 100644 tests/compile_tests/fail/cpass_lifetime.stderr create mode 100644 tests/compile_tests/fail/rpass_lifetime.rs create mode 100644 tests/compile_tests/fail/rpass_lifetime.stderr create mode 100644 tests/compile_tests/root.rs create mode 100644 wgpu/src/cmp.rs delete mode 100644 wgpu/src/context.rs create mode 100644 wgpu/src/dispatch.rs delete mode 100644 wgpu/src/send_sync.rs diff --git a/Cargo.lock b/Cargo.lock index 501b5a86b7..dfab4e2ec8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1320,6 +1320,12 @@ dependencies = [ "bytemuck", ] +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + [[package]] name = "glow" version = "0.16.0" @@ -1844,7 +1850,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -2875,6 +2881,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_v8" version = "0.181.0" @@ -3108,6 +3123,12 @@ dependencies = [ "syn", ] +[[package]] +name = 
"target-triple" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42a4d50cdb458045afc8131fd91b64904da29548bcb63c7236e0844936c13078" + [[package]] name = "termcolor" version = "1.4.1" @@ -3240,11 +3261,26 @@ dependencies = [ "syn", ] +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -3253,6 +3289,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", + "serde", + "serde_spanned", "toml_datetime", "winnow", ] @@ -3324,7 +3362,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", - "windows-targets 0.48.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "trybuild" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dcd332a5496c026f1e14b7f3d2b7bd98e509660c04239c58b0ba38a12daded4" +dependencies = [ + "glob", + "serde", + "serde_derive", + "serde_json", + "target-triple", + "termcolor", + "toml", ] [[package]] @@ -3926,6 +3979,7 @@ dependencies = [ "serde", "serde_json", "strum", + "trybuild", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test", @@ -3981,7 +4035,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index acc15ee410..af3383700c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -124,6 +124,7 @@ serde_json = "1.0.133" smallvec = "1" static_assertions = "1.1.0" strum = { version = "0.25.0", features = ["derive"] } +trybuild = "1" tracy-client = "0.17" thiserror = "1.0.69" wgpu = { version = "23.0.1", path = "./wgpu", default-features = false } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index a9663a3f73..db91fb8665 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -16,6 +16,11 @@ name = "wgpu-test" path = "tests/root.rs" harness = false +[[test]] +name = "wgpu-compile-test" +path = "compile_tests/root.rs" +harness = true + [features] webgl = ["wgpu/webgl"] @@ -27,6 +32,7 @@ bytemuck.workspace = true cfg-if.workspace = true ctor.workspace = true futures-lite.workspace = true +glam.workspace = true itertools.workspace = true libtest-mimic.workspace = true log.workspace = true @@ -37,10 +43,10 @@ profiling.workspace = true serde_json.workspace = true serde.workspace = true strum = { workspace = true, features = ["derive"] } -wgpu-macros.workspace = true +trybuild.workspace = true wgpu = { workspace = true, features = ["wgsl"] } +wgpu-macros.workspace = true wgt = { workspace = true, features = ["serde"] } -glam.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] env_logger.workspace = true diff --git a/tests/compile_tests/fail/cpass_lifetime.rs b/tests/compile_tests/fail/cpass_lifetime.rs new file mode 
100644 index 0000000000..3a26367187 --- /dev/null +++ b/tests/compile_tests/fail/cpass_lifetime.rs @@ -0,0 +1,18 @@ +// Test to ensure that ComputePass without forget_lifetime does not compile +// when the ComputePass is dropped before the CommandBuffer is finished. +// +// See #6145 for more info. + +fn main() { + let instance = wgpu::Instance::new(Default::default()); + let adapter = pollster::block_on(instance.request_adapter(&Default::default())).unwrap(); + let (device, queue) = + pollster::block_on(adapter.request_device(&Default::default(), None)).unwrap(); + + let mut encoder = device.create_command_encoder(&Default::default()); + let _compute_pass = encoder.begin_compute_pass(&Default::default()); + // set up the compute pass... + + let cmd_buffer = encoder.finish(); + queue.submit([cmd_buffer]); +} diff --git a/tests/compile_tests/fail/cpass_lifetime.stderr b/tests/compile_tests/fail/cpass_lifetime.stderr new file mode 100644 index 0000000000..e282bccfdb --- /dev/null +++ b/tests/compile_tests/fail/cpass_lifetime.stderr @@ -0,0 +1,13 @@ +error[E0505]: cannot move out of `encoder` because it is borrowed + --> compile_tests/fail/cpass_lifetime.rs:16:22 + | +12 | let mut encoder = device.create_command_encoder(&Default::default()); + | ----------- binding `encoder` declared here +13 | let _compute_pass = encoder.begin_compute_pass(&Default::default()); + | ------- borrow of `encoder` occurs here +... +16 | let cmd_buffer = encoder.finish(); + | ^^^^^^^ move out of `encoder` occurs here +17 | queue.submit([cmd_buffer]); +18 | } + | - borrow might be used here, when `_compute_pass` is dropped and runs the destructor for type `wgpu::ComputePass<'_>` diff --git a/tests/compile_tests/fail/rpass_lifetime.rs b/tests/compile_tests/fail/rpass_lifetime.rs new file mode 100644 index 0000000000..781460a8e1 --- /dev/null +++ b/tests/compile_tests/fail/rpass_lifetime.rs @@ -0,0 +1,18 @@ +// Test to ensure that RenderPass without forget_lifetime does not compile +// when the RenderPass is dropped before the CommandBuffer is finished. +// +// See #6145 for more info. + +fn main() { + let instance = wgpu::Instance::new(Default::default()); + let adapter = pollster::block_on(instance.request_adapter(&Default::default())).unwrap(); + let (device, queue) = + pollster::block_on(adapter.request_device(&Default::default(), None)).unwrap(); + + let mut encoder = device.create_command_encoder(&Default::default()); + let _render_pass = encoder.begin_render_pass(&Default::default()); + // set up the render pass... + + let cmd_buffer = encoder.finish(); + queue.submit([cmd_buffer]); +} diff --git a/tests/compile_tests/fail/rpass_lifetime.stderr b/tests/compile_tests/fail/rpass_lifetime.stderr new file mode 100644 index 0000000000..2f0d9f0908 --- /dev/null +++ b/tests/compile_tests/fail/rpass_lifetime.stderr @@ -0,0 +1,13 @@ +error[E0505]: cannot move out of `encoder` because it is borrowed + --> compile_tests/fail/rpass_lifetime.rs:16:22 + | +12 | let mut encoder = device.create_command_encoder(&Default::default()); + | ----------- binding `encoder` declared here +13 | let _render_pass = encoder.begin_render_pass(&Default::default()); + | ------- borrow of `encoder` occurs here +...
+16 | let cmd_buffer = encoder.finish(); + | ^^^^^^^ move out of `encoder` occurs here +17 | queue.submit([cmd_buffer]); +18 | } + | - borrow might be used here, when `_render_pass` is dropped and runs the destructor for type `wgpu::RenderPass<'_>` diff --git a/tests/compile_tests/root.rs b/tests/compile_tests/root.rs new file mode 100644 index 0000000000..f35a0cd24b --- /dev/null +++ b/tests/compile_tests/root.rs @@ -0,0 +1,7 @@ +// Tests that ensure that various constructs that should not compile do not compile. + +#[test] +fn compile_fail() { + let t = trybuild::TestCases::new(); + t.compile_fail("compile_tests/fail/*.rs"); +} diff --git a/tests/tests/external_texture.rs b/tests/tests/external_texture.rs index f577afc0d0..60c4eb1b2f 100644 --- a/tests/tests/external_texture.rs +++ b/tests/tests/external_texture.rs @@ -274,7 +274,7 @@ static IMAGE_BITMAP_IMPORT: GpuTestConfiguration = origin: src_origin, flip_y: src_flip_y, }, - wgpu::CopyExternalImageDestInfo { + wgt::CopyExternalImageDestInfo { texture: &texture, mip_level: 0, origin: dest_origin, diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index cd4508d14a..bf68ed2f1d 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{fmt, sync::Arc}; use crate::{ hal_api::HalApi, @@ -85,6 +85,12 @@ impl Global { } } +impl fmt::Debug for Global { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Global").finish() + } +} + impl Drop for Global { fn drop(&mut self) { profiling::scope!("Global::drop"); diff --git a/wgpu-types/Cargo.toml b/wgpu-types/Cargo.toml index e79ae330e5..3dc6d7f799 100644 --- a/wgpu-types/Cargo.toml +++ b/wgpu-types/Cargo.toml @@ -35,7 +35,7 @@ serde = ["dep:serde"] counters = [] [dependencies] -bitflags.workspace = true +bitflags = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"], optional = true } [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/wgpu/build.rs b/wgpu/build.rs index 1f07f7ebba..e77c0fdd39 100644 --- a/wgpu/build.rs +++ b/wgpu/build.rs @@ -4,7 +4,7 @@ fn main() { webgl: { all(target_arch = "wasm32", not(target_os = "emscripten"), feature = "webgl") }, webgpu: { all(target_arch = "wasm32", not(target_os = "emscripten"), feature = "webgpu") }, Emscripten: { all(target_arch = "wasm32", target_os = "emscripten") }, - wgpu_core: { any(native, webgl, emscripten) }, + wgpu_core: { any(native, webgl, Emscripten) }, send_sync: { any( not(target_arch = "wasm32"), all(feature = "fragile-send-sync-non-atomic-wasm", not(target_feature = "atomics")) diff --git a/wgpu/src/api/adapter.rs b/wgpu/src/api/adapter.rs index 10c6f4460c..8fb5225bfe 100644 --- a/wgpu/src/api/adapter.rs +++ b/wgpu/src/api/adapter.rs @@ -1,6 +1,5 @@ -use std::{future::Future, sync::Arc, thread}; +use std::future::Future; -use crate::context::{DeviceRequest, DynContext}; use crate::*; /// Handle to a physical graphics and/or compute device. @@ -16,19 +15,12 @@ use crate::*; /// Corresponds to [WebGPU `GPUAdapter`](https://gpuweb.github.io/gpuweb/#gpu-adapter). 
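///
/// A minimal sketch of obtaining an adapter, assuming the default instance and
/// adapter options are acceptable for the application:
///
/// ```no_run
/// # async fn example() {
/// let instance = wgpu::Instance::new(Default::default());
/// let adapter = instance
///     .request_adapter(&wgpu::RequestAdapterOptions::default())
///     .await
///     .expect("no suitable adapter found");
/// # }
/// ```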
#[derive(Debug)] pub struct Adapter { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchAdapter, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Adapter: Send, Sync); -impl Drop for Adapter { - fn drop(&mut self) { - if !thread::panicking() { - self.context.adapter_drop(self.data.as_ref()) - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(Adapter => .inner); pub use wgt::RequestAdapterOptions as RequestAdapterOptionsBase; /// Additional information required when requesting an adapter. @@ -71,31 +63,11 @@ impl Adapter { desc: &DeviceDescriptor<'_>, trace_path: Option<&std::path::Path>, ) -> impl Future> + WasmNotSend { - let context = Arc::clone(&self.context); - let device = DynContext::adapter_request_device( - &*self.context, - self.data.as_ref(), - desc, - trace_path, - ); + let device = self.inner.request_device(desc, trace_path); async move { - device.await.map( - |DeviceRequest { - device_data, - queue_data, - }| { - ( - Device { - context: Arc::clone(&context), - data: device_data, - }, - Queue { - context, - data: queue_data, - }, - ) - }, - ) + device + .await + .map(|(device, queue)| (Device { inner: device }, Queue { inner: queue })) } } @@ -112,33 +84,21 @@ impl Adapter { desc: &DeviceDescriptor<'_>, trace_path: Option<&std::path::Path>, ) -> Result<(Device, Queue), RequestDeviceError> { - let context = Arc::clone(&self.context); - unsafe { - self.context - .as_any() - .downcast_ref::() - // Part of the safety requirements is that the device was generated from the same adapter. - // Therefore, unwrap is fine here since only WgpuCoreContext based adapters have the ability to create hal devices. - .unwrap() - .create_device_from_hal( - crate::context::downcast_ref(self.data.as_ref()), - hal_device, - desc, - trace_path, - ) - } - .map(|(device, queue)| { - ( - Device { - context: Arc::clone(&context), - data: Box::new(device), - }, - Queue { - context, - data: Box::new(queue), - }, - ) - }) + let core_adapter = self.inner.as_core(); + let (device, queue) = unsafe { + core_adapter + .context + .create_device_from_hal(core_adapter, hal_device, desc, trace_path) + }?; + + Ok(( + Device { + inner: device.into(), + }, + Queue { + inner: queue.into(), + }, + )) } /// Apply a callback to this `Adapter`'s underlying backend adapter. @@ -165,16 +125,11 @@ impl Adapter { &self, hal_adapter_callback: F, ) -> R { - if let Some(ctx) = self - .context - .as_any() - .downcast_ref::() - { + if let Some(adapter) = self.inner.as_core_opt() { unsafe { - ctx.adapter_as_hal::( - crate::context::downcast_ref(self.data.as_ref()), - hal_adapter_callback, - ) + adapter + .context + .adapter_as_hal::(adapter, hal_adapter_callback) } } else { hal_adapter_callback(None) @@ -183,31 +138,27 @@ impl Adapter { /// Returns whether this adapter may present to the passed surface. pub fn is_surface_supported(&self, surface: &Surface<'_>) -> bool { - DynContext::adapter_is_surface_supported( - &*self.context, - self.data.as_ref(), - surface.surface_data.as_ref(), - ) + self.inner.is_surface_supported(&surface.inner) } /// The features which can be used to create devices on this adapter. pub fn features(&self) -> Features { - DynContext::adapter_features(&*self.context, self.data.as_ref()) + self.inner.features() } /// The best limits which can be used to create devices on this adapter. pub fn limits(&self) -> Limits { - DynContext::adapter_limits(&*self.context, self.data.as_ref()) + self.inner.limits() } /// Get info about the adapter itself. 
pub fn get_info(&self) -> AdapterInfo { - DynContext::adapter_get_info(&*self.context, self.data.as_ref()) + self.inner.get_info() } /// Get info about the adapter's downlevel capabilities. pub fn get_downlevel_capabilities(&self) -> DownlevelCapabilities { - DynContext::adapter_downlevel_capabilities(&*self.context, self.data.as_ref()) + self.inner.downlevel_capabilities() } /// Returns the features supported for a given texture format by this adapter. @@ -215,7 +166,7 @@ impl Adapter { /// Note that the WebGPU spec further restricts the available usages/features. /// To disable these restrictions on a device, request the [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] feature. pub fn get_texture_format_features(&self, format: TextureFormat) -> TextureFormatFeatures { - DynContext::adapter_get_texture_format_features(&*self.context, self.data.as_ref(), format) + self.inner.get_texture_format_features(format) } /// Generates a timestamp using the clock used by the presentation engine. @@ -240,6 +191,6 @@ impl Adapter { // /// [Instant]: std::time::Instant pub fn get_presentation_timestamp(&self) -> PresentationTimestamp { - DynContext::adapter_get_presentation_timestamp(&*self.context, self.data.as_ref()) + self.inner.get_presentation_timestamp() } } diff --git a/wgpu/src/api/bind_group.rs b/wgpu/src/api/bind_group.rs index 3b724e058b..1cb7337855 100644 --- a/wgpu/src/api/bind_group.rs +++ b/wgpu/src/api/bind_group.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a binding group. @@ -12,21 +10,12 @@ use crate::*; /// Corresponds to [WebGPU `GPUBindGroup`](https://gpuweb.github.io/gpuweb/#gpubindgroup). #[derive(Debug)] pub struct BindGroup { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchBindGroup, } #[cfg(send_sync)] static_assertions::assert_impl_all!(BindGroup: Send, Sync); -super::impl_partialeq_eq_hash!(BindGroup); - -impl Drop for BindGroup { - fn drop(&mut self) { - if !thread::panicking() { - self.context.bind_group_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(BindGroup => .inner); /// Resource that can be bound to a pipeline. /// diff --git a/wgpu/src/api/bind_group_layout.rs b/wgpu/src/api/bind_group_layout.rs index db335689ca..191752a239 100644 --- a/wgpu/src/api/bind_group_layout.rs +++ b/wgpu/src/api/bind_group_layout.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a binding group layout. @@ -15,21 +13,12 @@ use crate::*; /// https://gpuweb.github.io/gpuweb/#gpubindgrouplayout). #[derive(Debug)] pub struct BindGroupLayout { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchBindGroupLayout, } #[cfg(send_sync)] static_assertions::assert_impl_all!(BindGroupLayout: Send, Sync); -super::impl_partialeq_eq_hash!(BindGroupLayout); - -impl Drop for BindGroupLayout { - fn drop(&mut self) { - if !thread::panicking() { - self.context.bind_group_layout_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(BindGroupLayout => .inner); /// Describes a [`BindGroupLayout`].
/// diff --git a/wgpu/src/api/blas.rs b/wgpu/src/api/blas.rs index a0dc97c7dd..b64c01ba8f 100644 --- a/wgpu/src/api/blas.rs +++ b/wgpu/src/api/blas.rs @@ -1,7 +1,6 @@ -use crate::context::{Context, DynContext}; -use crate::{Buffer, Data, Label, C}; +use crate::dispatch; +use crate::{Buffer, Label}; use std::sync::Arc; -use std::thread; use wgt::WasmNotSendSync; /// Descriptor for the size defining attributes of a triangle geometry, for a bottom level acceleration structure. @@ -88,22 +87,6 @@ impl TlasInstance { } } -pub(crate) struct DynContextTlasInstance<'a> { - pub(crate) blas: &'a Data, - pub(crate) transform: &'a [f32; 12], - pub(crate) custom_index: u32, - pub(crate) mask: u8, -} - -/// Context version of [TlasInstance]. -#[allow(dead_code)] -pub struct ContextTlasInstance<'a, T: Context> { - pub(crate) blas_data: &'a T::BlasData, - pub(crate) transform: &'a [f32; 12], - pub(crate) custom_index: u32, - pub(crate) mask: u8, -} - #[derive(Debug)] /// Definition for a triangle geometry for a Bottom Level Acceleration Structure (BLAS). /// @@ -147,8 +130,7 @@ static_assertions::assert_impl_all!(BlasBuildEntry<'_>: WasmNotSendSync); #[derive(Debug)] pub(crate) struct BlasShared { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchBlas, } static_assertions::assert_impl_all!(BlasShared: WasmNotSendSync); @@ -166,6 +148,8 @@ pub struct Blas { } static_assertions::assert_impl_all!(Blas: WasmNotSendSync); +crate::cmp::impl_eq_ord_hash_proxy!(Blas => .shared.inner); + impl Blas { /// Raw handle to the acceleration structure, used inside raw instance buffers. pub fn handle(&self) -> Option { @@ -173,45 +157,17 @@ impl Blas { } /// Destroy the associated native resources as soon as possible. pub fn destroy(&self) { - DynContext::blas_destroy(&*self.shared.context, self.shared.data.as_ref()); + self.shared.inner.destroy(); } } -impl Drop for BlasShared { - fn drop(&mut self) { - if !thread::panicking() { - self.context.blas_drop(self.data.as_ref()); - } - } -} - -pub(crate) struct DynContextBlasTriangleGeometry<'a> { - pub(crate) size: &'a BlasTriangleGeometrySizeDescriptor, - pub(crate) vertex_buffer: &'a Data, - pub(crate) index_buffer: Option<&'a Data>, - pub(crate) transform_buffer: Option<&'a Data>, - pub(crate) first_vertex: u32, - pub(crate) vertex_stride: wgt::BufferAddress, - pub(crate) index_buffer_offset: Option, - pub(crate) transform_buffer_offset: Option, -} - -pub(crate) enum DynContextBlasGeometries<'a> { - TriangleGeometries(Box> + 'a>), -} - -pub(crate) struct DynContextBlasBuildEntry<'a> { - pub(crate) blas_data: &'a Data, - pub(crate) geometries: DynContextBlasGeometries<'a>, -} - /// Context version of [BlasTriangleGeometry]. #[allow(dead_code)] -pub struct ContextBlasTriangleGeometry<'a, T: Context> { +pub struct ContextBlasTriangleGeometry<'a> { pub(crate) size: &'a BlasTriangleGeometrySizeDescriptor, - pub(crate) vertex_buffer: &'a T::BufferData, - pub(crate) index_buffer: Option<&'a T::BufferData>, - pub(crate) transform_buffer: Option<&'a T::BufferData>, + pub(crate) vertex_buffer: &'a dispatch::DispatchBuffer, + pub(crate) index_buffer: Option<&'a dispatch::DispatchBuffer>, + pub(crate) transform_buffer: Option<&'a dispatch::DispatchBuffer>, pub(crate) first_vertex: u32, pub(crate) vertex_stride: wgt::BufferAddress, pub(crate) index_buffer_offset: Option, @@ -219,14 +175,14 @@ pub struct ContextBlasTriangleGeometry<'a, T: Context> { } /// Context version of [BlasGeometries]. 
-pub enum ContextBlasGeometries<'a, T: Context> { +pub enum ContextBlasGeometries<'a> { /// Triangle geometries. - TriangleGeometries(Box> + 'a>), + TriangleGeometries(Box> + 'a>), } /// Context version see [BlasBuildEntry]. #[allow(dead_code)] -pub struct ContextBlasBuildEntry<'a, T: Context> { - pub(crate) blas_data: &'a T::BlasData, - pub(crate) geometries: ContextBlasGeometries<'a, T>, +pub struct ContextBlasBuildEntry<'a> { + pub(crate) blas: &'a dispatch::DispatchBlas, + pub(crate) geometries: ContextBlasGeometries<'a>, } diff --git a/wgpu/src/api/buffer.rs b/wgpu/src/api/buffer.rs index fa9c7f9ec0..eacfd9ecc5 100644 --- a/wgpu/src/api/buffer.rs +++ b/wgpu/src/api/buffer.rs @@ -1,13 +1,10 @@ use std::{ error, fmt, ops::{Bound, Deref, DerefMut, Range, RangeBounds}, - sync::Arc, - thread, }; use parking_lot::Mutex; -use crate::context::DynContext; use crate::*; /// Handle to a GPU-accessible buffer. @@ -172,8 +169,7 @@ use crate::*; /// [`MAP_WRITE`]: BufferUsages::MAP_WRITE #[derive(Debug)] pub struct Buffer { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchBuffer, pub(crate) map_context: Mutex, pub(crate) size: wgt::BufferAddress, pub(crate) usage: BufferUsages, @@ -182,7 +178,7 @@ pub struct Buffer { #[cfg(send_sync)] static_assertions::assert_impl_all!(Buffer: Send, Sync); -super::impl_partialeq_eq_hash!(Buffer); +crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner); impl Buffer { /// Return the binding view of the entire buffer. @@ -210,16 +206,11 @@ impl Buffer { &self, hal_buffer_callback: F, ) -> R { - if let Some(ctx) = self - .context - .as_any() - .downcast_ref::() - { + if let Some(buffer) = self.inner.as_core_opt() { unsafe { - ctx.buffer_as_hal::( - crate::context::downcast_ref(self.data.as_ref()), - hal_buffer_callback, - ) + buffer + .context + .buffer_as_hal::(buffer, hal_buffer_callback) } } else { hal_buffer_callback(None) @@ -253,12 +244,12 @@ impl Buffer { /// Flushes any pending write operations and unmaps the buffer from host memory. pub fn unmap(&self) { self.map_context.lock().reset(); - DynContext::buffer_unmap(&*self.context, self.data.as_ref()); + self.inner.unmap(); } /// Destroy the associated native resources as soon as possible. pub fn destroy(&self) { - DynContext::buffer_destroy(&*self.context, self.data.as_ref()); + self.inner.destroy(); } /// Returns the length of the buffer allocation in bytes. @@ -347,13 +338,9 @@ impl<'a> BufferSlice<'a> { }; mc.initial_range = self.offset..end; - DynContext::buffer_map_async( - &*self.buffer.context, - self.buffer.data.as_ref(), - mode, - self.offset..end, - Box::new(callback), - ) + self.buffer + .inner + .map_async(mode, self.offset..end, Box::new(callback)); } /// Gain read-only access to the bytes of a [mapped] [`Buffer`]. @@ -372,12 +359,11 @@ impl<'a> BufferSlice<'a> { /// [mapped]: Buffer#mapping-buffers pub fn get_mapped_range(&self) -> BufferView<'a> { let end = self.buffer.map_context.lock().add(self.offset, self.size); - let data = DynContext::buffer_get_mapped_range( - &*self.buffer.context, - self.buffer.data.as_ref(), - self.offset..end, - ); - BufferView { slice: *self, data } + let range = self.buffer.inner.get_mapped_range(self.offset..end); + BufferView { + slice: *self, + inner: range, + } } /// Synchronously and immediately map a buffer for reading. If the buffer is not immediately mappable @@ -390,15 +376,11 @@ impl<'a> BufferSlice<'a> { /// This is only available on WebGPU, on any other backends this will return `None`. 
#[cfg(webgpu)] pub fn get_mapped_range_as_array_buffer(&self) -> Option { + let end = self.buffer.map_context.lock().add(self.offset, self.size); + self.buffer - .context - .as_any() - .downcast_ref::() - .map(|ctx| { - let buffer_data = crate::context::downcast_ref(self.buffer.data.as_ref()); - let end = self.buffer.map_context.lock().add(self.offset, self.size); - ctx.buffer_get_mapped_range_as_array_buffer(buffer_data, self.offset..end) - }) + .inner + .get_mapped_range_as_array_buffer(self.offset..end) } /// Gain write access to the bytes of a [mapped] [`Buffer`]. @@ -417,14 +399,10 @@ impl<'a> BufferSlice<'a> { /// [mapped]: Buffer#mapping-buffers pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> { let end = self.buffer.map_context.lock().add(self.offset, self.size); - let data = DynContext::buffer_get_mapped_range( - &*self.buffer.context, - self.buffer.data.as_ref(), - self.offset..end, - ); + let range = self.buffer.inner.get_mapped_range(self.offset..end); BufferViewMut { slice: *self, - data, + inner: range, readable: self.buffer.usage.contains(BufferUsages::MAP_READ), } } @@ -577,7 +555,7 @@ static_assertions::assert_impl_all!(MapMode: Send, Sync); #[derive(Debug)] pub struct BufferView<'a> { slice: BufferSlice<'a>, - data: Box, + inner: dispatch::DispatchBufferMappedRange, } impl std::ops::Deref for BufferView<'_> { @@ -585,14 +563,14 @@ impl std::ops::Deref for BufferView<'_> { #[inline] fn deref(&self) -> &[u8] { - self.data.slice() + self.inner.slice() } } impl AsRef<[u8]> for BufferView<'_> { #[inline] fn as_ref(&self) -> &[u8] { - self.data.slice() + self.inner.slice() } } @@ -617,14 +595,14 @@ impl AsRef<[u8]> for BufferView<'_> { #[derive(Debug)] pub struct BufferViewMut<'a> { slice: BufferSlice<'a>, - data: Box, + inner: dispatch::DispatchBufferMappedRange, readable: bool, } impl AsMut<[u8]> for BufferViewMut<'_> { #[inline] fn as_mut(&mut self) -> &mut [u8] { - self.data.slice_mut() + self.inner.slice_mut() } } @@ -636,13 +614,13 @@ impl Deref for BufferViewMut<'_> { log::warn!("Reading from a BufferViewMut is slow and not recommended."); } - self.data.slice() + self.inner.slice() } } impl DerefMut for BufferViewMut<'_> { fn deref_mut(&mut self) -> &mut Self::Target { - self.data.slice_mut() + self.inner.slice_mut() } } @@ -666,14 +644,6 @@ impl Drop for BufferViewMut<'_> { } } -impl Drop for Buffer { - fn drop(&mut self) { - if !thread::panicking() { - self.context.buffer_drop(self.data.as_ref()); - } - } -} - fn check_buffer_bounds( buffer_size: BufferAddress, offset: BufferAddress, diff --git a/wgpu/src/api/command_buffer.rs b/wgpu/src/api/command_buffer.rs index 6c519ed65a..e76ae2d5e9 100644 --- a/wgpu/src/api/command_buffer.rs +++ b/wgpu/src/api/command_buffer.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a command buffer on the GPU. @@ -11,18 +9,9 @@ use crate::*; /// Corresponds to [WebGPU `GPUCommandBuffer`](https://gpuweb.github.io/gpuweb/#command-buffer). 
#[derive(Debug)] pub struct CommandBuffer { - pub(crate) context: Arc, - pub(crate) data: Option>, + pub(crate) inner: Option, } #[cfg(send_sync)] static_assertions::assert_impl_all!(CommandBuffer: Send, Sync); -impl Drop for CommandBuffer { - fn drop(&mut self) { - if !thread::panicking() { - if let Some(data) = self.data.take() { - self.context.command_buffer_drop(data.as_ref()); - } - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(CommandBuffer => .inner); diff --git a/wgpu/src/api/command_encoder.rs b/wgpu/src/api/command_encoder.rs index f86ccdf039..cd493587a7 100644 --- a/wgpu/src/api/command_encoder.rs +++ b/wgpu/src/api/command_encoder.rs @@ -1,7 +1,12 @@ -use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; - -use crate::context::DynContext; -use crate::*; +use std::ops::Range; + +use crate::{ + api::{ + blas::BlasBuildEntry, + tlas::{TlasBuildEntry, TlasPackage}, + }, + *, +}; /// Encodes a series of GPU operations. /// @@ -14,19 +19,12 @@ use crate::*; /// Corresponds to [WebGPU `GPUCommandEncoder`](https://gpuweb.github.io/gpuweb/#command-encoder). #[derive(Debug)] pub struct CommandEncoder { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchCommandEncoder, } #[cfg(send_sync)] static_assertions::assert_impl_all!(CommandEncoder: Send, Sync); -impl Drop for CommandEncoder { - fn drop(&mut self) { - if !thread::panicking() { - self.context.command_encoder_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(CommandEncoder => .inner); /// Describes a [`CommandEncoder`]. /// @@ -55,31 +53,13 @@ pub type TexelCopyTextureInfo<'a> = TexelCopyTextureInfoBase<&'a Texture>; #[cfg(send_sync)] static_assertions::assert_impl_all!(TexelCopyTextureInfo<'_>: Send, Sync); -use crate::api::blas::{ - BlasBuildEntry, BlasGeometries, BlasTriangleGeometry, DynContextBlasBuildEntry, - DynContextBlasGeometries, DynContextBlasTriangleGeometry, DynContextTlasInstance, TlasInstance, -}; -use crate::api::tlas::{ - DynContextTlasBuildEntry, DynContextTlasPackage, TlasBuildEntry, TlasPackage, -}; -pub use wgt::CopyExternalImageDestInfo as CopyExternalImageDestInfoBase; - -/// View of a texture which can be used to copy to a texture, including -/// color space and alpha premultiplication information. -/// -/// Corresponds to [WebGPU `GPUCopyExternalImageDestInfo`]( -/// https://gpuweb.github.io/gpuweb/#dictdef-gpuimagecopytexturetagged). -pub type CopyExternalImageDestInfo<'a> = CopyExternalImageDestInfoBase<&'a Texture>; -#[cfg(send_sync)] -static_assertions::assert_impl_all!(TexelCopyTextureInfo<'_>: Send, Sync); - impl CommandEncoder { /// Finishes recording and returns a [`CommandBuffer`] that can be submitted for execution. 
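///
/// A minimal sketch of the record/finish/submit flow, assuming a `device` and
/// `queue` already exist:
///
/// ```no_run
/// # fn example(device: &wgpu::Device, queue: &wgpu::Queue) {
/// let mut encoder = device.create_command_encoder(&Default::default());
/// // ...record passes and copies here...
/// let cmd_buffer = encoder.finish();
/// queue.submit([cmd_buffer]);
/// # }
/// ```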
pub fn finish(mut self) -> CommandBuffer { - let data = DynContext::command_encoder_finish(&*self.context, self.data.as_mut()); + let buffer = self.inner.finish(); + CommandBuffer { - context: Arc::clone(&self.context), - data: Some(data), + inner: Some(buffer), } } @@ -97,14 +77,10 @@ impl CommandEncoder { &'encoder mut self, desc: &RenderPassDescriptor<'_>, ) -> RenderPass<'encoder> { - let data = - DynContext::command_encoder_begin_render_pass(&*self.context, self.data.as_ref(), desc); + let rpass = self.inner.begin_render_pass(desc); RenderPass { - inner: RenderPassInner { - data, - context: self.context.clone(), - }, - encoder_guard: PhantomData, + inner: rpass, + _encoder_guard: api::PhantomDrop::default(), } } @@ -122,17 +98,10 @@ impl CommandEncoder { &'encoder mut self, desc: &ComputePassDescriptor<'_>, ) -> ComputePass<'encoder> { - let data = DynContext::command_encoder_begin_compute_pass( - &*self.context, - self.data.as_ref(), - desc, - ); + let cpass = self.inner.begin_compute_pass(desc); ComputePass { - inner: ComputePassInner { - data, - context: self.context.clone(), - }, - encoder_guard: PhantomData, + inner: cpass, + _encoder_guard: api::PhantomDrop::default(), } } @@ -151,12 +120,10 @@ impl CommandEncoder { destination_offset: BufferAddress, copy_size: BufferAddress, ) { - DynContext::command_encoder_copy_buffer_to_buffer( - &*self.context, - self.data.as_ref(), - source.data.as_ref(), + self.inner.copy_buffer_to_buffer( + &source.inner, source_offset, - destination.data.as_ref(), + &destination.inner, destination_offset, copy_size, ); @@ -169,13 +136,8 @@ impl CommandEncoder { destination: TexelCopyTextureInfo<'_>, copy_size: Extent3d, ) { - DynContext::command_encoder_copy_buffer_to_texture( - &*self.context, - self.data.as_ref(), - source, - destination, - copy_size, - ); + self.inner + .copy_buffer_to_texture(source, destination, copy_size); } /// Copy data from a texture to a buffer. @@ -185,13 +147,8 @@ impl CommandEncoder { destination: TexelCopyBufferInfo<'_>, copy_size: Extent3d, ) { - DynContext::command_encoder_copy_texture_to_buffer( - &*self.context, - self.data.as_ref(), - source, - destination, - copy_size, - ); + self.inner + .copy_texture_to_buffer(source, destination, copy_size); } /// Copy data from one texture to another. @@ -207,13 +164,8 @@ impl CommandEncoder { destination: TexelCopyTextureInfo<'_>, copy_size: Extent3d, ) { - DynContext::command_encoder_copy_texture_to_texture( - &*self.context, - self.data.as_ref(), - source, - destination, - copy_size, - ); + self.inner + .copy_texture_to_texture(source, destination, copy_size); } /// Clears texture to zero. @@ -230,12 +182,7 @@ impl CommandEncoder { /// - `CLEAR_TEXTURE` extension not enabled /// - Range is out of bounds pub fn clear_texture(&mut self, texture: &Texture, subresource_range: &ImageSubresourceRange) { - DynContext::command_encoder_clear_texture( - &*self.context, - self.data.as_ref(), - texture.data.as_ref(), - subresource_range, - ); + self.inner.clear_texture(&texture.inner, subresource_range); } /// Clears buffer to zero. @@ -250,28 +197,22 @@ impl CommandEncoder { offset: BufferAddress, size: Option, ) { - DynContext::command_encoder_clear_buffer( - &*self.context, - self.data.as_ref(), - buffer.data.as_ref(), - offset, - size, - ); + self.inner.clear_buffer(&buffer.inner, offset, size); } /// Inserts debug marker. 
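///
/// A brief sketch of grouping commands with debug markers; the labels here are
/// illustrative only:
///
/// ```no_run
/// # fn example(encoder: &mut wgpu::CommandEncoder) {
/// encoder.push_debug_group("prepare frame");
/// encoder.insert_debug_marker("uploading uniforms");
/// encoder.pop_debug_group();
/// # }
/// ```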
pub fn insert_debug_marker(&mut self, label: &str) { - DynContext::command_encoder_insert_debug_marker(&*self.context, self.data.as_ref(), label); + self.inner.insert_debug_marker(label); } /// Start recording commands and group them into a debug marker group. pub fn push_debug_group(&mut self, label: &str) { - DynContext::command_encoder_push_debug_group(&*self.context, self.data.as_ref(), label); + self.inner.push_debug_group(label); } /// Stops command recording and closes the debug marker group. pub fn pop_debug_group(&mut self) { - DynContext::command_encoder_pop_debug_group(&*self.context, self.data.as_ref()); + self.inner.pop_debug_group(); } /// Resolves a query set, writing the results into the supplied destination buffer. @@ -285,15 +226,13 @@ destination: &Buffer, destination_offset: BufferAddress, ) { - DynContext::command_encoder_resolve_query_set( - &*self.context, - self.data.as_ref(), - query_set.data.as_ref(), + self.inner.resolve_query_set( + &query_set.inner, query_range.start, query_range.end - query_range.start, - destination.data.as_ref(), + &destination.inner, destination_offset, - ) + ); } /// Returns the inner hal CommandEncoder using a callback. The hal command encoder will be `None` if the @@ -312,16 +251,16 @@ >( &mut self, hal_command_encoder_callback: F, - ) -> Option { - self.context - .as_any() - .downcast_ref::() - .map(|ctx| unsafe { - ctx.command_encoder_as_hal_mut::( - crate::context::downcast_ref(self.data.as_ref()), - hal_command_encoder_callback, - ) - }) + ) -> R { + if let Some(encoder) = self.inner.as_core_mut_opt() { + unsafe { + encoder + .context + .command_encoder_as_hal_mut::(encoder, hal_command_encoder_callback) + } + } else { + hal_command_encoder_callback(None) + } + } } @@ -340,12 +279,7 @@ /// recorded so far and before all commands recorded after. /// This may depend both on the backend and the driver.
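///
/// A sketch of recording a timestamp, assuming the device was created with
/// [`Features::TIMESTAMP_QUERY`] and `timestamps` is a timestamp [`QuerySet`]:
///
/// ```no_run
/// # fn example(encoder: &mut wgpu::CommandEncoder, timestamps: &wgpu::QuerySet) {
/// // Write the current GPU timestamp into query index 0 of the set.
/// encoder.write_timestamp(timestamps, 0);
/// # }
/// ```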
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { - DynContext::command_encoder_write_timestamp( - &*self.context, - self.data.as_mut(), - query_set.data.as_ref(), - query_index, - ) + self.inner.write_timestamp(&query_set.inner, query_index); } } @@ -387,61 +321,8 @@ impl CommandEncoder { blas: impl IntoIterator>, tlas: impl IntoIterator, ) { - let mut blas = blas.into_iter().map(|e: &BlasBuildEntry<'_>| { - let geometries = match &e.geometry { - BlasGeometries::TriangleGeometries(triangle_geometries) => { - let iter = triangle_geometries - .iter() - .map( - |tg: &BlasTriangleGeometry<'_>| DynContextBlasTriangleGeometry { - size: tg.size, - vertex_buffer: tg.vertex_buffer.data.as_ref(), - - index_buffer: tg - .index_buffer - .map(|index_buffer| index_buffer.data.as_ref()), - - transform_buffer: tg - .transform_buffer - .map(|transform_buffer| transform_buffer.data.as_ref()), - - first_vertex: tg.first_vertex, - vertex_stride: tg.vertex_stride, - index_buffer_offset: tg.index_buffer_offset, - transform_buffer_offset: tg.transform_buffer_offset, - }, - ); - DynContextBlasGeometries::TriangleGeometries(Box::new(iter)) - } - }; - DynContextBlasBuildEntry { - blas_data: e.blas.shared.data.as_ref(), - geometries, - } - }); - - let mut tlas = tlas.into_iter().map(|e: &TlasPackage| { - let instances = e.instances.iter().map(|instance: &Option| { - instance.as_ref().map(|instance| DynContextTlasInstance { - blas: instance.blas.data.as_ref(), - transform: &instance.transform, - custom_index: instance.custom_index, - mask: instance.mask, - }) - }); - DynContextTlasPackage { - tlas_data: e.tlas.data.as_ref(), - instances: Box::new(instances), - lowest_unmodified: e.lowest_unmodified, - } - }); - - DynContext::command_encoder_build_acceleration_structures( - &*self.context, - self.data.as_ref(), - &mut blas, - &mut tlas, - ); + self.inner + .build_acceleration_structures(&mut blas.into_iter(), &mut tlas.into_iter()); } /// Build bottom and top level acceleration structures. 
@@ -460,52 +341,9 @@ blas: impl IntoIterator>, tlas: impl IntoIterator>, ) { - let mut blas = blas.into_iter().map(|e: &BlasBuildEntry<'_>| { - let geometries = match &e.geometry { - BlasGeometries::TriangleGeometries(triangle_geometries) => { - let iter = triangle_geometries - .iter() - .map( - |tg: &BlasTriangleGeometry<'_>| DynContextBlasTriangleGeometry { - size: tg.size, - vertex_buffer: tg.vertex_buffer.data.as_ref(), - - index_buffer: tg - .index_buffer - .map(|index_buffer| index_buffer.data.as_ref()), - - transform_buffer: tg - .transform_buffer - .map(|transform_buffer| transform_buffer.data.as_ref()), - - first_vertex: tg.first_vertex, - vertex_stride: tg.vertex_stride, - index_buffer_offset: tg.index_buffer_offset, - transform_buffer_offset: tg.transform_buffer_offset, - }, - ); - DynContextBlasGeometries::TriangleGeometries(Box::new(iter)) - } - }; - DynContextBlasBuildEntry { - blas_data: e.blas.shared.data.as_ref(), - geometries, - } - }); - - let mut tlas = tlas - .into_iter() - .map(|e: &TlasBuildEntry<'_>| DynContextTlasBuildEntry { - tlas_data: e.tlas.data.as_ref(), - instance_buffer_data: e.instance_buffer.data.as_ref(), - instance_count: e.instance_count, - }); - - DynContext::command_encoder_build_acceleration_structures_unsafe_tlas( - &*self.context, - self.data.as_ref(), - &mut blas, - &mut tlas, + self.inner.build_acceleration_structures_unsafe_tlas( + &mut blas.into_iter(), + &mut tlas.into_iter(), ); } } diff --git a/wgpu/src/api/compute_pass.rs b/wgpu/src/api/compute_pass.rs index 3a8d31b467..c7fa7462f1 100644 --- a/wgpu/src/api/compute_pass.rs +++ b/wgpu/src/api/compute_pass.rs @@ -1,6 +1,3 @@ -use std::{marker::PhantomData, sync::Arc, thread}; - -use crate::context::DynContext; use crate::*; /// In-progress recording of a compute pass. @@ -11,14 +8,19 @@ use crate::*; /// https://gpuweb.github.io/gpuweb/#compute-pass-encoder). #[derive(Debug)] pub struct ComputePass<'encoder> { - /// The inner data of the compute pass, separated out so it's easy to replace the lifetime with 'static if desired. - pub(crate) inner: ComputePassInner, + pub(crate) inner: dispatch::DispatchComputePass, /// This lifetime is used to protect the [`CommandEncoder`] from being used - /// while the pass is alive. - pub(crate) encoder_guard: PhantomData<&'encoder ()>, + /// while the pass is alive. This needs to be PhantomDrop to prevent the lifetime + /// from being shortened. + pub(crate) _encoder_guard: crate::api::PhantomDrop<&'encoder ()>, } +#[cfg(send_sync)] +static_assertions::assert_impl_all!(ComputePass<'_>: Send, Sync); + +crate::cmp::impl_eq_ord_hash_proxy!(ComputePass<'_> => .inner); + impl ComputePass<'_> { /// Drops the lifetime relationship to the parent command encoder, making usage of /// the encoder while this pass is recorded a run-time error instead. @@ -35,7 +37,7 @@ impl ComputePass<'_> { pub fn forget_lifetime(self) -> ComputePass<'static> { ComputePass { inner: self.inner, - encoder_guard: PhantomData, + _encoder_guard: crate::api::PhantomDrop::default(), } } @@ -45,65 +47,40 @@ impl ComputePass<'_> { /// If the bind group has dynamic offsets, provide them in the binding order. /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`] /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
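///
/// Under the reworked generic signature both a plain reference and an `Option`
/// are accepted; a sketch, where `bind_group` is assumed to match the current
/// pipeline layout and to use no dynamic offsets:
///
/// ```no_run
/// # fn example(cpass: &mut wgpu::ComputePass<'_>, bind_group: &wgpu::BindGroup) {
/// cpass.set_bind_group(0, bind_group, &[]);
/// cpass.set_bind_group(0, Some(bind_group), &[]); // equivalent Option form
/// # }
/// ```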
- pub fn set_bind_group<'a>( - &mut self, - index: u32, - bind_group: impl Into>, - offsets: &[DynamicOffset], - ) { - let bg = bind_group.into().map(|x| x.data.as_ref()); - DynContext::compute_pass_set_bind_group( - &*self.inner.context, - self.inner.data.as_mut(), - index, - bg, - offsets, - ); + pub fn set_bind_group<'a, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset]) + where + Option<&'a BindGroup>: From, + { + let bg: Option<&BindGroup> = bind_group.into(); + let bg = bg.map(|bg| &bg.inner); + self.inner.set_bind_group(index, bg, offsets); } /// Sets the active compute pipeline. pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) { - DynContext::compute_pass_set_pipeline( - &*self.inner.context, - self.inner.data.as_mut(), - pipeline.data.as_ref(), - ); + self.inner.set_pipeline(&pipeline.inner); } /// Inserts debug marker. pub fn insert_debug_marker(&mut self, label: &str) { - DynContext::compute_pass_insert_debug_marker( - &*self.inner.context, - self.inner.data.as_mut(), - label, - ); + self.inner.insert_debug_marker(label); } /// Start recording commands and group them into a debug marker group. pub fn push_debug_group(&mut self, label: &str) { - DynContext::compute_pass_push_debug_group( - &*self.inner.context, - self.inner.data.as_mut(), - label, - ); + self.inner.push_debug_group(label); } /// Stops command recording and closes the debug marker group. pub fn pop_debug_group(&mut self) { - DynContext::compute_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut()); + self.inner.pop_debug_group(); } /// Dispatches compute work operations. /// /// `x`, `y` and `z` denote the number of work groups to dispatch in each dimension. pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) { - DynContext::compute_pass_dispatch_workgroups( - &*self.inner.context, - self.inner.data.as_mut(), - x, - y, - z, - ); + self.inner.dispatch_workgroups(x, y, z); } /// Dispatches compute work operations, based on the contents of the `indirect_buffer`. @@ -114,12 +91,8 @@ indirect_buffer: &Buffer, indirect_offset: BufferAddress, ) { - DynContext::compute_pass_dispatch_workgroups_indirect( - &*self.inner.context, - self.inner.data.as_mut(), - indirect_buffer.data.as_ref(), - indirect_offset, - ); + self.inner + .dispatch_workgroups_indirect(&indirect_buffer.inner, indirect_offset); } } @@ -134,12 +107,7 @@ /// For example, if `offset` is `4` and `data` is eight bytes long, this /// call will write `data` to bytes `4..12` of push constant storage. pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) { - DynContext::compute_pass_set_push_constants( - &*self.inner.context, - self.inner.data.as_mut(), - offset, - data, - ); + self.inner.set_push_constants(offset, data); } } @@ -152,12 +120,7 @@ /// but timestamps can be subtracted to get the time it takes /// for a string of operations to complete. pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { - DynContext::compute_pass_write_timestamp( - &*self.inner.context, - self.inner.data.as_mut(), - query_set.data.as_ref(), - query_index, - ) + self.inner.write_timestamp(&query_set.inner, query_index); } } @@ -166,35 +129,14 @@ /// Start a pipeline statistics query on this compute pass. It can be ended with /// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { - DynContext::compute_pass_begin_pipeline_statistics_query( - &*self.inner.context, - self.inner.data.as_mut(), - query_set.data.as_ref(), - query_index, - ); + self.inner + .begin_pipeline_statistics_query(&query_set.inner, query_index); } /// End the pipeline statistics query on this compute pass. It can be started with /// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested. pub fn end_pipeline_statistics_query(&mut self) { - DynContext::compute_pass_end_pipeline_statistics_query( - &*self.inner.context, - self.inner.data.as_mut(), - ); - } -} - -#[derive(Debug)] -pub(crate) struct ComputePassInner { - pub(crate) data: Box, - pub(crate) context: Arc, -} - -impl Drop for ComputePassInner { - fn drop(&mut self) { - if !thread::panicking() { - self.context.compute_pass_end(self.data.as_mut()); - } + self.inner.end_pipeline_statistics_query(); } } diff --git a/wgpu/src/api/compute_pipeline.rs b/wgpu/src/api/compute_pipeline.rs index 16885ac96b..b1919301cc 100644 --- a/wgpu/src/api/compute_pipeline.rs +++ b/wgpu/src/api/compute_pipeline.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a compute pipeline. @@ -10,13 +8,12 @@ use crate::*; /// Corresponds to [WebGPU `GPUComputePipeline`](https://gpuweb.github.io/gpuweb/#compute-pipeline). #[derive(Debug)] pub struct ComputePipeline { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchComputePipeline, } #[cfg(send_sync)] static_assertions::assert_impl_all!(ComputePipeline: Send, Sync); -super::impl_partialeq_eq_hash!(ComputePipeline); +crate::cmp::impl_eq_ord_hash_proxy!(ComputePipeline => .inner); impl ComputePipeline { /// Get an object representing the bind group layout at a given index. @@ -27,19 +24,8 @@ impl ComputePipeline { /// /// This method will raise a validation error if there is no bind group layout at `index`. pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { - let context = Arc::clone(&self.context); - let data = self - .context - .compute_pipeline_get_bind_group_layout(self.data.as_ref(), index); - BindGroupLayout { context, data } - } -} - -impl Drop for ComputePipeline { - fn drop(&mut self) { - if !thread::panicking() { - self.context.compute_pipeline_drop(self.data.as_ref()); - } + let bind_group = self.inner.get_bind_group_layout(index); + BindGroupLayout { inner: bind_group } } } diff --git a/wgpu/src/api/device.rs b/wgpu/src/api/device.rs index cd279db128..be2f2a908b 100644 --- a/wgpu/src/api/device.rs +++ b/wgpu/src/api/device.rs @@ -1,10 +1,9 @@ -use std::{error, fmt, future::Future, sync::Arc, thread}; +use std::{error, fmt, future::Future, sync::Arc}; use parking_lot::Mutex; use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, BlasShared, CreateBlasDescriptor}; use crate::api::tlas::{CreateTlasDescriptor, Tlas}; -use crate::context::DynContext; use crate::*; /// Open connection to a graphics and/or compute device. @@ -17,12 +16,13 @@ use crate::*; /// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device). #[derive(Debug)] pub struct Device { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchDevice, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Device: Send, Sync); +crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner); + /// Describes a [`Device`]. /// /// For use with [`Adapter::request_device`]. 
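///
/// A minimal sketch, assuming the default descriptor (no extra features or
/// limits) is sufficient and no API trace is wanted:
///
/// ```no_run
/// # async fn example(adapter: &wgpu::Adapter) {
/// let (device, queue) = adapter
///     .request_device(&wgpu::DeviceDescriptor::default(), None)
///     .await
///     .expect("failed to create device");
/// # }
/// ```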
@@ -43,7 +43,7 @@ impl Device { /// /// When running on WebGPU, this is a no-op. `Device`s are automatically polled. pub fn poll(&self, maintain: Maintain) -> MaintainResult { - DynContext::device_poll(&*self.context, self.data.as_ref(), maintain) + self.inner.poll(maintain) } /// The features which can be used on this device. @@ -51,7 +51,7 @@ impl Device { /// No additional features can be used, even if the underlying adapter can support them. #[must_use] pub fn features(&self) -> Features { - DynContext::device_features(&*self.context, self.data.as_ref()) + self.inner.features() } /// The limits which can be used on this device. @@ -59,7 +59,7 @@ impl Device { /// No better limits can be used, even if the underlying adapter can support them. #[must_use] pub fn limits(&self) -> Limits { - DynContext::device_limits(&*self.context, self.data.as_ref()) + self.inner.limits() } /// Creates a shader module from either SPIR-V or WGSL source code. @@ -78,16 +78,10 @@ impl Device { /// #[must_use] pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule { - let data = DynContext::device_create_shader_module( - &*self.context, - self.data.as_ref(), - desc, - wgt::ShaderBoundChecks::new(), - ); - ShaderModule { - context: Arc::clone(&self.context), - data, - } + let module = self + .inner + .create_shader_module(desc, wgt::ShaderBoundChecks::new()); + ShaderModule { inner: module } } /// Creates a shader module from either SPIR-V or WGSL source code without runtime checks. @@ -105,16 +99,10 @@ impl Device { &self, desc: ShaderModuleDescriptor<'_>, ) -> ShaderModule { - let data = DynContext::device_create_shader_module( - &*self.context, - self.data.as_ref(), - desc, - unsafe { wgt::ShaderBoundChecks::unchecked() }, - ); - ShaderModule { - context: Arc::clone(&self.context), - data, - } + let module = self + .inner + .create_shader_module(desc, unsafe { wgt::ShaderBoundChecks::unchecked() }); + ShaderModule { inner: module } } /// Creates a shader module from SPIR-V binary directly. @@ -130,53 +118,35 @@ impl Device { &self, desc: &ShaderModuleDescriptorSpirV<'_>, ) -> ShaderModule { - let data = unsafe { - DynContext::device_create_shader_module_spirv(&*self.context, self.data.as_ref(), desc) - }; - ShaderModule { - context: Arc::clone(&self.context), - data, - } + let module = unsafe { self.inner.create_shader_module_spirv(desc) }; + ShaderModule { inner: module } } /// Creates an empty [`CommandEncoder`]. #[must_use] pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder { - let data = - DynContext::device_create_command_encoder(&*self.context, self.data.as_ref(), desc); - CommandEncoder { - context: Arc::clone(&self.context), - data, - } + let encoder = self.inner.create_command_encoder(desc); + CommandEncoder { inner: encoder } } /// Creates an empty [`RenderBundleEncoder`]. #[must_use] - pub fn create_render_bundle_encoder( + pub fn create_render_bundle_encoder<'a>( &self, desc: &RenderBundleEncoderDescriptor<'_>, - ) -> RenderBundleEncoder<'_> { - let data = DynContext::device_create_render_bundle_encoder( - &*self.context, - self.data.as_ref(), - desc, - ); + ) -> RenderBundleEncoder<'a> { + let encoder = self.inner.create_render_bundle_encoder(desc); RenderBundleEncoder { - context: Arc::clone(&self.context), - data, - parent: self, - _p: Default::default(), + inner: encoder, + _p: std::marker::PhantomData, } } /// Creates a new [`BindGroup`]. 
#[must_use] pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup { - let data = DynContext::device_create_bind_group(&*self.context, self.data.as_ref(), desc); - BindGroup { - context: Arc::clone(&self.context), - data, - } + let group = self.inner.create_bind_group(desc); + BindGroup { inner: group } } /// Creates a [`BindGroupLayout`]. @@ -185,45 +155,29 @@ impl Device { &self, desc: &BindGroupLayoutDescriptor<'_>, ) -> BindGroupLayout { - let data = - DynContext::device_create_bind_group_layout(&*self.context, self.data.as_ref(), desc); - BindGroupLayout { - context: Arc::clone(&self.context), - data, - } + let layout = self.inner.create_bind_group_layout(desc); + BindGroupLayout { inner: layout } } /// Creates a [`PipelineLayout`]. #[must_use] pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout { - let data = - DynContext::device_create_pipeline_layout(&*self.context, self.data.as_ref(), desc); - PipelineLayout { - context: Arc::clone(&self.context), - data, - } + let layout = self.inner.create_pipeline_layout(desc); + PipelineLayout { inner: layout } } /// Creates a [`RenderPipeline`]. #[must_use] pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline { - let data = - DynContext::device_create_render_pipeline(&*self.context, self.data.as_ref(), desc); - RenderPipeline { - context: Arc::clone(&self.context), - data, - } + let pipeline = self.inner.create_render_pipeline(desc); + RenderPipeline { inner: pipeline } } /// Creates a [`ComputePipeline`]. #[must_use] pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline { - let data = - DynContext::device_create_compute_pipeline(&*self.context, self.data.as_ref(), desc); - ComputePipeline { - context: Arc::clone(&self.context), - data, - } + let pipeline = self.inner.create_compute_pipeline(desc); + ComputePipeline { inner: pipeline } } /// Creates a [`Buffer`]. @@ -234,11 +188,10 @@ impl Device { map_context.initial_range = 0..desc.size; } - let data = DynContext::device_create_buffer(&*self.context, self.data.as_ref(), desc); + let buffer = self.inner.create_buffer(desc); Buffer { - context: Arc::clone(&self.context), - data, + inner: buffer, map_context: Mutex::new(map_context), size: desc.size, usage: desc.usage, @@ -250,10 +203,10 @@ impl Device { /// `desc` specifies the general format of the texture. #[must_use] pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture { - let data = DynContext::device_create_texture(&*self.context, self.data.as_ref(), desc); + let texture = self.inner.create_texture(desc); + Texture { - context: Arc::clone(&self.context), - data, + inner: texture, descriptor: TextureDescriptor { label: None, view_formats: &[], @@ -277,21 +230,13 @@ impl Device { desc: &TextureDescriptor<'_>, ) -> Texture { let texture = unsafe { - self.context - .as_any() - .downcast_ref::() - // Part of the safety requirements is that the texture was generated from the same hal device. - // Therefore, unwrap is fine here since only WgpuCoreContext has the ability to create hal textures. 
- .unwrap() - .create_texture_from_hal::( - hal_texture, - crate::context::downcast_ref(self.data.as_ref()), - desc, - ) + let core_device = self.inner.as_core(); + core_device + .context + .create_texture_from_hal::(hal_texture, core_device, desc) }; Texture { - context: Arc::clone(&self.context), - data: Box::new(texture), + inner: texture.into(), descriptor: TextureDescriptor { label: None, view_formats: &[], @@ -320,22 +265,14 @@ impl Device { } let buffer = unsafe { - self.context - .as_any() - .downcast_ref::() - // Part of the safety requirements is that the buffer was generated from the same hal device. - // Therefore, unwrap is fine here since only WgpuCoreContext has the ability to create hal buffers. - .unwrap() - .create_buffer_from_hal::( - hal_buffer, - crate::context::downcast_ref(self.data.as_ref()), - desc, - ) + let core_device = self.inner.as_core(); + core_device + .context + .create_buffer_from_hal::(hal_buffer, core_device, desc) }; Buffer { - context: Arc::clone(&self.context), - data: Box::new(buffer), + inner: buffer.into(), map_context: Mutex::new(map_context), size: desc.size, usage: desc.usage, @@ -347,48 +284,40 @@ impl Device { /// `desc` specifies the behavior of the sampler. #[must_use] pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler { - let data = DynContext::device_create_sampler(&*self.context, self.data.as_ref(), desc); - Sampler { - context: Arc::clone(&self.context), - data, - } + let sampler = self.inner.create_sampler(desc); + Sampler { inner: sampler } } /// Creates a new [`QuerySet`]. #[must_use] pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet { - let data = DynContext::device_create_query_set(&*self.context, self.data.as_ref(), desc); - QuerySet { - context: Arc::clone(&self.context), - data, - } + let query_set = self.inner.create_query_set(desc); + QuerySet { inner: query_set } } /// Set a callback for errors that are not handled in error scopes. pub fn on_uncaptured_error(&self, handler: Box) { - self.context - .device_on_uncaptured_error(self.data.as_ref(), handler); + self.inner.on_uncaptured_error(handler) } /// Push an error scope. pub fn push_error_scope(&self, filter: ErrorFilter) { - self.context - .device_push_error_scope(self.data.as_ref(), filter); + self.inner.push_error_scope(filter) } /// Pop an error scope. pub fn pop_error_scope(&self) -> impl Future> + WasmNotSend { - self.context.device_pop_error_scope(self.data.as_ref()) + self.inner.pop_error_scope() } /// Starts frame capture. pub fn start_capture(&self) { - DynContext::device_start_capture(&*self.context, self.data.as_ref()) + self.inner.start_capture() } /// Stops frame capture. pub fn stop_capture(&self) { - DynContext::device_stop_capture(&*self.context, self.data.as_ref()) + self.inner.stop_capture() } /// Query internal counters from the native backend for debugging purposes. @@ -399,7 +328,7 @@ impl Device { /// If a counter is not set, its contains its default value (zero). #[must_use] pub fn get_internal_counters(&self) -> wgt::InternalCounters { - DynContext::device_get_internal_counters(&*self.context, self.data.as_ref()) + self.inner.get_internal_counters() } /// Generate an GPU memory allocation report if the underlying backend supports it. @@ -409,7 +338,7 @@ impl Device { /// for example as a workaround for driver issues. 
#[must_use] pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> { - DynContext::generate_allocator_report(&*self.context, self.data.as_ref()) + self.inner.generate_allocator_report() } /// Apply a callback to this `Device`'s underlying backend device. @@ -435,21 +364,21 @@ impl Device { pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Device>) -> R, R>( &self, hal_device_callback: F, - ) -> Option<R> { - self.context - .as_any() - .downcast_ref::<crate::backend::ContextWgpuCore>() - .map(|ctx| unsafe { - ctx.device_as_hal::<A, F, R>( - crate::context::downcast_ref(self.data.as_ref()), - hal_device_callback, - ) - }) + ) -> R { + if let Some(core_device) = self.inner.as_core_opt() { + unsafe { + core_device + .context + .device_as_hal::<A, F, R>(core_device, hal_device_callback) + } + } else { + hal_device_callback(None) + } } /// Destroy this device. pub fn destroy(&self) { - DynContext::device_destroy(&*self.context, self.data.as_ref()) + self.inner.destroy() } /// Set a DeviceLostCallback on this device. @@ -457,11 +386,7 @@ impl Device { &self, callback: impl Fn(DeviceLostReason, String) + Send + 'static, ) { - DynContext::device_set_device_lost_callback( - &*self.context, - self.data.as_ref(), - Box::new(callback), - ) + self.inner.set_device_lost_callback(Box::new(callback)) } /// Create a [`PipelineCache`] with initial data @@ -506,13 +431,8 @@ impl Device { &self, desc: &PipelineCacheDescriptor<'_>, ) -> PipelineCache { - let data = unsafe { - DynContext::device_create_pipeline_cache(&*self.context, self.data.as_ref(), desc) - }; - PipelineCache { - context: Arc::clone(&self.context), - data, - } + let cache = unsafe { self.inner.create_pipeline_cache(desc) }; + PipelineCache { inner: cache } } } @@ -540,15 +460,10 @@ impl Device { desc: &CreateBlasDescriptor<'_>, sizes: BlasGeometrySizeDescriptors, ) -> Blas { - let (handle, data) = - DynContext::device_create_blas(&*self.context, self.data.as_ref(), desc, sizes); + let (handle, blas) = self.inner.create_blas(desc, sizes); Blas { - #[allow(clippy::arc_with_non_send_sync)] - shared: Arc::new(BlasShared { - context: Arc::clone(&self.context), - data, - }), + shared: Arc::new(BlasShared { inner: blas }), handle, } } @@ -564,24 +479,15 @@ impl Device { /// [Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE]: wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE #[must_use] pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas { - let data = DynContext::device_create_tlas(&*self.context, self.data.as_ref(), desc); + let tlas = self.inner.create_tlas(desc); Tlas { - context: Arc::clone(&self.context), - data, + inner: tlas, max_instances: desc.max_instances, } } } -impl Drop for Device { - fn drop(&mut self) { - if !thread::panicking() { - self.context.device_drop(self.data.as_ref()); - } - } -} - /// Requesting a device from an [`Adapter`] failed. #[derive(Clone, Debug)] pub struct RequestDeviceError { diff --git a/wgpu/src/api/instance.rs b/wgpu/src/api/instance.rs index b21c9f70ec..f03e348183 100644 --- a/wgpu/src/api/instance.rs +++ b/wgpu/src/api/instance.rs @@ -1,8 +1,8 @@ use parking_lot::Mutex; -use crate::*; +use crate::{dispatch::InstanceInterface, *}; -use std::{future::Future, sync::Arc}; +use std::future::Future; /// Context for all other wgpu objects. Instance of wgpu. /// @@ -14,11 +14,13 @@ use std::{future::Future, sync::Arc}; /// Corresponds to [WebGPU `GPU`](https://gpuweb.github.io/gpuweb/#gpu-interface).
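The reworked `as_hal` above changes shape: it now returns `R` directly and always runs the callback, passing `None` when the device is not a wgpu-core device. A minimal sketch, assuming a native build with the Vulkan backend compiled in (`wgpu::hal` being the re-exported wgpu-hal is an assumption):

```rust
// Minimal sketch, assuming wgpu_core + the Vulkan backend are compiled in.
// The callback now always runs; `raw` is `None` on non-core backends.
fn is_vulkan(device: &wgpu::Device) -> bool {
    unsafe { device.as_hal::<wgpu::hal::api::Vulkan, _, _>(|raw| raw.is_some()) }
}
```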
#[derive(Debug)] pub struct Instance { - context: Arc<C>, + inner: dispatch::DispatchInstance, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Instance: Send, Sync); +crate::cmp::impl_eq_ord_hash_proxy!(Instance => .inner); + impl Default for Instance { /// Creates a new instance of wgpu with default options. /// @@ -129,7 +131,7 @@ impl Instance { if is_only_available_backend || (requested_webgpu && support_webgpu) { return Self { - context: Arc::from(crate::backend::ContextWebGpu::init(_instance_desc)), + inner: crate::backend::ContextWebGpu::new(_instance_desc).into(), }; } } @@ -137,7 +139,7 @@ impl Instance { #[cfg(wgpu_core)] { return Self { - context: Arc::from(crate::backend::ContextWgpuCore::init(_instance_desc)), + inner: crate::backend::ContextWgpuCore::new(_instance_desc).into(), }; } @@ -158,9 +160,9 @@ impl Instance { #[cfg(wgpu_core)] pub unsafe fn from_hal<A: wgc::hal_api::HalApi>(hal_instance: A::Instance) -> Self { Self { - context: Arc::new(unsafe { - crate::backend::ContextWgpuCore::from_hal_instance::<A>(hal_instance) - }), + inner: unsafe { + crate::backend::ContextWgpuCore::from_hal_instance::<A>(hal_instance).into() + }, } } @@ -176,10 +178,8 @@ impl Instance { /// [`Instance`]: hal::Api::Instance #[cfg(wgpu_core)] pub unsafe fn as_hal<A: wgc::hal_api::HalApi>(&self) -> Option<&A::Instance> { - self.context - .as_any() - // If we don't have a wgpu-core instance, we don't have a hal instance either. - .downcast_ref::<crate::backend::ContextWgpuCore>() + self.inner + .as_core_opt() .and_then(|ctx| unsafe { ctx.instance_as_hal::<A>() }) } @@ -195,9 +195,9 @@ impl Instance { #[cfg(wgpu_core)] pub unsafe fn from_core(core_instance: wgc::instance::Instance) -> Self { Self { - context: Arc::new(unsafe { - crate::backend::ContextWgpuCore::from_core_instance(core_instance) - }), + inner: unsafe { + crate::backend::ContextWgpuCore::from_core_instance(core_instance).into() + }, } } @@ -208,20 +208,21 @@ impl Instance { /// - `backends` - Backends from which to enumerate adapters. #[cfg(native)] pub fn enumerate_adapters(&self, backends: Backends) -> Vec<Adapter> { - let context = Arc::clone(&self.context); - self.context - .as_any() - .downcast_ref::<crate::backend::ContextWgpuCore>() - .map(|ctx| { - ctx.enumerate_adapters(backends) - .into_iter() - .map(move |adapter| crate::Adapter { - context: Arc::clone(&context), - data: Box::new(adapter), - }) - .collect() + let Some(core_instance) = self.inner.as_core_opt() else { + return Vec::new(); + }; + + core_instance + .enumerate_adapters(backends) + .into_iter() + .map(|adapter| { + let core = backend::wgpu_core::CoreAdapter { + context: core_instance.clone(), + id: adapter, + }; + crate::Adapter { inner: core.into() } }) - .unwrap() + .collect() } /// Retrieves an [`Adapter`] which matches the given [`RequestAdapterOptions`]. @@ -235,9 +236,8 @@ impl Instance { &self, options: &RequestAdapterOptions<'_, '_>, ) -> impl Future<Output = Option<Adapter>> + WasmNotSend { - let context = Arc::clone(&self.context); - let adapter = self.context.instance_request_adapter(options); - async move { adapter.await.map(|data| Adapter { context, data }) } + let future = self.inner.request_adapter(options); + async move { future.await.map(|inner| Adapter { inner }) } } /// Converts a wgpu-hal `ExposedAdapter` to a wgpu [`Adapter`].
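For context, a sketch of how the adapter paths above are driven from user code; `pollster` is an assumed helper crate used only to block on the async request:

```rust
// Assumes a native target; after this patch `enumerate_adapters` returns an
// empty Vec when the instance is not backed by wgpu-core.
fn pick_adapter(instance: &wgpu::Instance) -> Option<wgpu::Adapter> {
    for adapter in instance.enumerate_adapters(wgpu::Backends::all()) {
        println!("available: {}", adapter.get_info().name);
    }
    pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions::default()))
}
```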
@@ -250,18 +250,14 @@ impl Instance { &self, hal_adapter: hal::ExposedAdapter<A>, ) -> Adapter { - let context = Arc::clone(&self.context); - let adapter = unsafe { - context - .as_any() - .downcast_ref::<crate::backend::ContextWgpuCore>() - .unwrap() - .create_adapter_from_hal(hal_adapter) + let core_instance = self.inner.as_core(); + let adapter = unsafe { core_instance.create_adapter_from_hal(hal_adapter) }; + let core = backend::wgpu_core::CoreAdapter { + context: core_instance.clone(), + id: adapter, }; - Adapter { - context, - data: Box::new(adapter), - } + + Adapter { inner: core.into() } } /// Creates a new surface targeting a given window/canvas/surface/etc.. @@ -352,12 +348,11 @@ impl Instance { &self, target: SurfaceTargetUnsafe, ) -> Result<Surface<'window>, CreateSurfaceError> { - let data = unsafe { self.context.instance_create_surface(target) }?; + let surface = unsafe { self.inner.create_surface(target)? }; Ok(Surface { - context: Arc::clone(&self.context), _handle_source: None, - surface_data: data, + inner: surface, config: Mutex::new(None), }) } @@ -379,7 +374,7 @@ impl Instance { /// /// [`Queue`s]: Queue pub fn poll_all(&self, force_wait: bool) -> bool { - self.context.instance_poll_all_devices(force_wait) + self.inner.poll_all_devices(force_wait) } /// Generates memory report. @@ -388,9 +383,6 @@ impl Instance { /// which happens only when WebGPU is pre-selected by the instance creation. #[cfg(wgpu_core)] pub fn generate_report(&self) -> Option<wgc::global::GlobalReport> { - self.context - .as_any() - .downcast_ref::<crate::backend::ContextWgpuCore>() - .map(|ctx| ctx.generate_report()) + self.inner.as_core_opt().map(|ctx| ctx.generate_report()) } } diff --git a/wgpu/src/api/mod.rs b/wgpu/src/api/mod.rs index b94235393d..593d8a2593 100644 --- a/wgpu/src/api/mod.rs +++ b/wgpu/src/api/mod.rs @@ -19,16 +19,15 @@ //! - Avoid having to write out a long list of imports for each module. //! - Allow docs to be written naturally, without needing to worry about needing dedicated doc imports. //! - Treat wgpu-types types and wgpu-core types as a single set. -//! mod adapter; mod bind_group; mod bind_group_layout; +mod blas; mod buffer; mod command_buffer; mod command_encoder; // Not a root type, but common descriptor types for pipelines. -mod blas; mod common_pipeline; mod compute_pass; mod compute_pipeline; @@ -81,34 +80,19 @@ pub use tlas::*; /// Object debugging label. pub type Label<'a> = Option<&'a str>; -macro_rules! impl_partialeq_eq_hash { - ($ty:ty) => { - impl PartialEq for $ty { - fn eq(&self, other: &Self) -> bool { - std::ptr::addr_eq(self.data.as_ref(), other.data.as_ref()) - } - } - impl Eq for $ty {} +/// A cute utility type that works just like PhantomData, but also +/// implements Drop. This forces any lifetimes that are associated +/// with the type to be used until the Drop impl is run. This prevents
+/// lifetimes from being shortened.
+#[derive(Debug)] +pub(crate) struct PhantomDrop<T>(std::marker::PhantomData<T>); - impl std::hash::Hash for $ty { - fn hash<H: std::hash::Hasher>(&self, state: &mut H) { - let ptr = self.data.as_ref() as *const Data as *const (); - ptr.hash(state); - } - } +impl<T> Default for PhantomDrop<T> { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} - impl PartialOrd for $ty { - fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { - Some(self.cmp(other)) - } - } - impl Ord for $ty { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - let a = self.data.as_ref() as *const Data as *const (); - let b = other.data.as_ref() as *const Data as *const (); - a.cmp(&b) - } - } - }; +impl<T> Drop for PhantomDrop<T> { + fn drop(&mut self) {} } -pub(crate) use impl_partialeq_eq_hash; diff --git a/wgpu/src/api/pipeline_cache.rs b/wgpu/src/api/pipeline_cache.rs index 800e786cae..4462a405eb 100644 --- a/wgpu/src/api/pipeline_cache.rs +++ b/wgpu/src/api/pipeline_cache.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a pipeline cache, which is used to accelerate /// building [`RenderPipeline`]s and [`ComputePipeline`]s /// in subsequent executions /// /// This reuse is only applicable for the same or similar devices. /// See [`util::pipeline_cache_key`] for some details. /// /// # Background /// /// ... /// [renaming]: std::fs::rename #[derive(Debug)] pub struct PipelineCache { - pub(crate) context: Arc<C>, - pub(crate) data: Box<Data>, + pub(crate) inner: dispatch::DispatchPipelineCache, } #[cfg(send_sync)] static_assertions::assert_impl_all!(PipelineCache: Send, Sync); +crate::cmp::impl_eq_ord_hash_proxy!(PipelineCache => .inner); + impl PipelineCache { /// Get the data associated with this pipeline cache. /// The data format is an implementation detail of `wgpu`. /// The only defined operation on this data setting it as the `data` field /// on [`PipelineCacheDescriptor`], then to [`Device::create_pipeline_cache`]. /// /// This function is unique to the Rust API of `wgpu`. pub fn get_data(&self) -> Option<Vec<u8>> { - self.context.pipeline_cache_get_data(self.data.as_ref()) - } -} - -impl Drop for PipelineCache { - fn drop(&mut self) { - if !thread::panicking() { - self.context.pipeline_cache_drop(self.data.as_ref()); - } + self.inner.get_data() } } diff --git a/wgpu/src/api/pipeline_layout.rs b/wgpu/src/api/pipeline_layout.rs index 20538dd9e7..604dd78efd 100644 --- a/wgpu/src/api/pipeline_layout.rs +++ b/wgpu/src/api/pipeline_layout.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a pipeline layout. /// /// A `PipelineLayout` object describes the available binding groups of a pipeline. /// It can be created with [`Device::create_pipeline_layout`]. /// /// Corresponds to [WebGPU `GPUPipelineLayout`](https://gpuweb.github.io/gpuweb/#gpupipelinelayout). #[derive(Debug)] pub struct PipelineLayout { - pub(crate) context: Arc<C>, - pub(crate) data: Box<Data>, + pub(crate) inner: dispatch::DispatchPipelineLayout, } #[cfg(send_sync)] static_assertions::assert_impl_all!(PipelineLayout: Send, Sync); -super::impl_partialeq_eq_hash!(PipelineLayout); - -impl Drop for PipelineLayout { - fn drop(&mut self) { - if !thread::panicking() { - self.context.pipeline_layout_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(PipelineLayout => .inner); /// Describes a [`PipelineLayout`]. /// diff --git a/wgpu/src/api/query_set.rs b/wgpu/src/api/query_set.rs index a0cac6847b..a0d358ed4d 100644 --- a/wgpu/src/api/query_set.rs +++ b/wgpu/src/api/query_set.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a query set. /// /// It can be created with [`Device::create_query_set`]. /// /// Corresponds to [WebGPU `GPUQuerySet`](https://gpuweb.github.io/gpuweb/#queryset).
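An illustration (not from the patch, using stand-in types) of why the `PhantomDrop` guard defined above is needed: non-lexical lifetimes let a borrow held by a type without a `Drop` impl end at its last use, while an empty `Drop` impl pins the borrow to end of scope:

```rust
// `Guard` plays the role of a pass that borrows its encoder.
struct Guard<'a>(&'a mut String);

impl Drop for Guard<'_> {
    // The empty Drop impl forces the `'a` borrow to stay live until the
    // guard is dropped, instead of ending at the guard's last use.
    fn drop(&mut self) {}
}

fn demo() {
    let mut encoder = String::new();
    let guard = Guard(&mut encoder);
    // `encoder.push('x');` here would fail to compile: the borrow held by
    // `guard` now lives until `guard` is dropped.
    drop(guard);
    encoder.push('x'); // fine once the guard is gone
}
```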
#[derive(Debug)] pub struct QuerySet { - pub(crate) context: Arc<C>, - pub(crate) data: Box<Data>, + pub(crate) inner: dispatch::DispatchQuerySet, } #[cfg(send_sync)] static_assertions::assert_impl_all!(QuerySet: Send, Sync); -super::impl_partialeq_eq_hash!(QuerySet); - -impl Drop for QuerySet { - fn drop(&mut self) { - if !thread::panicking() { - self.context.query_set_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(QuerySet => .inner); /// Describes a [`QuerySet`]. /// diff --git a/wgpu/src/api/queue.rs b/wgpu/src/api/queue.rs index 83ea8a6136..89f505d572 100644 --- a/wgpu/src/api/queue.rs +++ b/wgpu/src/api/queue.rs @@ -1,10 +1,5 @@ -use std::{ - ops::{Deref, DerefMut}, - sync::Arc, - thread, -}; +use std::ops::{Deref, DerefMut}; -use crate::context::{DynContext, QueueWriteBuffer}; use crate::*; /// Handle to a command queue on a device. @@ -16,19 +11,12 @@ use crate::*; /// Corresponds to [WebGPU `GPUQueue`](https://gpuweb.github.io/gpuweb/#gpu-queue). #[derive(Debug)] pub struct Queue { - pub(crate) context: Arc<C>, - pub(crate) data: Box<Data>, + pub(crate) inner: dispatch::DispatchQueue, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Queue: Send, Sync); -impl Drop for Queue { - fn drop(&mut self) { - if !thread::panicking() { - self.context.queue_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(Queue => .inner); /// Identifier for a particular call to [`Queue::submit`]. Can be used /// as part of an argument to [`Device::poll`] to block for a particular /// submission to finish. @@ -39,7 +27,7 @@ impl Drop for Queue { #[derive(Debug, Clone)] pub struct SubmissionIndex { #[cfg_attr(not(native), allow(dead_code))] - pub(crate) data: Arc<crate::Data>, + pub(crate) index: u64, } #[cfg(send_sync)] static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync); @@ -59,7 +47,7 @@ pub struct QueueWriteBufferView<'a> { queue: &'a Queue, buffer: &'a Buffer, offset: BufferAddress, - inner: Box<dyn QueueWriteBuffer>, + inner: dispatch::DispatchQueueWriteBuffer, } #[cfg(send_sync)] static_assertions::assert_impl_all!(QueueWriteBufferView<'_>: Send, Sync); @@ -87,13 +75,9 @@ impl AsMut<[u8]> for QueueWriteBufferView<'_> { impl Drop for QueueWriteBufferView<'_> { fn drop(&mut self) { - DynContext::queue_write_staging_buffer( - &*self.queue.context, - self.queue.data.as_ref(), - self.buffer.data.as_ref(), - self.offset, - &*self.inner, - ); + self.queue + .inner + .write_staging_buffer(&self.buffer.inner, self.offset, &self.inner); } } @@ -119,13 +103,7 @@ impl Queue { /// method avoids an intermediate copy and is often able to transfer data /// more efficiently than this one. pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) { - DynContext::queue_write_buffer( - &*self.context, - self.data.as_ref(), - buffer.data.as_ref(), - offset, - data, - ) + self.inner.write_buffer(&buffer.inner, offset, data); } /// Write to a buffer via a directly mapped staging buffer.
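A sketch of the staging-buffer path that the next hunk reworks: `write_buffer_with` hands out a mapped view whose contents are scheduled for upload when it drops, mirroring the `Drop` impl above:

```rust
// Sketch: `size` must be non-zero and the whole view must be written.
fn upload(queue: &wgpu::Queue, buffer: &wgpu::Buffer, bytes: &[u8]) {
    let size = wgpu::BufferSize::new(bytes.len() as u64).expect("non-empty upload");
    if let Some(mut view) = queue.write_buffer_with(buffer, 0, size) {
        view.copy_from_slice(bytes);
    } // the copy is scheduled when the view drops
}
```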
@@ -164,15 +142,9 @@ impl Queue { size: BufferSize, ) -> Option<QueueWriteBufferView<'a>> { profiling::scope!("Queue::write_buffer_with"); - DynContext::queue_validate_write_buffer( - &*self.context, - self.data.as_ref(), - buffer.data.as_ref(), - offset, - size, - )?; - let staging_buffer = - DynContext::queue_create_staging_buffer(&*self.context, self.data.as_ref(), size)?; + self.inner + .validate_write_buffer(&buffer.inner, offset, size)?; + let staging_buffer = self.inner.create_staging_buffer(size)?; Some(QueueWriteBufferView { queue: self, buffer, @@ -212,14 +184,7 @@ impl Queue { data_layout: TexelCopyBufferLayout, size: Extent3d, ) { - DynContext::queue_write_texture( - &*self.context, - self.data.as_ref(), - texture, - data, - data_layout, - size, - ) + self.inner.write_texture(texture, data, data_layout, size); } /// Schedule a copy of data from `image` into `texture`. @@ -227,16 +192,11 @@ impl Queue { pub fn copy_external_image_to_texture( &self, source: &wgt::CopyExternalImageSourceInfo, - dest: crate::CopyExternalImageDestInfo<'_>, + dest: wgt::CopyExternalImageDestInfo<&api::Texture>, size: Extent3d, ) { - DynContext::queue_copy_external_image_to_texture( - &*self.context, - self.data.as_ref(), - source, - dest, - size, - ) + self.inner + .copy_external_image_to_texture(source, dest, size); } /// Submits a series of finished command buffers for execution. pub fn submit<I: IntoIterator<Item = CommandBuffer>>( &self, command_buffers: I, ) -> SubmissionIndex { let mut command_buffers = command_buffers .into_iter() - .map(|mut comb| comb.data.take().unwrap()); + .map(|mut comb| comb.inner.take().unwrap()); - let data = - DynContext::queue_submit(&*self.context, self.data.as_ref(), &mut command_buffers); + let index = self.inner.submit(&mut command_buffers); - SubmissionIndex { data } + SubmissionIndex { index } } /// Gets the amount of nanoseconds each tick of a timestamp query represents. /// /// On native, this is a rough estimate and the value may change over time. /// Timestamp values are represented in nanosecond values on WebGPU, see `` /// Therefore, this is always 1.0 on the web, but on wgpu-core a manual conversion is required. pub fn get_timestamp_period(&self) -> f32 { - DynContext::queue_get_timestamp_period(&*self.context, self.data.as_ref()) + self.inner.get_timestamp_period() } /// Registers a callback when the previous call to submit finishes running on the gpu. This callback /// being called implies that all mapped buffer callbacks which were registered before this call will /// have been called. /// /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)` /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread. /// /// The callback will be called on the thread that first calls the above functions after the gpu work /// has completed. There are no restrictions on the code you can run in the callback, however on native the /// call to the function will not complete until the callback returns, so prefer keeping callbacks short /// and used to set flags, send messages, etc. pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) { - DynContext::queue_on_submitted_work_done( - &*self.context, - self.data.as_ref(), - Box::new(callback), - ) + self.inner.on_submitted_work_done(Box::new(callback)); } } diff --git a/wgpu/src/api/render_bundle.rs b/wgpu/src/api/render_bundle.rs index 5932458aeb..1d603eab6b 100644 --- a/wgpu/src/api/render_bundle.rs +++ b/wgpu/src/api/render_bundle.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Pre-prepared reusable bundle of GPU operations. /// /// It only supports a handful of render commands, but it makes them reusable. It can be created /// with [`Device::create_render_bundle_encoder`]. /// It can be executed onto a [`CommandEncoder`] using [`RenderPass::execute_bundles`]. /// /// Executing a [`RenderBundle`] is often more efficient than issuing the underlying commands /// manually. /// /// Corresponds to [WebGPU `GPURenderBundle`](https://gpuweb.github.io/gpuweb/#render-bundle).
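A hedged sketch of the bundle workflow these files implement: record once with a `RenderBundleEncoder`, then replay from any compatible pass. The color format and pipeline setup here are assumptions:

```rust
// Record a reusable three-vertex draw into a bundle.
fn record_bundle(device: &wgpu::Device, pipeline: &wgpu::RenderPipeline) -> wgpu::RenderBundle {
    let mut enc = device.create_render_bundle_encoder(&wgpu::RenderBundleEncoderDescriptor {
        label: Some("reusable draw"),
        color_formats: &[Some(wgpu::TextureFormat::Rgba8Unorm)],
        depth_stencil: None,
        sample_count: 1,
        multiview: None,
    });
    enc.set_pipeline(pipeline);
    enc.draw(0..3, 0..1);
    enc.finish(&wgpu::RenderBundleDescriptor::default())
}
```

At draw time the bundle is replayed with `pass.execute_bundles([&bundle]);` inside any render pass whose attachment formats match the descriptor.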
#[derive(Debug)] pub struct RenderBundle { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchRenderBundle, } #[cfg(send_sync)] static_assertions::assert_impl_all!(RenderBundle: Send, Sync); -super::impl_partialeq_eq_hash!(RenderBundle); - -impl Drop for RenderBundle { - fn drop(&mut self) { - if !thread::panicking() { - self.context.render_bundle_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(RenderBundle => .inner); /// Describes a [`RenderBundle`]. /// diff --git a/wgpu/src/api/render_bundle_encoder.rs b/wgpu/src/api/render_bundle_encoder.rs index 6aa8a8c332..af0a225102 100644 --- a/wgpu/src/api/render_bundle_encoder.rs +++ b/wgpu/src/api/render_bundle_encoder.rs @@ -1,6 +1,6 @@ -use std::{marker::PhantomData, num::NonZeroU32, ops::Range, sync::Arc}; +use std::{marker::PhantomData, num::NonZeroU32, ops::Range}; -use crate::context::DynContext; +use crate::dispatch::RenderBundleEncoderInterface; use crate::*; /// Encodes a series of GPU operations into a reusable "render bundle". @@ -16,15 +16,15 @@ use crate::*; /// https://gpuweb.github.io/gpuweb/#gpurenderbundleencoder). #[derive(Debug)] pub struct RenderBundleEncoder<'a> { - pub(crate) context: Arc, - pub(crate) data: Box, - pub(crate) parent: &'a Device, + pub(crate) inner: dispatch::DispatchRenderBundleEncoder, /// This type should be !Send !Sync, because it represents an allocation on this thread's /// command buffer. - pub(crate) _p: PhantomData<*const u8>, + pub(crate) _p: PhantomData<(*const u8, &'a ())>, } static_assertions::assert_not_impl_any!(RenderBundleEncoder<'_>: Send, Sync); +crate::cmp::impl_eq_ord_hash_proxy!(RenderBundleEncoder<'_> => .inner); + /// Describes a [`RenderBundleEncoder`]. /// /// For use with [`Device::create_render_bundle_encoder`]. @@ -52,42 +52,34 @@ static_assertions::assert_impl_all!(RenderBundleEncoderDescriptor<'_>: Send, Syn impl<'a> RenderBundleEncoder<'a> { /// Finishes recording and returns a [`RenderBundle`] that can be executed in other render passes. pub fn finish(self, desc: &RenderBundleDescriptor<'_>) -> RenderBundle { - let data = DynContext::render_bundle_encoder_finish(&*self.context, self.data, desc); - RenderBundle { - context: Arc::clone(&self.context), - data, - } + let bundle = match self.inner { + #[cfg(wgpu_core)] + dispatch::DispatchRenderBundleEncoder::Core(b) => b.finish(desc), + #[cfg(webgpu)] + dispatch::DispatchRenderBundleEncoder::WebGPU(b) => b.finish(desc), + }; + + RenderBundle { inner: bundle } } /// Sets the active bind group for a given bind group index. The bind group layout /// in the active pipeline when any `draw()` function is called must match the layout of this bind group. /// /// If the bind group have dynamic offsets, provide them in the binding order. - pub fn set_bind_group<'b>( - &mut self, - index: u32, - bind_group: impl Into>, - offsets: &[DynamicOffset], - ) { - let bg = bind_group.into().map(|x| x.data.as_ref()); - DynContext::render_bundle_encoder_set_bind_group( - &*self.parent.context, - self.data.as_mut(), - index, - bg, - offsets, - ) + pub fn set_bind_group<'b, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset]) + where + Option<&'b BindGroup>: From, + { + let bg: Option<&'b BindGroup> = bind_group.into(); + let bg = bg.map(|x| &x.inner); + self.inner.set_bind_group(index, bg, offsets); } /// Sets the active render pipeline. /// /// Subsequent draw calls will exhibit the behavior defined by `pipeline`. 
pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) { - DynContext::render_bundle_encoder_set_pipeline( - &*self.parent.context, - self.data.as_mut(), - pipeline.data.as_ref(), - ) + self.inner.set_pipeline(&pipeline.inner); } /// Sets the active index buffer. @@ -95,14 +87,12 @@ impl<'a> RenderBundleEncoder<'a> { /// Subsequent calls to [`draw_indexed`](RenderBundleEncoder::draw_indexed) on this [`RenderBundleEncoder`] will /// use `buffer` as the source index buffer. pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) { - DynContext::render_bundle_encoder_set_index_buffer( - &*self.parent.context, - self.data.as_mut(), - buffer_slice.buffer.data.as_ref(), + self.inner.set_index_buffer( + &buffer_slice.buffer.inner, index_format, buffer_slice.offset, buffer_slice.size, - ) + ); } /// Assign a vertex buffer to a slot. @@ -116,14 +106,12 @@ impl<'a> RenderBundleEncoder<'a> { /// [`draw`]: RenderBundleEncoder::draw /// [`draw_indexed`]: RenderBundleEncoder::draw_indexed pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) { - DynContext::render_bundle_encoder_set_vertex_buffer( - &*self.parent.context, - self.data.as_mut(), + self.inner.set_vertex_buffer( slot, - buffer_slice.buffer.data.as_ref(), + &buffer_slice.buffer.inner, buffer_slice.offset, buffer_slice.size, - ) + ); } /// Draws primitives from the active vertex buffer(s). @@ -145,12 +133,7 @@ impl<'a> RenderBundleEncoder<'a> { /// } /// ``` pub fn draw(&mut self, vertices: Range, instances: Range) { - DynContext::render_bundle_encoder_draw( - &*self.parent.context, - self.data.as_mut(), - vertices, - instances, - ) + self.inner.draw(vertices, instances); } /// Draws indexed primitives using the active index buffer and the active vertex buffer(s). @@ -175,13 +158,7 @@ impl<'a> RenderBundleEncoder<'a> { /// } /// ``` pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { - DynContext::render_bundle_encoder_draw_indexed( - &*self.parent.context, - self.data.as_mut(), - indices, - base_vertex, - instances, - ); + self.inner.draw_indexed(indices, base_vertex, instances); } /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`. @@ -190,12 +167,8 @@ impl<'a> RenderBundleEncoder<'a> { /// /// The structure expected in `indirect_buffer` must conform to [`DrawIndirectArgs`](crate::util::DrawIndirectArgs). pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) { - DynContext::render_bundle_encoder_draw_indirect( - &*self.parent.context, - self.data.as_mut(), - indirect_buffer.data.as_ref(), - indirect_offset, - ); + self.inner + .draw_indirect(&indirect_buffer.inner, indirect_offset); } /// Draws indexed primitives using the active index buffer and the active vertex buffers, @@ -210,12 +183,8 @@ impl<'a> RenderBundleEncoder<'a> { indirect_buffer: &'a Buffer, indirect_offset: BufferAddress, ) { - DynContext::render_bundle_encoder_draw_indexed_indirect( - &*self.parent.context, - self.data.as_mut(), - indirect_buffer.data.as_ref(), - indirect_offset, - ); + self.inner + .draw_indexed_indirect(&indirect_buffer.inner, indirect_offset); } } @@ -250,12 +219,6 @@ impl RenderBundleEncoder<'_> { /// You would need to upload this in three set_push_constants calls. First for the `Vertex` only range 0..4, second /// for the `Vertex | Fragment` range 4..8, third for the `Fragment` range 8..12. 
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { - DynContext::render_bundle_encoder_set_push_constants( - &*self.parent.context, - self.data.as_mut(), - stages, - offset, - data, - ); + self.inner.set_push_constants(stages, offset, data); } } diff --git a/wgpu/src/api/render_pass.rs b/wgpu/src/api/render_pass.rs index 8b4500850b..5e245dc3ef 100644 --- a/wgpu/src/api/render_pass.rs +++ b/wgpu/src/api/render_pass.rs @@ -1,22 +1,7 @@ -use std::{marker::PhantomData, ops::Range, sync::Arc, thread}; +use std::ops::Range; -use crate::context::DynContext; use crate::*; -#[derive(Debug)] -pub(crate) struct RenderPassInner { - pub(crate) data: Box<Data>, - pub(crate) context: Arc<C>, -} - -impl Drop for RenderPassInner { - fn drop(&mut self) { - if !thread::panicking() { - self.context.render_pass_end(self.data.as_mut()); - } - } -} - /// In-progress recording of a render pass: a list of render commands in a [`CommandEncoder`]. /// /// It can be created with [`CommandEncoder::begin_render_pass()`], whose [`RenderPassDescriptor`] /// specifies the attachments (textures) that will be rendered to. /// /// Most of the methods on `RenderPass` serve one of two purposes, identifiable by their names: /// /// * Drawing: Drawing a specific number of vertices or indices. /// * State setting: Setting the state that the draw calls will use. /// /// A render pass may contain any number of drawing commands, and before/between each command the /// render state may be updated however you wish; each drawing command will be executed using the /// render state that has been set when the `draw_*()` function is called. /// /// Corresponds to [WebGPU `GPURenderPassEncoder`]( /// https://gpuweb.github.io/gpuweb/#render-pass-encoder). #[derive(Debug)] pub struct RenderPass<'encoder> { - /// The inner data of the render pass, separated out so it's easy to replace the lifetime with 'static if desired. - pub(crate) inner: RenderPassInner, + pub(crate) inner: dispatch::DispatchRenderPass, /// This lifetime is used to protect the [`CommandEncoder`] from being used - /// while the pass is alive. - pub(crate) encoder_guard: PhantomData<&'encoder ()>, + /// while the pass is alive. This needs to be PhantomDrop to prevent the lifetime + /// from being shortened. + pub(crate) _encoder_guard: PhantomDrop<&'encoder ()>, } +#[cfg(send_sync)] +static_assertions::assert_impl_all!(RenderPass<'_>: Send, Sync); + +crate::cmp::impl_eq_ord_hash_proxy!(RenderPass<'_> => .inner); + impl RenderPass<'_> { /// Drops the lifetime relationship to the parent command encoder, making usage of /// the encoder while this pass is recorded a run-time error instead. /// /// Attention: As long as the render pass has not been ended, any mutating operation on the parent /// command encoder will cause a run-time error and invalidate it! /// By default, the lifetime constraint prevents this, but it can be useful /// to handle this at run time, such as when storing the pass and encoder in the same /// data structure. /// /// This operation has no effect on pass recording. /// It's a safe operation, since [`CommandEncoder`] is in a locked state as long as the pass is active /// regardless of the lifetime constraint or its absence. pub fn forget_lifetime(self) -> RenderPass<'static> { RenderPass { inner: self.inner, - encoder_guard: PhantomData, + _encoder_guard: crate::api::PhantomDrop::default(), } } /// Sets the active bind group for a given bind group index. The bind group layout /// in the active pipeline when any `draw_*()` method is called must match the layout of /// this bind group. /// /// If the bind group have dynamic offsets, provide them in binding order. /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`] /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately. /// /// Subsequent draw calls’ shader executions will be able to access data in these bind groups. - pub fn set_bind_group<'a>( - &mut self, - index: u32, - bind_group: impl Into<Option<&'a BindGroup>>, - offsets: &[DynamicOffset], - ) { - let bg = bind_group.into().map(|x| x.data.as_ref()); - DynContext::render_pass_set_bind_group( - &*self.inner.context, - self.inner.data.as_mut(), - index, - bg, - offsets, - ) + pub fn set_bind_group<'a, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset]) + where + Option<&'a BindGroup>: From<BG>, + { + let bg: Option<&'a BindGroup> = bind_group.into(); + let bg = bg.map(|bg| &bg.inner); + + self.inner.set_bind_group(index, bg, offsets); } /// Sets the active render pipeline. /// /// Subsequent draw calls will exhibit the behavior defined by `pipeline`. pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) { - DynContext::render_pass_set_pipeline( - &*self.inner.context, - self.inner.data.as_mut(), - pipeline.data.as_ref(), - ) + self.inner.set_pipeline(&pipeline.inner); } /// Sets the blend color as used by some of the blending modes.
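A sketch combining the two API points reworked above: `forget_lifetime` trades the compile-time encoder borrow for a runtime check, and the new `set_bind_group` bound accepts either a plain `&BindGroup` or an `Option`:

```rust
// The descriptor and bind group are assumed to come from elsewhere.
fn record(
    encoder: &mut wgpu::CommandEncoder,
    desc: &wgpu::RenderPassDescriptor<'_>,
    bind_group: &wgpu::BindGroup,
) -> wgpu::RenderPass<'static> {
    // After forget_lifetime, using `encoder` while the pass lives is a
    // runtime error instead of a compile error.
    let mut pass = encoder.begin_render_pass(desc).forget_lifetime();
    pass.set_bind_group(0, bind_group, &[]); // plain reference
    pass.set_bind_group(1, None, &[]); // Option form, unsets slot 1
    pass
}
```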
@@ -107,11 +87,7 @@ impl RenderPass<'_> { /// If this method has not been called, the blend constant defaults to [`Color::TRANSPARENT`] /// (all components zero). pub fn set_blend_constant(&mut self, color: Color) { - DynContext::render_pass_set_blend_constant( - &*self.inner.context, - self.inner.data.as_mut(), - color, - ) + self.inner.set_blend_constant(color); } /// Sets the active index buffer. @@ -119,14 +95,12 @@ impl RenderPass<'_> { /// Subsequent calls to [`draw_indexed`](RenderPass::draw_indexed) on this [`RenderPass`] will /// use `buffer` as the source index buffer. pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'_>, index_format: IndexFormat) { - DynContext::render_pass_set_index_buffer( - &*self.inner.context, - self.inner.data.as_mut(), - buffer_slice.buffer.data.as_ref(), + self.inner.set_index_buffer( + &buffer_slice.buffer.inner, index_format, buffer_slice.offset, buffer_slice.size, - ) + ); } /// Assign a vertex buffer to a slot. @@ -140,14 +114,12 @@ impl RenderPass<'_> { /// [`draw`]: RenderPass::draw /// [`draw_indexed`]: RenderPass::draw_indexed pub fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'_>) { - DynContext::render_pass_set_vertex_buffer( - &*self.inner.context, - self.inner.data.as_mut(), + self.inner.set_vertex_buffer( slot, - buffer_slice.buffer.data.as_ref(), + &buffer_slice.buffer.inner, buffer_slice.offset, buffer_slice.size, - ) + ); } /// Sets the scissor rectangle used during the rasterization stage. @@ -160,14 +132,7 @@ impl RenderPass<'_> { /// The function of the scissor rectangle resembles [`set_viewport()`](Self::set_viewport), /// but it does not affect the coordinate system, only which fragments are discarded. pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { - DynContext::render_pass_set_scissor_rect( - &*self.inner.context, - self.inner.data.as_mut(), - x, - y, - width, - height, - ); + self.inner.set_scissor_rect(x, y, width, height); } /// Sets the viewport used during the rasterization stage to linearly map @@ -177,16 +142,7 @@ impl RenderPass<'_> { /// If this method has not been called, the viewport defaults to the entire bounds of the render /// targets. pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) { - DynContext::render_pass_set_viewport( - &*self.inner.context, - self.inner.data.as_mut(), - x, - y, - w, - h, - min_depth, - max_depth, - ); + self.inner.set_viewport(x, y, w, h, min_depth, max_depth); } /// Sets the stencil reference. @@ -194,34 +150,22 @@ impl RenderPass<'_> { /// Subsequent stencil tests will test against this value. /// If this method has not been called, the stencil reference value defaults to `0`. pub fn set_stencil_reference(&mut self, reference: u32) { - DynContext::render_pass_set_stencil_reference( - &*self.inner.context, - self.inner.data.as_mut(), - reference, - ); + self.inner.set_stencil_reference(reference); } /// Inserts debug marker. pub fn insert_debug_marker(&mut self, label: &str) { - DynContext::render_pass_insert_debug_marker( - &*self.inner.context, - self.inner.data.as_mut(), - label, - ); + self.inner.insert_debug_marker(label); } /// Start record commands and group it into debug marker group. pub fn push_debug_group(&mut self, label: &str) { - DynContext::render_pass_push_debug_group( - &*self.inner.context, - self.inner.data.as_mut(), - label, - ); + self.inner.push_debug_group(label); } /// Stops command recording and creates debug group. 
pub fn pop_debug_group(&mut self) { - DynContext::render_pass_pop_debug_group(&*self.inner.context, self.inner.data.as_mut()); + self.inner.pop_debug_group(); } /// Draws primitives from the active vertex buffer(s). @@ -246,12 +190,7 @@ impl RenderPass<'_> { /// This drawing command uses the current render state, as set by preceding `set_*()` methods. /// It is not affected by changes to the state that are performed after it is called. pub fn draw(&mut self, vertices: Range, instances: Range) { - DynContext::render_pass_draw( - &*self.inner.context, - self.inner.data.as_mut(), - vertices, - instances, - ) + self.inner.draw(vertices, instances); } /// Draws indexed primitives using the active index buffer and the active vertex buffers. @@ -279,13 +218,7 @@ impl RenderPass<'_> { /// This drawing command uses the current render state, as set by preceding `set_*()` methods. /// It is not affected by changes to the state that are performed after it is called. pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { - DynContext::render_pass_draw_indexed( - &*self.inner.context, - self.inner.data.as_mut(), - indices, - base_vertex, - instances, - ); + self.inner.draw_indexed(indices, base_vertex, instances); } /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`. @@ -302,12 +235,8 @@ impl RenderPass<'_> { /// /// See details on the individual flags for more information. pub fn draw_indirect(&mut self, indirect_buffer: &Buffer, indirect_offset: BufferAddress) { - DynContext::render_pass_draw_indirect( - &*self.inner.context, - self.inner.data.as_mut(), - indirect_buffer.data.as_ref(), - indirect_offset, - ); + self.inner + .draw_indirect(&indirect_buffer.inner, indirect_offset); } /// Draws indexed primitives using the active index buffer and the active vertex buffers, @@ -329,12 +258,8 @@ impl RenderPass<'_> { indirect_buffer: &Buffer, indirect_offset: BufferAddress, ) { - DynContext::render_pass_draw_indexed_indirect( - &*self.inner.context, - self.inner.data.as_mut(), - indirect_buffer.data.as_ref(), - indirect_offset, - ); + self.inner + .draw_indexed_indirect(&indirect_buffer.inner, indirect_offset); } /// Execute a [render bundle][RenderBundle], which is a set of pre-recorded commands @@ -346,13 +271,9 @@ impl RenderPass<'_> { &mut self, render_bundles: I, ) { - let mut render_bundles = render_bundles.into_iter().map(|rb| rb.data.as_ref()); + let mut render_bundles = render_bundles.into_iter().map(|rb| &rb.inner); - DynContext::render_pass_execute_bundles( - &*self.inner.context, - self.inner.data.as_mut(), - &mut render_bundles, - ) + self.inner.execute_bundles(&mut render_bundles); } } @@ -374,13 +295,8 @@ impl RenderPass<'_> { indirect_offset: BufferAddress, count: u32, ) { - DynContext::render_pass_multi_draw_indirect( - &*self.inner.context, - self.inner.data.as_mut(), - indirect_buffer.data.as_ref(), - indirect_offset, - count, - ); + self.inner + .multi_draw_indirect(&indirect_buffer.inner, indirect_offset, count); } /// Dispatches multiple draw calls from the active index buffer and the active vertex buffers, @@ -400,13 +316,8 @@ impl RenderPass<'_> { indirect_offset: BufferAddress, count: u32, ) { - DynContext::render_pass_multi_draw_indexed_indirect( - &*self.inner.context, - self.inner.data.as_mut(), - indirect_buffer.data.as_ref(), - indirect_offset, - count, - ); + self.inner + .multi_draw_indexed_indirect(&indirect_buffer.inner, indirect_offset, count); } } @@ -442,12 +353,10 @@ impl RenderPass<'_> { 
count_offset: BufferAddress, max_count: u32, ) { - DynContext::render_pass_multi_draw_indirect_count( - &*self.inner.context, - self.inner.data.as_mut(), - indirect_buffer.data.as_ref(), + self.inner.multi_draw_indirect_count( + &indirect_buffer.inner, indirect_offset, - count_buffer.data.as_ref(), + &count_buffer.inner, count_offset, max_count, ); @@ -486,12 +395,10 @@ impl RenderPass<'_> { count_offset: BufferAddress, max_count: u32, ) { - DynContext::render_pass_multi_draw_indexed_indirect_count( - &*self.inner.context, - self.inner.data.as_mut(), - indirect_buffer.data.as_ref(), + self.inner.multi_draw_indexed_indirect_count( + &indirect_buffer.inner, indirect_offset, - count_buffer.data.as_ref(), + &count_buffer.inner, count_offset, max_count, ); @@ -541,13 +448,7 @@ impl RenderPass<'_> { /// /// [`PushConstant`]: https://docs.rs/naga/latest/naga/enum.StorageClass.html#variant.PushConstant pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { - DynContext::render_pass_set_push_constants( - &*self.inner.context, - self.inner.data.as_mut(), - stages, - offset, - data, - ); + self.inner.set_push_constants(stages, offset, data); } } @@ -561,12 +462,7 @@ impl RenderPass<'_> { /// but timestamps can be subtracted to get the time it takes /// for a string of operations to complete. pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) { - DynContext::render_pass_write_timestamp( - &*self.inner.context, - self.inner.data.as_mut(), - query_set.data.as_ref(), - query_index, - ) + self.inner.write_timestamp(&query_set.inner, query_index); } } @@ -574,17 +470,13 @@ impl RenderPass<'_> { /// Start a occlusion query on this render pass. It can be ended with /// `end_occlusion_query`. Occlusion queries may not be nested. pub fn begin_occlusion_query(&mut self, query_index: u32) { - DynContext::render_pass_begin_occlusion_query( - &*self.inner.context, - self.inner.data.as_mut(), - query_index, - ); + self.inner.begin_occlusion_query(query_index); } /// End the occlusion query on this render pass. It can be started with /// `begin_occlusion_query`. Occlusion queries may not be nested. pub fn end_occlusion_query(&mut self) { - DynContext::render_pass_end_occlusion_query(&*self.inner.context, self.inner.data.as_mut()); + self.inner.end_occlusion_query(); } } @@ -593,21 +485,14 @@ impl RenderPass<'_> { /// Start a pipeline statistics query on this render pass. It can be ended with /// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested. pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) { - DynContext::render_pass_begin_pipeline_statistics_query( - &*self.inner.context, - self.inner.data.as_mut(), - query_set.data.as_ref(), - query_index, - ); + self.inner + .begin_pipeline_statistics_query(&query_set.inner, query_index); } /// End the pipeline statistics query on this render pass. It can be started with /// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested. 
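A sketch of the staged upload that the push-constant docs above describe, using the same hypothetical layout of a vertex-only range 0..4, a shared range 4..8, and a fragment-only range 8..12:

```rust
// One call per contiguous (stages, range) pair.
fn upload_push_constants(pass: &mut wgpu::RenderPass<'_>, data: &[u8; 12]) {
    pass.set_push_constants(wgpu::ShaderStages::VERTEX, 0, &data[0..4]);
    pass.set_push_constants(
        wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
        4,
        &data[4..8],
    );
    pass.set_push_constants(wgpu::ShaderStages::FRAGMENT, 8, &data[8..12]);
}
```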
pub fn end_pipeline_statistics_query(&mut self) { - DynContext::render_pass_end_pipeline_statistics_query( - &*self.inner.context, - self.inner.data.as_mut(), - ); + self.inner.end_pipeline_statistics_query(); } } diff --git a/wgpu/src/api/render_pipeline.rs b/wgpu/src/api/render_pipeline.rs index dd1c1cefe8..71131e941e 100644 --- a/wgpu/src/api/render_pipeline.rs +++ b/wgpu/src/api/render_pipeline.rs @@ -1,4 +1,4 @@ -use std::{num::NonZeroU32, sync::Arc, thread}; +use std::num::NonZeroU32; use crate::*; @@ -10,21 +10,12 @@ use crate::*; /// Corresponds to [WebGPU `GPURenderPipeline`](https://gpuweb.github.io/gpuweb/#render-pipeline). #[derive(Debug)] pub struct RenderPipeline { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchRenderPipeline, } #[cfg(send_sync)] static_assertions::assert_impl_all!(RenderPipeline: Send, Sync); -super::impl_partialeq_eq_hash!(RenderPipeline); - -impl Drop for RenderPipeline { - fn drop(&mut self) { - if !thread::panicking() { - self.context.render_pipeline_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(RenderPipeline => .inner); impl RenderPipeline { /// Get an object representing the bind group layout at a given index. @@ -34,11 +25,8 @@ impl RenderPipeline { /// /// This method will raise a validation error if there is no bind group layout at `index`. pub fn get_bind_group_layout(&self, index: u32) -> BindGroupLayout { - let context = Arc::clone(&self.context); - let data = self - .context - .render_pipeline_get_bind_group_layout(self.data.as_ref(), index); - BindGroupLayout { context, data } + let inner = self.inner.get_bind_group_layout(index); + BindGroupLayout { inner } } } diff --git a/wgpu/src/api/sampler.rs b/wgpu/src/api/sampler.rs index 4363991477..4c57819c99 100644 --- a/wgpu/src/api/sampler.rs +++ b/wgpu/src/api/sampler.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a sampler. @@ -13,21 +11,12 @@ use crate::*; /// Corresponds to [WebGPU `GPUSampler`](https://gpuweb.github.io/gpuweb/#sampler-interface). #[derive(Debug)] pub struct Sampler { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchSampler, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Sampler: Send, Sync); -super::impl_partialeq_eq_hash!(Sampler); - -impl Drop for Sampler { - fn drop(&mut self) { - if !thread::panicking() { - self.context.sampler_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(Sampler => .inner); /// Describes a [`Sampler`]. /// diff --git a/wgpu/src/api/shader_module.rs b/wgpu/src/api/shader_module.rs index 20334a75ad..218f4342ca 100644 --- a/wgpu/src/api/shader_module.rs +++ b/wgpu/src/api/shader_module.rs @@ -1,4 +1,4 @@ -use std::{borrow::Cow, future::Future, marker::PhantomData, sync::Arc, thread}; +use std::{borrow::Cow, future::Future, marker::PhantomData}; use crate::*; @@ -12,26 +12,17 @@ use crate::*; /// Corresponds to [WebGPU `GPUShaderModule`](https://gpuweb.github.io/gpuweb/#shader-module). 
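A sketch of `get_bind_group_layout` in use: deriving the layout from an existing pipeline instead of keeping the explicit layout around. Binding 0 as a whole-buffer binding is an assumption:

```rust
fn bind_resources(
    device: &wgpu::Device,
    pipeline: &wgpu::RenderPipeline,
    buffer: &wgpu::Buffer,
) -> wgpu::BindGroup {
    // Raises a validation error if the pipeline has no group at index 0.
    let layout = pipeline.get_bind_group_layout(0);
    device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: None,
        layout: &layout,
        entries: &[wgpu::BindGroupEntry {
            binding: 0,
            resource: buffer.as_entire_binding(),
        }],
    })
}
```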
#[derive(Debug)] pub struct ShaderModule { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchShaderModule, } #[cfg(send_sync)] static_assertions::assert_impl_all!(ShaderModule: Send, Sync); -super::impl_partialeq_eq_hash!(ShaderModule); - -impl Drop for ShaderModule { - fn drop(&mut self) { - if !thread::panicking() { - self.context.shader_module_drop(self.data.as_ref()); - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(ShaderModule => .inner); impl ShaderModule { /// Get the compilation info for the shader module. pub fn get_compilation_info(&self) -> impl Future + WasmNotSend { - self.context.shader_get_compilation_info(self.data.as_ref()) + self.inner.get_compilation_info() } } diff --git a/wgpu/src/api/surface.rs b/wgpu/src/api/surface.rs index bd0833e78c..99a4b8b5b3 100644 --- a/wgpu/src/api/surface.rs +++ b/wgpu/src/api/surface.rs @@ -1,9 +1,8 @@ -use std::{error, fmt, sync::Arc, thread}; +use std::{error, fmt}; use parking_lot::Mutex; use raw_window_handle::{HasDisplayHandle, HasWindowHandle}; -use crate::context::DynContext; use crate::*; /// Describes a [`Surface`]. @@ -24,8 +23,6 @@ static_assertions::assert_impl_all!(SurfaceConfiguration: Send, Sync); /// [`GPUCanvasContext`](https://gpuweb.github.io/gpuweb/#canvas-context) /// serves a similar role. pub struct Surface<'window> { - pub(crate) context: Arc, - /// Optionally, keep the source of the handle used for the surface alive. /// /// This is useful for platforms where the surface is created from a window and the surface @@ -33,7 +30,7 @@ pub struct Surface<'window> { pub(crate) _handle_source: Option>, /// Additional surface data returned by [`DynContext::instance_create_surface`]. - pub(crate) surface_data: Box, + pub(crate) inner: dispatch::DispatchSurface, // Stores the latest `SurfaceConfiguration` that was set using `Surface::configure`. // It is required to set the attributes of the `SurfaceTexture` in the @@ -49,11 +46,7 @@ impl Surface<'_> { /// /// Returns specified values (see [`SurfaceCapabilities`]) if surface is incompatible with the adapter. pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities { - DynContext::surface_get_capabilities( - &*self.context, - self.surface_data.as_ref(), - adapter.data.as_ref(), - ) + self.inner.get_capabilities(&adapter.inner) } /// Return a default `SurfaceConfiguration` from width and height to use for the [`Surface`] with this adapter. @@ -86,12 +79,7 @@ impl Surface<'_> { /// - Texture format requested is unsupported on the surface. /// - `config.width` or `config.height` is zero. pub fn configure(&self, device: &Device, config: &SurfaceConfiguration) { - DynContext::surface_configure( - &*self.context, - self.surface_data.as_ref(), - device.data.as_ref(), - config, - ); + self.inner.configure(&device.inner, config); let mut conf = self.config.lock(); *conf = Some(config.clone()); @@ -106,8 +94,7 @@ impl Surface<'_> { /// If a SurfaceTexture referencing this surface is alive when the swapchain is recreated, /// recreating the swapchain will panic. 
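A sketch of the acquire/present cycle that `get_current_texture` below participates in; reconfigure-and-retry handling for `Lost`/`Outdated` is elided:

```rust
fn acquire_and_present(surface: &wgpu::Surface<'_>) {
    let frame = match surface.get_current_texture() {
        Ok(frame) => frame,
        Err(wgpu::SurfaceError::Timeout) => return, // skip this frame
        Err(e) => panic!("surface error: {e:?}"),
    };
    let _view = frame
        .texture
        .create_view(&wgpu::TextureViewDescriptor::default());
    // ...record and submit rendering work targeting `_view` here...
    frame.present();
}
```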
pub fn get_current_texture(&self) -> Result<SurfaceTexture, SurfaceError> { - let (texture_data, status, detail) = - DynContext::surface_get_current_texture(&*self.context, self.surface_data.as_ref()); + let (texture, status, detail) = self.inner.get_current_texture(); let suboptimal = match status { SurfaceStatus::Good => false, @@ -137,11 +124,10 @@ impl Surface<'_> { view_formats: &[], }; - texture_data - .map(|data| SurfaceTexture { + texture + .map(|texture| SurfaceTexture { texture: Texture { - context: Arc::clone(&self.context), - data, + inner: texture, descriptor, }, suboptimal, @@ -161,16 +147,18 @@ impl Surface<'_> { pub unsafe fn as_hal<A: wgc::hal_api::HalApi, F: FnOnce(Option<&A::Surface>) -> R, R>( &self, hal_surface_callback: F, - ) -> Option<R> { - self.context - .as_any() - .downcast_ref::<crate::backend::ContextWgpuCore>() - .map(|ctx| unsafe { - ctx.surface_as_hal::<A, F, R>( - crate::context::downcast_ref(self.surface_data.as_ref()), - hal_surface_callback, - ) - }) + ) -> R { + let core_surface = self.inner.as_core_opt(); + + if let Some(core_surface) = core_surface { + unsafe { + core_surface + .context + .surface_as_hal::<A, F, R>(core_surface, hal_surface_callback) + } + } else { + hal_surface_callback(None) + } } } @@ -179,7 +167,6 @@ impl fmt::Debug for Surface<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Surface") - .field("context", &self.context) .field( "_handle_source", &if self._handle_source.is_some() { "Some" } else { "None" }, ) - .field("data", &self.surface_data) + .field("inner", &self.inner) .field("config", &self.config) .finish() } @@ -197,13 +184,7 @@ impl fmt::Debug for Surface<'_> { #[cfg(send_sync)] static_assertions::assert_impl_all!(Surface<'_>: Send, Sync); -impl Drop for Surface<'_> { - fn drop(&mut self) { - if !thread::panicking() { - self.context.surface_drop(self.surface_data.as_ref()) - } - } -} +crate::cmp::impl_eq_ord_hash_proxy!(Surface<'_> => .inner); /// Super trait for window handles as used in [`SurfaceTarget`]. pub trait WindowHandle: HasWindowHandle + HasDisplayHandle + WasmNotSendSync {} diff --git a/wgpu/src/api/surface_texture.rs b/wgpu/src/api/surface_texture.rs index 417ad56169..ce4934e4a3 100644 --- a/wgpu/src/api/surface_texture.rs +++ b/wgpu/src/api/surface_texture.rs @@ -1,6 +1,5 @@ use std::{error, fmt, thread}; -use crate::context::DynContext; use crate::*; /// Surface texture that can be rendered to. /// It can be presented to the surface it was acquired from with [`SurfaceTexture::present`]. @@ -17,11 +16,13 @@ pub struct SurfaceTexture { /// but should be recreated for maximum performance. pub suboptimal: bool, pub(crate) presented: bool, - pub(crate) detail: Box<dyn AnyWasmNotSendSync>, + pub(crate) detail: dispatch::DispatchSurfaceOutputDetail, } #[cfg(send_sync)] static_assertions::assert_impl_all!(SurfaceTexture: Send, Sync); +crate::cmp::impl_eq_ord_hash_proxy!(SurfaceTexture => .texture.inner); + impl SurfaceTexture { /// Schedule this texture to be presented on the owning surface. /// /// Needs to be called after any work on the texture is scheduled via [`Queue::submit`]. /// /// # Platform dependent behavior /// /// On Wayland, `present` will attach a `wl_buffer` to the underlying `wl_surface` and commit the new surface /// state. If it is desired to do things such as request a frame callback, scale the surface using the viewporter /// or synchronize other double buffered state, then these operations should be done before the call to `present`. pub fn present(mut self) { self.presented = true; - DynContext::surface_present( - &*self.texture.context, - // This call to as_ref is essential because we want the DynContext implementation to see the inner - // value of the Box (T::SurfaceOutputDetail), not the Box itself.
- self.detail.as_ref(), - ); + self.detail.present(); } } impl Drop for SurfaceTexture { fn drop(&mut self) { if !self.presented && !thread::panicking() { - DynContext::surface_texture_discard( - &*self.texture.context, - // This call to as_ref is essential because we want the DynContext implementation to see the inner - // value of the Box (T::SurfaceOutputDetail), not the Box itself. - self.detail.as_ref(), - ); + self.detail.texture_discard(); } } } diff --git a/wgpu/src/api/texture.rs b/wgpu/src/api/texture.rs index 7826118bff..3fdecd320b 100644 --- a/wgpu/src/api/texture.rs +++ b/wgpu/src/api/texture.rs @@ -1,6 +1,3 @@ -use std::{sync::Arc, thread}; - -use crate::context::DynContext; use crate::*; /// Handle to a texture on the GPU. @@ -10,14 +7,13 @@ use crate::*; /// Corresponds to [WebGPU `GPUTexture`](https://gpuweb.github.io/gpuweb/#texture-interface). #[derive(Debug)] pub struct Texture { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchTexture, pub(crate) descriptor: TextureDescriptor<'static>, } #[cfg(send_sync)] static_assertions::assert_impl_all!(Texture: Send, Sync); -super::impl_partialeq_eq_hash!(Texture); +crate::cmp::impl_eq_ord_hash_proxy!(Texture => .inner); impl Texture { /// Returns the inner hal Texture using a callback. The hal texture will be `None` if the @@ -31,16 +27,10 @@ impl Texture { &self, hal_texture_callback: F, ) -> R { - if let Some(ctx) = self - .context - .as_any() - .downcast_ref::() - { + if let Some(tex) = self.inner.as_core_opt() { unsafe { - ctx.texture_as_hal::( - crate::context::downcast_ref(self.data.as_ref()), - hal_texture_callback, - ) + tex.context + .texture_as_hal::(tex, hal_texture_callback) } } else { hal_texture_callback(None) @@ -49,16 +39,14 @@ impl Texture { /// Creates a view of this texture. pub fn create_view(&self, desc: &TextureViewDescriptor<'_>) -> TextureView { - let data = DynContext::texture_create_view(&*self.context, self.data.as_ref(), desc); - TextureView { - context: Arc::clone(&self.context), - data, - } + let view = self.inner.create_view(desc); + + TextureView { inner: view } } /// Destroy the associated native resources as soon as possible. pub fn destroy(&self) { - DynContext::texture_destroy(&*self.context, self.data.as_ref()); + self.inner.destroy(); } /// Make an `TexelCopyTextureInfo` representing the whole texture. @@ -135,14 +123,6 @@ impl Texture { } } -impl Drop for Texture { - fn drop(&mut self) { - if !thread::panicking() { - self.context.texture_drop(self.data.as_ref()); - } - } -} - /// Describes a [`Texture`]. /// /// For use with [`Device::create_texture`]. diff --git a/wgpu/src/api/texture_view.rs b/wgpu/src/api/texture_view.rs index 84b46bfe5a..f255603bcb 100644 --- a/wgpu/src/api/texture_view.rs +++ b/wgpu/src/api/texture_view.rs @@ -1,5 +1,3 @@ -use std::{sync::Arc, thread}; - use crate::*; /// Handle to a texture view. @@ -10,13 +8,12 @@ use crate::*; /// Corresponds to [WebGPU `GPUTextureView`](https://gpuweb.github.io/gpuweb/#gputextureview). #[derive(Debug)] pub struct TextureView { - pub(crate) context: Arc, - pub(crate) data: Box, + pub(crate) inner: dispatch::DispatchTextureView, } #[cfg(send_sync)] static_assertions::assert_impl_all!(TextureView: Send, Sync); -super::impl_partialeq_eq_hash!(TextureView); +crate::cmp::impl_eq_ord_hash_proxy!(TextureView => .inner); impl TextureView { /// Returns the inner hal TextureView using a callback. 
The hal texture will be `None` if the @@ -30,16 +27,11 @@ impl TextureView { &self, hal_texture_view_callback: F, ) -> R { - if let Some(ctx) = self - .context - .as_any() - .downcast_ref::<crate::backend::ContextWgpuCore>() - { + if let Some(core_view) = self.inner.as_core_opt() { unsafe { - ctx.texture_view_as_hal::<A, F, R>( - crate::context::downcast_ref(self.data.as_ref()), - hal_texture_view_callback, - ) + core_view + .context + .texture_view_as_hal::<A, F, R>(core_view, hal_texture_view_callback) } } else { hal_texture_view_callback(None) } } } -impl Drop for TextureView { - fn drop(&mut self) { - if !thread::panicking() { - self.context.texture_view_drop(self.data.as_ref()); - } - } -} - /// Describes a [`TextureView`]. /// /// For use with [`Texture::create_view`]. diff --git a/wgpu/src/api/tlas.rs b/wgpu/src/api/tlas.rs index d387e508a0..538f4e16c2 100644 --- a/wgpu/src/api/tlas.rs +++ b/wgpu/src/api/tlas.rs @@ -1,9 +1,6 @@ -use crate::api::blas::{ContextTlasInstance, DynContextTlasInstance, TlasInstance}; -use crate::context::{Context, DynContext}; -use crate::{BindingResource, Buffer, Data, Label, C}; +use crate::{api::blas::TlasInstance, dispatch}; +use crate::{BindingResource, Buffer, Label}; use std::ops::{Index, IndexMut, Range}; -use std::sync::Arc; -use std::thread; use wgt::WasmNotSendSync; /// Descriptor to create top level acceleration structures. @@ -21,24 +18,17 @@ static_assertions::assert_impl_all!(CreateTlasDescriptor<'_>: Send, Sync); /// /// [TLAS instances]: TlasInstance pub struct Tlas { - pub(crate) context: Arc<C>, - pub(crate) data: Box<Data>, + pub(crate) inner: dispatch::DispatchTlas, pub(crate) max_instances: u32, } static_assertions::assert_impl_all!(Tlas: WasmNotSendSync); +crate::cmp::impl_eq_ord_hash_proxy!(Tlas => .inner); + impl Tlas { /// Destroy the associated native resources as soon as possible. pub fn destroy(&self) { - DynContext::tlas_destroy(&*self.context, self.data.as_ref()); - } -} - -impl Drop for Tlas { - fn drop(&mut self) { - if !thread::panicking() { - self.context.tlas_drop(self.data.as_ref()); - } + self.inner.destroy(); } } @@ -168,31 +158,3 @@ impl IndexMut<RangeFrom<usize>> for TlasPackage { idx } } - -pub(crate) struct DynContextTlasBuildEntry<'a> { - pub(crate) tlas_data: &'a Data, - pub(crate) instance_buffer_data: &'a Data, - pub(crate) instance_count: u32, -} - -pub(crate) struct DynContextTlasPackage<'a> { - pub(crate) tlas_data: &'a Data, - pub(crate) instances: Box<dyn Iterator<Item = Option<DynContextTlasInstance<'a>>> + 'a>, - pub(crate) lowest_unmodified: u32, -} - -/// Context version see [TlasBuildEntry]. -#[allow(dead_code)] -pub struct ContextTlasBuildEntry<'a, T: Context> { - pub(crate) tlas_data: &'a T::TlasData, - pub(crate) instance_buffer_data: &'a T::BufferData, - pub(crate) instance_count: u32, -} - -/// Context version see [TlasPackage].
-#[allow(dead_code)] -pub struct ContextTlasPackage<'a, T: Context> { - pub(crate) tlas_data: &'a T::TlasData, - pub(crate) instances: Box>> + 'a>, - pub(crate) lowest_unmodified: u32, -} diff --git a/wgpu/src/backend/mod.rs b/wgpu/src/backend/mod.rs index 7364eb3fd6..1ebeddd8ea 100644 --- a/wgpu/src/backend/mod.rs +++ b/wgpu/src/backend/mod.rs @@ -1,10 +1,10 @@ #[cfg(webgpu)] -mod webgpu; +pub mod webgpu; #[cfg(webgpu)] pub(crate) use webgpu::{get_browser_gpu_property, ContextWebGpu}; #[cfg(wgpu_core)] -mod wgpu_core; +pub mod wgpu_core; #[cfg(wgpu_core)] pub(crate) use wgpu_core::ContextWgpuCore; diff --git a/wgpu/src/backend/webgpu.rs b/wgpu/src/backend/webgpu.rs index ba96875066..789d2f22cd 100644 --- a/wgpu/src/backend/webgpu.rs +++ b/wgpu/src/backend/webgpu.rs @@ -6,7 +6,6 @@ mod webgpu_sys; use js_sys::Promise; use std::{ - any::Any, cell::RefCell, collections::HashMap, fmt, @@ -18,41 +17,32 @@ use std::{ }; use wasm_bindgen::{prelude::*, JsCast}; -use crate::{ - context::{downcast_ref, QueueWriteBuffer}, - CompilationInfo, SurfaceTargetUnsafe, UncapturedErrorHandler, -}; +use crate::{dispatch, SurfaceTargetUnsafe}; use defined_non_null_js_value::DefinedNonNullJsValue; -// We need to make a wrapper for some of the handle types returned by the web backend to make them -// implement `Send` and `Sync` to match native. +// We need to mark various types as Send and Sync to satisfy the Rust type system. // // SAFETY: All webgpu handle types in wasm32 are internally a `JsValue`, and `JsValue` is neither // Send nor Sync. Currently, wasm32 has no threading support by default, so implementing `Send` or // `Sync` for a type is harmless. However, nightly Rust supports compiling wasm with experimental // threading support via `--target-features`. If `wgpu` is being compiled with those features, we do // not implement `Send` and `Sync` on the webgpu handle types. - -#[derive(Clone, Debug)] -pub(crate) struct Sendable(T); -#[cfg(send_sync)] -unsafe impl Send for Sendable {} -#[cfg(send_sync)] -unsafe impl Sync for Sendable {} +macro_rules! impl_send_sync { + ($name:ty) => { + #[cfg(send_sync)] + unsafe impl Send for $name {} + #[cfg(send_sync)] + unsafe impl Sync for $name {} + }; +} pub(crate) struct ContextWebGpu { /// `None` if browser does not advertise support for WebGPU. gpu: Option>, + /// Unique identifier for this context. + ident: crate::cmp::Identifier, } -#[cfg(send_sync)] -unsafe impl Send for ContextWebGpu {} -#[cfg(send_sync)] -unsafe impl Sync for ContextWebGpu {} -#[cfg(send_sync)] -unsafe impl Send for BufferMappedRange {} -#[cfg(send_sync)] -unsafe impl Sync for BufferMappedRange {} impl fmt::Debug for ContextWebGpu { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -82,6 +72,8 @@ impl crate::Error { pub struct WebShaderModule { module: webgpu_sys::GpuShaderModule, compilation_info: WebShaderCompilationInfo, + /// Unique identifier for this shader module. 
+ ident: crate::cmp::Identifier, } #[derive(Debug, Clone)] @@ -632,9 +624,8 @@ fn map_texture_view_dimension( fn map_buffer_copy_view( view: crate::TexelCopyBufferInfo<'_>, ) -> webgpu_sys::GpuTexelCopyBufferInfo { - let buffer: &::BufferData = - downcast_ref(view.buffer.data.as_ref()); - let mapped = webgpu_sys::GpuTexelCopyBufferInfo::new(&buffer.0.buffer); + let buffer = view.buffer.inner.as_webgpu(); + let mapped = webgpu_sys::GpuTexelCopyBufferInfo::new(&buffer.inner); if let Some(bytes_per_row) = view.layout.bytes_per_row { mapped.set_bytes_per_row(bytes_per_row); } @@ -648,24 +639,22 @@ fn map_buffer_copy_view( fn map_texture_copy_view( view: crate::TexelCopyTextureInfo<'_>, ) -> webgpu_sys::GpuTexelCopyTextureInfo { - let texture: &::TextureData = - downcast_ref(view.texture.data.as_ref()); - let mapped = webgpu_sys::GpuTexelCopyTextureInfo::new(&texture.0); + let texture = view.texture.inner.as_webgpu(); + let mapped = webgpu_sys::GpuTexelCopyTextureInfo::new(&texture.inner); mapped.set_mip_level(view.mip_level); mapped.set_origin(&map_origin_3d(view.origin)); mapped } fn map_tagged_texture_copy_view( - view: crate::CopyExternalImageDestInfo<'_>, + view: wgt::CopyExternalImageDestInfo<&crate::api::Texture>, ) -> webgpu_sys::GpuCopyExternalImageDestInfo { - let texture: &::TextureData = - downcast_ref(view.texture.data.as_ref()); - let mapped = webgpu_sys::GpuCopyExternalImageDestInfo::new(&texture.0); + let texture = view.texture.inner.as_webgpu(); + let mapped = webgpu_sys::GpuCopyExternalImageDestInfo::new(&texture.inner); mapped.set_mip_level(view.mip_level); mapped.set_origin(&map_origin_3d(view.origin)); mapped.set_aspect(map_texture_aspect(view.aspect)); - // mapped.color_space(map_color_space(view.color_space)); + // mapped.set_color_space(map_color_space(view.color_space)); mapped.set_premultiplied_alpha(view.premultiplied_alpha); mapped } @@ -892,28 +881,38 @@ fn map_js_sys_limits(limits: &wgt::Limits) -> js_sys::Object { type JsFutureResult = Result; -fn future_request_adapter(result: JsFutureResult) -> Option> { - match result.and_then(wasm_bindgen::JsCast::dyn_into) { - Ok(adapter) => Some(Sendable(adapter)), - Err(_) => None, - } +fn future_request_adapter(result: JsFutureResult) -> Option { + let web_adapter: Option = + result.and_then(wasm_bindgen::JsCast::dyn_into).ok(); + web_adapter.map(|adapter| { + WebAdapter { + inner: adapter, + ident: crate::cmp::Identifier::create(), + } + .into() + }) } fn future_request_device( result: JsFutureResult, -) -> Result< - ( - Sendable, - Sendable, - ), - crate::RequestDeviceError, -> { +) -> Result<(dispatch::DispatchDevice, dispatch::DispatchQueue), crate::RequestDeviceError> { result .map(|js_value| { - let device_data = Sendable(webgpu_sys::GpuDevice::from(js_value)); - let queue_data = Sendable(device_data.0.queue()); + let device = webgpu_sys::GpuDevice::from(js_value); + let queue = device.queue(); - (device_data, queue_data) + ( + WebDevice { + inner: device, + ident: crate::cmp::Identifier::create(), + } + .into(), + WebQueue { + inner: queue, + ident: crate::cmp::Identifier::create(), + } + .into(), + ) }) .map_err(|error_value| crate::RequestDeviceError { inner: crate::RequestDeviceErrorKind::WebGpu(error_value), @@ -1016,7 +1015,7 @@ impl ContextWebGpu { &self, canvas: Canvas, context_result: Result, wasm_bindgen::JsValue>, - ) -> Result<::SurfaceData, crate::CreateSurfaceError> { + ) -> Result { let context: js_sys::Object = match context_result { Ok(Some(context)) => context, Ok(None) => { @@ -1051,16 +1050,13 @@ 
impl ContextWebGpu { .dyn_into() .expect("canvas context is not a GPUCanvasContext"); - Ok(Sendable((canvas, context))) - } - - /// Get mapped buffer range directly as a `js_sys::ArrayBuffer`. - pub fn buffer_get_mapped_range_as_array_buffer( - &self, - buffer_data: &::BufferData, - sub_range: Range, - ) -> js_sys::ArrayBuffer { - buffer_data.0.get_mapped_array_buffer(sub_range) + Ok(WebSurface { + gpu: self.gpu.clone(), + context, + canvas, + ident: crate::cmp::Identifier::create(), + } + .into()) } } @@ -1119,69 +1115,362 @@ pub fn get_browser_gpu_property( Ok(DefinedNonNullJsValue::new(maybe_undefined_gpu)) } -impl crate::context::Context for ContextWebGpu { - type AdapterData = Sendable; - type DeviceData = Sendable; - type QueueData = Sendable; - type ShaderModuleData = Sendable; - type BindGroupLayoutData = Sendable; - type BindGroupData = Sendable; - type TextureViewData = Sendable; - type SamplerData = Sendable; - type BufferData = Sendable; - type TextureData = Sendable; - type QuerySetData = Sendable; - type PipelineLayoutData = Sendable; - type RenderPipelineData = Sendable; - type ComputePipelineData = Sendable; - type CommandEncoderData = Sendable; - type ComputePassData = Sendable; - type RenderPassData = Sendable; - type CommandBufferData = Sendable; - type RenderBundleEncoderData = Sendable; - type RenderBundleData = Sendable; - type SurfaceData = Sendable<(Canvas, webgpu_sys::GpuCanvasContext)>; - type BlasData = (); - type TlasData = (); - - type SurfaceOutputDetail = SurfaceOutputDetail; - type SubmissionIndexData = (); - type PipelineCacheData = (); - - type RequestAdapterFuture = OptionFuture< - MakeSendFuture< - wasm_bindgen_futures::JsFuture, - fn(JsFutureResult) -> Option, - >, - >; - type RequestDeviceFuture = MakeSendFuture< - wasm_bindgen_futures::JsFuture, - fn( - JsFutureResult, - ) -> Result<(Self::DeviceData, Self::QueueData), crate::RequestDeviceError>, - >; - type PopErrorScopeFuture = - MakeSendFuture Option>; - - type CompilationInfoFuture = MakeSendFuture< - wasm_bindgen_futures::JsFuture, - Box CompilationInfo>, - >; - - fn init(_instance_desc: wgt::InstanceDescriptor) -> Self { +#[derive(Debug)] +pub struct WebAdapter { + pub(crate) inner: webgpu_sys::GpuAdapter, + /// Unique identifier for this Adapter. + ident: crate::cmp::Identifier, +} + +#[derive(Debug)] +pub struct WebDevice { + pub(crate) inner: webgpu_sys::GpuDevice, + /// Unique identifier for this Device. + ident: crate::cmp::Identifier, +} + +#[derive(Debug)] +pub struct WebQueue { + pub(crate) inner: webgpu_sys::GpuQueue, + /// Unique identifier for this Queue. + ident: crate::cmp::Identifier, +} + +#[derive(Debug)] +pub struct WebBindGroupLayout { + pub(crate) inner: webgpu_sys::GpuBindGroupLayout, + /// Unique identifier for this BindGroupLayout. + ident: crate::cmp::Identifier, +} + +#[derive(Debug)] +pub struct WebBindGroup { + pub(crate) inner: webgpu_sys::GpuBindGroup, + /// Unique identifier for this BindGroup. + ident: crate::cmp::Identifier, +} + +#[derive(Debug)] +pub struct WebTextureView { + pub(crate) inner: webgpu_sys::GpuTextureView, + /// Unique identifier for this TextureView. + ident: crate::cmp::Identifier, +} + +#[derive(Debug)] +pub struct WebSampler { + pub(crate) inner: webgpu_sys::GpuSampler, + /// Unique identifier for this Sampler. + ident: crate::cmp::Identifier, +} + +/// Remembers which portion of a buffer has been mapped, along with a reference +/// to the mapped portion. +#[derive(Debug)] +struct WebBufferMapState { + /// The mapped memory of the buffer. 
+    pub mapped_buffer: Option<js_sys::ArrayBuffer>,
+    /// The total range which has been mapped in the buffer overall.
+    pub range: Range<wgt::BufferAddress>,
+}
+
+/// Stores the state of a GPU buffer and a reference to its mapped `ArrayBuffer` (if any).
+/// The WebGPU specification forbids calling `getMappedRange` on a `webgpu_sys::GpuBuffer` more than
+/// once, so this struct stores the initial mapped range and re-uses it, allowing for multiple `get_mapped_range`
+/// calls on the Rust side.
+#[derive(Debug)]
+pub struct WebBuffer {
+    /// The associated GPU buffer.
+    inner: webgpu_sys::GpuBuffer,
+    /// The mapped array buffer and mapped range.
+    mapping: RefCell<WebBufferMapState>,
+    /// Unique identifier for this Buffer.
+    ident: crate::cmp::Identifier,
+}
+
+impl WebBuffer {
+    /// Creates a new web buffer for the given Javascript object and description.
+    fn new(inner: webgpu_sys::GpuBuffer, desc: &crate::BufferDescriptor<'_>) -> Self {
+        Self {
+            inner,
+            mapping: RefCell::new(WebBufferMapState {
+                mapped_buffer: None,
+                range: 0..desc.size,
+            }),
+            ident: crate::cmp::Identifier::create(),
+        }
+    }
+
+    /// Creates a raw Javascript array buffer over the provided range.
+    fn get_mapped_array_buffer(&self, sub_range: Range<wgt::BufferAddress>) -> js_sys::ArrayBuffer {
+        self.inner
+            .get_mapped_range_with_f64_and_f64(
+                sub_range.start as f64,
+                (sub_range.end - sub_range.start) as f64,
+            )
+            .unwrap()
+    }
+
+    /// Obtains a reference to the re-usable buffer mapping as a Javascript array view.
+    fn get_mapped_range(&self, sub_range: Range<wgt::BufferAddress>) -> js_sys::Uint8Array {
+        let mut mapping = self.mapping.borrow_mut();
+        let range = mapping.range.clone();
+        let array_buffer = mapping.mapped_buffer.get_or_insert_with(|| {
+            self.inner
+                .get_mapped_range_with_f64_and_f64(
+                    range.start as f64,
+                    (range.end - range.start) as f64,
+                )
+                .unwrap()
+        });
+        js_sys::Uint8Array::new_with_byte_offset_and_length(
+            array_buffer,
+            (sub_range.start - range.start) as u32,
+            (sub_range.end - sub_range.start) as u32,
+        )
+    }
+
+    /// Sets the range of the buffer which is presently mapped.
+    fn set_mapped_range(&self, range: Range<wgt::BufferAddress>) {
+        self.mapping.borrow_mut().range = range;
+    }
+}
+
+#[derive(Debug)]
+pub struct WebTexture {
+    pub(crate) inner: webgpu_sys::GpuTexture,
+    /// Unique identifier for this Texture.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub(crate) struct WebBlas {
+    /// Unique identifier for this Blas.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub(crate) struct WebTlas {
+    /// Unique identifier for this Tlas.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebQuerySet {
+    pub(crate) inner: webgpu_sys::GpuQuerySet,
+    /// Unique identifier for this QuerySet.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebPipelineLayout {
+    pub(crate) inner: webgpu_sys::GpuPipelineLayout,
+    /// Unique identifier for this PipelineLayout.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebRenderPipeline {
+    pub(crate) inner: webgpu_sys::GpuRenderPipeline,
+    /// Unique identifier for this RenderPipeline.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebComputePipeline {
+    pub(crate) inner: webgpu_sys::GpuComputePipeline,
+    /// Unique identifier for this ComputePipeline.
+    ident: crate::cmp::Identifier,
+}
+
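The mapping cache in `WebBuffer::get_mapped_range` above exists because WebGPU allows `getMappedRange` to be called only once per mapping: the first Rust-side call creates the `ArrayBuffer`, and every later call re-slices the cached buffer relative to the stored range. A condensed pure-Rust sketch of the same get-or-insert pattern (the names `CachedMapping` and `MappedOnce` are illustrative only, not part of this patch):

    use std::cell::RefCell;
    use std::ops::Range;

    /// Illustrative stand-in for a mapping that may only be created once.
    struct MappedOnce {
        backing: Vec<u8>,
    }

    struct CachedMapping {
        /// Filled on first use, then re-sliced on every later call.
        cache: RefCell<Option<MappedOnce>>,
        /// The total range mapped in the underlying buffer.
        range: Range<u64>,
    }

    impl CachedMapping {
        /// Mirrors `WebBuffer::get_mapped_range`: create the mapping once,
        /// then hand out sub-views positioned relative to the cached range.
        fn get_mapped_range(&self, sub_range: Range<u64>) -> Vec<u8> {
            let mut cache = self.cache.borrow_mut();
            let mapped = cache.get_or_insert_with(|| MappedOnce {
                backing: vec![0; (self.range.end - self.range.start) as usize],
            });
            let start = (sub_range.start - self.range.start) as usize;
            let end = (sub_range.end - self.range.start) as usize;
            mapped.backing[start..end].to_vec()
        }
    }

The real implementation hands back `js_sys::Uint8Array` views instead of copies, but the once-only discipline is the same.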
+#[derive(Debug)]
+pub(crate) struct WebPipelineCache {
+    /// Unique identifier for this PipelineCache.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebCommandEncoder {
+    pub(crate) inner: webgpu_sys::GpuCommandEncoder,
+    /// Unique identifier for this CommandEncoder.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebComputePassEncoder {
+    pub(crate) inner: webgpu_sys::GpuComputePassEncoder,
+    /// Unique identifier for this ComputePassEncoder.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebRenderPassEncoder {
+    pub(crate) inner: webgpu_sys::GpuRenderPassEncoder,
+    /// Unique identifier for this RenderPassEncoder.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebCommandBuffer {
+    pub(crate) inner: webgpu_sys::GpuCommandBuffer,
+    /// Unique identifier for this CommandBuffer.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebRenderBundleEncoder {
+    pub(crate) inner: webgpu_sys::GpuRenderBundleEncoder,
+    /// Unique identifier for this RenderBundleEncoder.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebRenderBundle {
+    pub(crate) inner: webgpu_sys::GpuRenderBundle,
+    /// Unique identifier for this RenderBundle.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebSurface {
+    gpu: Option<DefinedNonNullJsValue<webgpu_sys::Gpu>>,
+    canvas: Canvas,
+    context: webgpu_sys::GpuCanvasContext,
+    /// Unique identifier for this Surface.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub(crate) struct WebSurfaceOutputDetail {
+    /// Unique identifier for this SurfaceOutputDetail.
+    ident: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct WebQueueWriteBuffer {
+    inner: Box<[u8]>,
+    /// Unique identifier for this QueueWriteBuffer.
+    ident: crate::cmp::Identifier,
+}
+
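Every handle type above carries a `crate::cmp::Identifier`, and the `impl_eq_ord_hash_proxy!` calls below forward all comparisons to that field. The `cmp` module is introduced elsewhere in this patch and is not shown here; one plausible shape for such an identifier, assuming a global atomic counter (an assumption for illustration, not the patch's actual code), is:

    use std::sync::atomic::{AtomicU64, Ordering};

    /// Hypothetical stand-in for `crate::cmp::Identifier`: a globally unique
    /// token that makes otherwise-opaque JS handles comparable and hashable.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct Identifier(u64);

    impl Identifier {
        pub fn create() -> Self {
            static NEXT: AtomicU64 = AtomicU64::new(0);
            Identifier(NEXT.fetch_add(1, Ordering::Relaxed))
        }
    }

    /// Hypothetical wrapper, mirroring the `ident` field pattern above.
    pub struct Handle {
        ident: Identifier,
    }

    // What a proxy impl boils down to: equality of the identifier alone,
    // so two wrappers compare equal only if they are the same handle.
    impl PartialEq for Handle {
        fn eq(&self, other: &Self) -> bool {
            self.ident == other.ident
        }
    }

Because the identifier is assigned at creation and never reused, equality, ordering, and hashing all agree, which is what the proxy call sites below rely on.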
+#[derive(Debug)]
+pub struct WebBufferMappedRange {
+    actual_mapping: js_sys::Uint8Array,
+    temporary_mapping: Vec<u8>,
+    /// Unique identifier for this BufferMappedRange.
+    ident: crate::cmp::Identifier,
+}
+
+impl_send_sync!(ContextWebGpu);
+impl_send_sync!(WebAdapter);
+impl_send_sync!(WebDevice);
+impl_send_sync!(WebQueue);
+impl_send_sync!(WebShaderModule);
+impl_send_sync!(WebBindGroupLayout);
+impl_send_sync!(WebBindGroup);
+impl_send_sync!(WebTextureView);
+impl_send_sync!(WebSampler);
+impl_send_sync!(WebBuffer);
+impl_send_sync!(WebTexture);
+impl_send_sync!(WebBlas);
+impl_send_sync!(WebTlas);
+impl_send_sync!(WebQuerySet);
+impl_send_sync!(WebPipelineLayout);
+impl_send_sync!(WebRenderPipeline);
+impl_send_sync!(WebComputePipeline);
+impl_send_sync!(WebPipelineCache);
+impl_send_sync!(WebCommandEncoder);
+impl_send_sync!(WebComputePassEncoder);
+impl_send_sync!(WebRenderPassEncoder);
+impl_send_sync!(WebCommandBuffer);
+impl_send_sync!(WebRenderBundleEncoder);
+impl_send_sync!(WebRenderBundle);
+impl_send_sync!(WebSurface);
+impl_send_sync!(WebSurfaceOutputDetail);
+impl_send_sync!(WebQueueWriteBuffer);
+impl_send_sync!(WebBufferMappedRange);
+
+crate::cmp::impl_eq_ord_hash_proxy!(ContextWebGpu => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebAdapter => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebDevice => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebQueue => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebShaderModule => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebBindGroupLayout => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebBindGroup => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebTextureView => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebSampler => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebBuffer => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebTexture => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebBlas => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebTlas => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebQuerySet => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebPipelineLayout => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebRenderPipeline => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebComputePipeline => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebPipelineCache => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebCommandEncoder => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebComputePassEncoder => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebRenderPassEncoder => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebCommandBuffer => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebRenderBundleEncoder => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebRenderBundle => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebSurface => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebSurfaceOutputDetail => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebQueueWriteBuffer => .ident);
+crate::cmp::impl_eq_ord_hash_proxy!(WebBufferMappedRange => .ident);
+
+impl dispatch::InterfaceTypes for ContextWebGpu {
+    type Instance = ContextWebGpu;
+    type Adapter = WebAdapter;
+    type Device = WebDevice;
+    type Queue = WebQueue;
+    type ShaderModule = WebShaderModule;
+    type BindGroupLayout = WebBindGroupLayout;
+    type BindGroup = WebBindGroup;
+    type TextureView = WebTextureView;
+    type Sampler = WebSampler;
+    type Buffer = WebBuffer;
+    type Texture = WebTexture;
+    type Blas = WebBlas;
+    type Tlas = WebTlas;
+    type QuerySet = WebQuerySet;
+    type PipelineLayout = WebPipelineLayout;
+    type RenderPipeline = WebRenderPipeline;
+    type ComputePipeline = WebComputePipeline;
+    type PipelineCache = WebPipelineCache;
+    type CommandEncoder = WebCommandEncoder;
+    type ComputePass = WebComputePassEncoder;
+    type
RenderPass = WebRenderPassEncoder; + type CommandBuffer = WebCommandBuffer; + type RenderBundleEncoder = WebRenderBundleEncoder; + type RenderBundle = WebRenderBundle; + type Surface = WebSurface; + type SurfaceOutputDetail = WebSurfaceOutputDetail; + type QueueWriteBuffer = WebQueueWriteBuffer; + type BufferMappedRange = WebBufferMappedRange; +} + +impl dispatch::InstanceInterface for ContextWebGpu { + fn new(_desc: crate::InstanceDescriptor) -> Self + where + Self: Sized, + { let Ok(gpu) = get_browser_gpu_property() else { panic!( "Accessing the GPU is only supported on the main thread or from a dedicated worker" ); }; - ContextWebGpu { gpu } + ContextWebGpu { + gpu, + ident: crate::cmp::Identifier::create(), + } } - unsafe fn instance_create_surface( + unsafe fn create_surface( &self, - target: SurfaceTargetUnsafe, - ) -> Result { + target: crate::SurfaceTargetUnsafe, + ) -> Result { match target { SurfaceTargetUnsafe::RawHandle { raw_display_handle: _, @@ -1226,10 +1515,10 @@ impl crate::context::Context for ContextWebGpu { } } - fn instance_request_adapter( + fn request_adapter( &self, options: &crate::RequestAdapterOptions<'_, '_>, - ) -> Self::RequestAdapterFuture { + ) -> Pin> { //TODO: support this check, return `None` if the flag is not set. // It's not trivial, since we need the Future logic to have this check, // and currently the Future here has no room for extra parameter `backends`. @@ -1245,7 +1534,7 @@ impl crate::context::Context for ContextWebGpu { if let Some(mapped_pref) = mapped_power_preference { mapped_options.set_power_preference(mapped_pref); } - if let Some(gpu) = &self.gpu { + let future = if let Some(gpu) = &self.gpu { let adapter_promise = gpu.request_adapter_with_options(&mapped_options); OptionFuture::some(MakeSendFuture::new( wasm_bindgen_futures::JsFuture::from(adapter_promise), @@ -1254,15 +1543,29 @@ impl crate::context::Context for ContextWebGpu { } else { // Gpu is undefined; WebGPU is not supported in this browser. OptionFuture::none() - } + }; + + Box::pin(future) + } + + fn poll_all_devices(&self, _force_wait: bool) -> bool { + // Devices are automatically polled. + true + } +} + +impl Drop for ContextWebGpu { + fn drop(&mut self) { + // no-op } +} - fn adapter_request_device( +impl dispatch::AdapterInterface for WebAdapter { + fn request_device( &self, - adapter_data: &Self::AdapterData, desc: &crate::DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, - ) -> Self::RequestDeviceFuture { + ) -> Pin> { if trace_dir.is_some() { //Error: Tracing isn't supported on the Web target } @@ -1297,44 +1600,33 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.set_label(label); } - let device_promise = adapter_data.0.request_device_with_descriptor(&mapped_desc); + let device_promise = self.inner.request_device_with_descriptor(&mapped_desc); - MakeSendFuture::new( + Box::pin(MakeSendFuture::new( wasm_bindgen_futures::JsFuture::from(device_promise), future_request_device, - ) - } - - fn instance_poll_all_devices(&self, _force_wait: bool) -> bool { - // Devices are automatically polled. - true + )) } - fn adapter_is_surface_supported( - &self, - _adapter_data: &Self::AdapterData, - _surface_data: &Self::SurfaceData, - ) -> bool { + fn is_surface_supported(&self, _surface: &dispatch::DispatchSurface) -> bool { + // All surfaces are inherently supported. 
true } - fn adapter_features(&self, adapter_data: &Self::AdapterData) -> wgt::Features { - map_wgt_features(adapter_data.0.features()) + fn features(&self) -> crate::Features { + map_wgt_features(self.inner.features()) } - fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> wgt::Limits { - map_wgt_limits(adapter_data.0.limits()) + fn limits(&self) -> crate::Limits { + map_wgt_limits(self.inner.limits()) } - fn adapter_downlevel_capabilities( - &self, - _adapter_data: &Self::AdapterData, - ) -> wgt::DownlevelCapabilities { + fn downlevel_capabilities(&self) -> crate::DownlevelCapabilities { // WebGPU is assumed to be fully compliant - wgt::DownlevelCapabilities::default() + crate::DownlevelCapabilities::default() } - fn adapter_get_info(&self, _adapter_data: &Self::AdapterData) -> wgt::AdapterInfo { + fn get_info(&self) -> crate::AdapterInfo { // TODO: web-sys has no way of getting information on adapters wgt::AdapterInfo { name: String::new(), @@ -1347,155 +1639,41 @@ impl crate::context::Context for ContextWebGpu { } } - fn adapter_get_texture_format_features( + fn get_texture_format_features( &self, - adapter_data: &Self::AdapterData, - format: wgt::TextureFormat, - ) -> wgt::TextureFormatFeatures { - format.guaranteed_format_features(self.adapter_features(adapter_data)) + format: crate::TextureFormat, + ) -> crate::TextureFormatFeatures { + format.guaranteed_format_features(dispatch::AdapterInterface::features(self)) } - fn adapter_get_presentation_timestamp( - &self, - _adapter_data: &Self::AdapterData, - ) -> wgt::PresentationTimestamp { - wgt::PresentationTimestamp::INVALID_TIMESTAMP + fn get_presentation_timestamp(&self) -> crate::PresentationTimestamp { + crate::PresentationTimestamp::INVALID_TIMESTAMP } - - fn surface_get_capabilities( - &self, - _surface_data: &Self::SurfaceData, - _adapter_data: &Self::AdapterData, - ) -> wgt::SurfaceCapabilities { - let mut formats = vec![ - wgt::TextureFormat::Rgba8Unorm, - wgt::TextureFormat::Bgra8Unorm, - wgt::TextureFormat::Rgba16Float, - ]; - let mut mapped_formats = formats.iter().map(|format| map_texture_format(*format)); - // Preferred canvas format will only be either "rgba8unorm" or "bgra8unorm". - // https://www.w3.org/TR/webgpu/#dom-gpu-getpreferredcanvasformat - let gpu = self - .gpu - .as_ref() - .expect("Caller could not have created an adapter if gpu is undefined."); - let preferred_format = gpu.get_preferred_canvas_format(); - if let Some(index) = mapped_formats.position(|format| format == preferred_format) { - formats.swap(0, index); - } - - wgt::SurfaceCapabilities { - // https://gpuweb.github.io/gpuweb/#supported-context-formats - formats, - // Doesn't really have meaning on the web. - present_modes: vec![wgt::PresentMode::Fifo], - alpha_modes: vec![wgt::CompositeAlphaMode::Opaque], - // Statically set to RENDER_ATTACHMENT for now. 
See https://gpuweb.github.io/gpuweb/#dom-gpucanvasconfiguration-usage - usages: wgt::TextureUsages::RENDER_ATTACHMENT, - } +} +impl Drop for WebAdapter { + fn drop(&mut self) { + // no-op } +} - fn surface_configure( - &self, - surface_data: &Self::SurfaceData, - device_data: &Self::DeviceData, - config: &crate::SurfaceConfiguration, - ) { - match surface_data.0 .0 { - Canvas::Canvas(ref canvas) => { - canvas.set_width(config.width); - canvas.set_height(config.height); - } - Canvas::Offscreen(ref canvas) => { - canvas.set_width(config.width); - canvas.set_height(config.height); - } - } +impl dispatch::DeviceInterface for WebDevice { + fn features(&self) -> crate::Features { + map_wgt_features(self.inner.features()) + } - if let wgt::PresentMode::Mailbox | wgt::PresentMode::Immediate = config.present_mode { - panic!("Only FIFO/Auto* is supported on web"); - } - if let wgt::CompositeAlphaMode::PostMultiplied | wgt::CompositeAlphaMode::Inherit = - config.alpha_mode - { - panic!("Only Opaque/Auto or PreMultiplied alpha mode are supported on web"); - } - let alpha_mode = match config.alpha_mode { - wgt::CompositeAlphaMode::PreMultiplied => webgpu_sys::GpuCanvasAlphaMode::Premultiplied, - _ => webgpu_sys::GpuCanvasAlphaMode::Opaque, - }; - let mapped = webgpu_sys::GpuCanvasConfiguration::new( - &device_data.0, - map_texture_format(config.format), - ); - mapped.set_usage(config.usage.bits()); - mapped.set_alpha_mode(alpha_mode); - let mapped_view_formats = config - .view_formats - .iter() - .map(|format| JsValue::from(map_texture_format(*format))) - .collect::(); - mapped.set_view_formats(&mapped_view_formats); - surface_data - .0 - .1 - .configure(&mapped) - .expect_throw("invalid surface configuration"); + fn limits(&self) -> crate::Limits { + map_wgt_limits(self.inner.limits()) } - fn surface_get_current_texture( + fn create_shader_module( &self, - surface_data: &Self::SurfaceData, - ) -> ( - Option, - wgt::SurfaceStatus, - Self::SurfaceOutputDetail, - ) { - let surface_data = Sendable( - surface_data - .0 - .1 - .get_current_texture() - .expect_throw("invalid surface configuration"), - ); - (Some(surface_data), wgt::SurfaceStatus::Good, ()) - } - - fn surface_present(&self, _detail: &Self::SurfaceOutputDetail) { - // Swapchain is presented automatically - } - - fn surface_texture_discard(&self, _detail: &Self::SurfaceOutputDetail) { - // Can't really discard this on the Web - } - - fn device_features(&self, device_data: &Self::DeviceData) -> wgt::Features { - map_wgt_features(device_data.0.features()) - } - - fn device_limits(&self, device_data: &Self::DeviceData) -> wgt::Limits { - map_wgt_limits(device_data.0.limits()) - } - - #[cfg_attr( - not(any( - feature = "spirv", - feature = "glsl", - feature = "wgsl", - feature = "naga-ir" - )), - allow(unreachable_code, unused_variables) - )] - fn device_create_shader_module( - &self, - device_data: &Self::DeviceData, - desc: crate::ShaderModuleDescriptor<'_>, - _shader_bound_checks: wgt::ShaderBoundChecks, - ) -> Self::ShaderModuleData { - let shader_module_result = match desc.source { - #[cfg(feature = "spirv")] - crate::ShaderSource::SpirV(ref spv) => { - use naga::front; + desc: crate::ShaderModuleDescriptor<'_>, + _shader_bound_checks: wgt::ShaderBoundChecks, + ) -> dispatch::DispatchShaderModule { + let shader_module_result = match desc.source { + #[cfg(feature = "spirv")] + crate::ShaderSource::SpirV(ref spv) => { + use naga::front; let options = naga::front::spv::Options { adjust_coordinate_space: false, @@ -1506,7 +1684,7 @@ impl 
crate::context::Context for ContextWebGpu { spv_parser .parse() .map_err(|inner| { - CompilationInfo::from(naga::error::ShaderError { + crate::CompilationInfo::from(naga::error::ShaderError { source: String::new(), label: desc.label.map(|s| s.to_string()), inner: Box::new(inner), @@ -1517,7 +1695,7 @@ impl crate::context::Context for ContextWebGpu { ( v, WebShaderCompilationInfo::Transformed { - compilation_info: CompilationInfo { messages: vec![] }, + compilation_info: crate::CompilationInfo { messages: vec![] }, }, ) }) @@ -1540,7 +1718,7 @@ impl crate::context::Context for ContextWebGpu { parser .parse(&options, shader) .map_err(|inner| { - CompilationInfo::from(naga::error::ShaderError { + crate::CompilationInfo::from(naga::error::ShaderError { source: shader.to_string(), label: desc.label.map(|s| s.to_string()), inner: Box::new(inner), @@ -1551,7 +1729,7 @@ impl crate::context::Context for ContextWebGpu { ( v, WebShaderCompilationInfo::Transformed { - compilation_info: CompilationInfo { messages: vec![] }, + compilation_info: crate::CompilationInfo { messages: vec![] }, }, ) }) @@ -1573,7 +1751,7 @@ impl crate::context::Context for ContextWebGpu { ( v, WebShaderCompilationInfo::Transformed { - compilation_info: CompilationInfo { messages: vec![] }, + compilation_info: crate::CompilationInfo { messages: vec![] }, }, ) }) @@ -1593,7 +1771,7 @@ impl crate::context::Context for ContextWebGpu { let mut validator = valid::Validator::new(valid::ValidationFlags::all(), valid::Capabilities::all()); let module_info = validator.validate(module).map_err(|err| { - CompilationInfo::from(naga::error::ShaderError { + crate::CompilationInfo::from(naga::error::ShaderError { source: source.to_string(), label: desc.label.map(|s| s.to_string()), inner: Box::new(err), @@ -1616,26 +1794,25 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { descriptor.set_label(label); } - let shader_module = WebShaderModule { - module: device_data.0.create_shader_module(&descriptor), + WebShaderModule { + module: self.inner.create_shader_module(&descriptor), compilation_info, - }; - Sendable(shader_module) + ident: crate::cmp::Identifier::create(), + } + .into() } - unsafe fn device_create_shader_module_spirv( + unsafe fn create_shader_module_spirv( &self, - _device_data: &Self::DeviceData, _desc: &crate::ShaderModuleDescriptorSpirV<'_>, - ) -> Self::ShaderModuleData { + ) -> dispatch::DispatchShaderModule { unreachable!("SPIRV_SHADER_PASSTHROUGH is not enabled for this backend") } - fn device_create_bind_group_layout( + fn create_bind_group_layout( &self, - device_data: &Self::DeviceData, desc: &crate::BindGroupLayoutDescriptor<'_>, - ) -> Self::BindGroupLayoutData { + ) -> dispatch::DispatchBindGroupLayout { let mapped_bindings = desc .entries .iter() @@ -1728,19 +1905,19 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.set_label(label); } - Sendable( - device_data - .0 - .create_bind_group_layout(&mapped_desc) - .expect_throw("could not create bind group layout"), - ) + let bind_group_layout = self.inner.create_bind_group_layout(&mapped_desc).unwrap(); + + WebBindGroupLayout { + inner: bind_group_layout, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_bind_group( + fn create_bind_group( &self, - device_data: &Self::DeviceData, desc: &crate::BindGroupDescriptor<'_>, - ) -> Self::BindGroupData { + ) -> dispatch::DispatchBindGroup { let mapped_entries = desc .entries .iter() @@ -1751,10 +1928,9 @@ impl 
crate::context::Context for ContextWebGpu { offset, size, }) => { - let buffer: &::BufferData = - downcast_ref(buffer.data.as_ref()); + let buffer = buffer.inner.as_webgpu(); let mapped_buffer_binding = - webgpu_sys::GpuBufferBinding::new(&buffer.0.buffer); + webgpu_sys::GpuBufferBinding::new(&buffer.inner); mapped_buffer_binding.set_offset(offset as f64); if let Some(s) = size { mapped_buffer_binding.set_size(s.get() as f64); @@ -1765,17 +1941,15 @@ impl crate::context::Context for ContextWebGpu { panic!("Web backend does not support arrays of buffers") } crate::BindingResource::Sampler(sampler) => { - let sampler: &::SamplerData = - downcast_ref(sampler.data.as_ref()); - JsValue::from(&sampler.0) + let sampler = &sampler.inner.as_webgpu().inner; + JsValue::from(sampler) } crate::BindingResource::SamplerArray(..) => { panic!("Web backend does not support arrays of samplers") } crate::BindingResource::TextureView(texture_view) => { - let texture_view: &::TextureViewData = - downcast_ref(texture_view.data.as_ref()); - JsValue::from(&texture_view.0) + let texture_view = &texture_view.inner.as_webgpu().inner; + JsValue::from(texture_view) } crate::BindingResource::TextureViewArray(..) => { panic!("Web backend does not support BINDING_INDEXING extension") @@ -1789,44 +1963,49 @@ impl crate::context::Context for ContextWebGpu { }) .collect::(); - let bgl: &::BindGroupLayoutData = - downcast_ref(desc.layout.data.as_ref()); - let mapped_desc = webgpu_sys::GpuBindGroupDescriptor::new(&mapped_entries, &bgl.0); + let bgl = &desc.layout.inner.as_webgpu().inner; + let mapped_desc = webgpu_sys::GpuBindGroupDescriptor::new(&mapped_entries, bgl); if let Some(label) = desc.label { mapped_desc.set_label(label); } - Sendable(device_data.0.create_bind_group(&mapped_desc)) + let bind_group = self.inner.create_bind_group(&mapped_desc); + + WebBindGroup { + inner: bind_group, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_pipeline_layout( + fn create_pipeline_layout( &self, - device_data: &Self::DeviceData, desc: &crate::PipelineLayoutDescriptor<'_>, - ) -> Self::PipelineLayoutData { + ) -> dispatch::DispatchPipelineLayout { let temp_layouts = desc .bind_group_layouts .iter() - .map(|bgl| { - let bgl: &::BindGroupLayoutData = - downcast_ref(bgl.data.as_ref()); - &bgl.0 - }) + .map(|bgl| &bgl.inner.as_webgpu().inner) .collect::(); let mapped_desc = webgpu_sys::GpuPipelineLayoutDescriptor::new(&temp_layouts); if let Some(label) = desc.label { mapped_desc.set_label(label); } - Sendable(device_data.0.create_pipeline_layout(&mapped_desc)) + + let pipeline_layout = self.inner.create_pipeline_layout(&mapped_desc); + + WebPipelineLayout { + inner: pipeline_layout, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_render_pipeline( + fn create_render_pipeline( &self, - device_data: &Self::DeviceData, desc: &crate::RenderPipelineDescriptor<'_>, - ) -> Self::RenderPipelineData { - let module: &::ShaderModuleData = - downcast_ref(desc.vertex.module.data.as_ref()); - let mapped_vertex_state = webgpu_sys::GpuVertexState::new(&module.0.module); + ) -> dispatch::DispatchRenderPipeline { + let module = desc.vertex.module.inner.as_webgpu(); + let mapped_vertex_state = webgpu_sys::GpuVertexState::new(&module.module); insert_constants_map( &mapped_vertex_state, desc.vertex.compilation_options.constants, @@ -1867,9 +2046,8 @@ impl crate::context::Context for ContextWebGpu { let mapped_desc = webgpu_sys::GpuRenderPipelineDescriptor::new( &match desc.layout { Some(layout) => { 
- let layout: &::PipelineLayoutData = - downcast_ref(layout.data.as_ref()); - JsValue::from(&layout.0) + let layout = &layout.inner.as_webgpu().inner; + JsValue::from(layout) } None => auto_layout, }, @@ -1905,10 +2083,8 @@ impl crate::context::Context for ContextWebGpu { None => wasm_bindgen::JsValue::null(), }) .collect::(); - let module: &::ShaderModuleData = - downcast_ref(frag.module.data.as_ref()); - let mapped_fragment_desc = - webgpu_sys::GpuFragmentState::new(&module.0.module, &targets); + let module = frag.module.inner.as_webgpu(); + let mapped_fragment_desc = webgpu_sys::GpuFragmentState::new(&module.module, &targets); insert_constants_map(&mapped_vertex_state, frag.compilation_options.constants); if let Some(ep) = frag.entry_point { mapped_fragment_desc.set_entry_point(ep); @@ -1926,22 +2102,21 @@ impl crate::context::Context for ContextWebGpu { let mapped_primitive = map_primitive_state(&desc.primitive); mapped_desc.set_primitive(&mapped_primitive); - Sendable( - device_data - .0 - .create_render_pipeline(&mapped_desc) - .expect_throw("could not create render pipeline"), - ) + let render_pipeline = self.inner.create_render_pipeline(&mapped_desc).unwrap(); + + WebRenderPipeline { + inner: render_pipeline, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_compute_pipeline( + fn create_compute_pipeline( &self, - device_data: &Self::DeviceData, desc: &crate::ComputePipelineDescriptor<'_>, - ) -> Self::ComputePipelineData { - let shader_module: &::ShaderModuleData = - downcast_ref(desc.module.data.as_ref()); - let mapped_compute_stage = webgpu_sys::GpuProgrammableStage::new(&shader_module.0.module); + ) -> dispatch::DispatchComputePipeline { + let shader_module = desc.module.inner.as_webgpu(); + let mapped_compute_stage = webgpu_sys::GpuProgrammableStage::new(&shader_module.module); insert_constants_map(&mapped_compute_stage, desc.compilation_options.constants); if let Some(ep) = desc.entry_point { mapped_compute_stage.set_entry_point(ep); @@ -1950,9 +2125,8 @@ impl crate::context::Context for ContextWebGpu { let mapped_desc = webgpu_sys::GpuComputePipelineDescriptor::new( &match desc.layout { Some(layout) => { - let layout: &::PipelineLayoutData = - downcast_ref(layout.data.as_ref()); - JsValue::from(&layout.0) + let layout = &layout.inner.as_webgpu().inner; + JsValue::from(layout) } None => auto_layout, }, @@ -1962,41 +2136,35 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.set_label(label); } - Sendable(device_data.0.create_compute_pipeline(&mapped_desc)) + let compute_pipeline = self.inner.create_compute_pipeline(&mapped_desc); + + WebComputePipeline { + inner: compute_pipeline, + ident: crate::cmp::Identifier::create(), + } + .into() } - unsafe fn device_create_pipeline_cache( + unsafe fn create_pipeline_cache( &self, - _: &Self::DeviceData, - _: &crate::PipelineCacheDescriptor<'_>, - ) -> Self::PipelineCacheData { + _desc: &crate::PipelineCacheDescriptor<'_>, + ) -> dispatch::DispatchPipelineCache { + WebPipelineCache { + ident: crate::cmp::Identifier::create(), + } + .into() } - fn pipeline_cache_drop(&self, _: &Self::PipelineCacheData) {} - fn device_create_buffer( - &self, - device_data: &Self::DeviceData, - desc: &crate::BufferDescriptor<'_>, - ) -> Self::BufferData { + fn create_buffer(&self, desc: &crate::BufferDescriptor<'_>) -> dispatch::DispatchBuffer { let mapped_desc = webgpu_sys::GpuBufferDescriptor::new(desc.size as f64, desc.usage.bits()); mapped_desc.set_mapped_at_creation(desc.mapped_at_creation); if let Some(label) 
= desc.label { mapped_desc.set_label(label); } - Sendable(WebBuffer::new( - device_data - .0 - .create_buffer(&mapped_desc) - .expect_throw("could not create buffer"), - desc, - )) + WebBuffer::new(self.inner.create_buffer(&mapped_desc).unwrap(), desc).into() } - fn device_create_texture( - &self, - device_data: &Self::DeviceData, - desc: &crate::TextureDescriptor<'_>, - ) -> Self::TextureData { + fn create_texture(&self, desc: &crate::TextureDescriptor<'_>) -> dispatch::DispatchTexture { let mapped_desc = webgpu_sys::GpuTextureDescriptor::new( map_texture_format(desc.format), &map_extent_3d(desc.size), @@ -2014,19 +2182,28 @@ impl crate::context::Context for ContextWebGpu { .map(|format| JsValue::from(map_texture_format(*format))) .collect::(); mapped_desc.set_view_formats(&mapped_view_formats); - Sendable( - device_data - .0 - .create_texture(&mapped_desc) - .expect_throw("could not create texture"), - ) + + let texture = self.inner.create_texture(&mapped_desc).unwrap(); + WebTexture { + inner: texture, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_sampler( + fn create_blas( &self, - device_data: &Self::DeviceData, - desc: &crate::SamplerDescriptor<'_>, - ) -> Self::SamplerData { + _desc: &crate::CreateBlasDescriptor<'_>, + _sizes: crate::BlasGeometrySizeDescriptors, + ) -> (Option, dispatch::DispatchBlas) { + unimplemented!("Raytracing not implemented for web"); + } + + fn create_tlas(&self, _desc: &crate::CreateTlasDescriptor<'_>) -> dispatch::DispatchTlas { + unimplemented!("Raytracing not implemented for web"); + } + + fn create_sampler(&self, desc: &crate::SamplerDescriptor<'_>) -> dispatch::DispatchSampler { let mapped_desc = webgpu_sys::GpuSamplerDescriptor::new(); mapped_desc.set_address_mode_u(map_address_mode(desc.address_mode_u)); mapped_desc.set_address_mode_v(map_address_mode(desc.address_mode_v)); @@ -2039,19 +2216,21 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.set_mag_filter(map_filter_mode(desc.mag_filter)); mapped_desc.set_min_filter(map_filter_mode(desc.min_filter)); mapped_desc.set_mipmap_filter(map_mipmap_filter_mode(desc.mipmap_filter)); - // TODO: `max_anisotropy` is not available on `desc` yet - // mapped_desc.max_anisotropy(desc.max_anisotropy); + mapped_desc.set_max_anisotropy(desc.anisotropy_clamp); if let Some(label) = desc.label { mapped_desc.set_label(label); } - Sendable(device_data.0.create_sampler_with_descriptor(&mapped_desc)) + + let sampler = self.inner.create_sampler_with_descriptor(&mapped_desc); + + WebSampler { + inner: sampler, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_query_set( - &self, - device_data: &Self::DeviceData, - desc: &wgt::QuerySetDescriptor>, - ) -> Self::QuerySetData { + fn create_query_set(&self, desc: &crate::QuerySetDescriptor<'_>) -> dispatch::DispatchQuerySet { let ty = match desc.ty { wgt::QueryType::Occlusion => webgpu_sys::GpuQueryType::Occlusion, wgt::QueryType::Timestamp => webgpu_sys::GpuQueryType::Timestamp, @@ -2061,35 +2240,40 @@ impl crate::context::Context for ContextWebGpu { if let Some(label) = desc.label { mapped_desc.set_label(label); } - Sendable( - device_data - .0 - .create_query_set(&mapped_desc) - .expect_throw("could not create query set"), - ) + + let query_set = self.inner.create_query_set(&mapped_desc).unwrap(); + + WebQuerySet { + inner: query_set, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_command_encoder( + fn create_command_encoder( &self, - device_data: &Self::DeviceData, desc: 
&crate::CommandEncoderDescriptor<'_>, - ) -> Self::CommandEncoderData { + ) -> dispatch::DispatchCommandEncoder { let mapped_desc = webgpu_sys::GpuCommandEncoderDescriptor::new(); if let Some(label) = desc.label { mapped_desc.set_label(label); } - Sendable( - device_data - .0 - .create_command_encoder_with_descriptor(&mapped_desc), - ) + + let command_encoder = self + .inner + .create_command_encoder_with_descriptor(&mapped_desc); + + WebCommandEncoder { + inner: command_encoder, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_create_render_bundle_encoder( + fn create_render_bundle_encoder( &self, - device_data: &Self::DeviceData, desc: &crate::RenderBundleEncoderDescriptor<'_>, - ) -> Self::RenderBundleEncoderData { + ) -> dispatch::DispatchRenderBundleEncoder { let mapped_color_formats = desc .color_formats .iter() @@ -2108,345 +2292,557 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.set_stencil_read_only(ds.stencil_read_only); } mapped_desc.set_sample_count(desc.sample_count); - Sendable( - device_data - .0 - .create_render_bundle_encoder(&mapped_desc) - .expect_throw("could not create render bundle encoder"), - ) - } - fn device_drop(&self, _device_data: &Self::DeviceData) { - // Device is dropped automatically - } - - fn device_destroy(&self, device_data: &Self::DeviceData) { - device_data.0.destroy(); - } + let render_bundle_encoder = self + .inner + .create_render_bundle_encoder(&mapped_desc) + .unwrap(); - fn queue_drop(&self, _queue_data: &Self::QueueData) { - // Queue is dropped automatically + WebRenderBundleEncoder { + inner: render_bundle_encoder, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn device_set_device_lost_callback( - &self, - device_data: &Self::DeviceData, - device_lost_callback: crate::context::DeviceLostCallback, - ) { - use webgpu_sys::{GpuDeviceLostInfo, GpuDeviceLostReason}; - + fn set_device_lost_callback(&self, device_lost_callback: dispatch::BoxDeviceLostCallback) { let closure = Closure::once(move |info: JsValue| { - let info = info.dyn_into::().unwrap(); + let info = info.dyn_into::().unwrap(); device_lost_callback( match info.reason() { - GpuDeviceLostReason::Destroyed => crate::DeviceLostReason::Destroyed, - GpuDeviceLostReason::Unknown => crate::DeviceLostReason::Unknown, + webgpu_sys::GpuDeviceLostReason::Destroyed => { + crate::DeviceLostReason::Destroyed + } + webgpu_sys::GpuDeviceLostReason::Unknown => crate::DeviceLostReason::Unknown, _ => crate::DeviceLostReason::Unknown, }, info.message(), ); }); - let _ = device_data.0.lost().then(&closure); - } - - fn device_poll( - &self, - _device_data: &Self::DeviceData, - _maintain: crate::Maintain, - ) -> crate::MaintainResult { - // Device is polled automatically - crate::MaintainResult::SubmissionQueueEmpty + let _ = self.inner.lost().then(&closure); } - fn device_on_uncaptured_error( - &self, - device_data: &Self::DeviceData, - handler: Box, - ) { + fn on_uncaptured_error(&self, handler: Box) { let f = Closure::wrap(Box::new(move |event: webgpu_sys::GpuUncapturedErrorEvent| { let error = crate::Error::from_js(event.error().value_of()); handler(error); }) as Box); - device_data - .0 + self.inner .set_onuncapturederror(Some(f.as_ref().unchecked_ref())); // TODO: This will leak the memory associated with the error handler by default. 
f.forget(); } - fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: crate::ErrorFilter) { - device_data.0.push_error_scope(match filter { + fn push_error_scope(&self, filter: crate::ErrorFilter) { + self.inner.push_error_scope(match filter { crate::ErrorFilter::OutOfMemory => webgpu_sys::GpuErrorFilter::OutOfMemory, crate::ErrorFilter::Validation => webgpu_sys::GpuErrorFilter::Validation, crate::ErrorFilter::Internal => webgpu_sys::GpuErrorFilter::Internal, }); } - fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture { - let error_promise = device_data.0.pop_error_scope(); - MakeSendFuture::new( + fn pop_error_scope(&self) -> Pin> { + let error_promise = self.inner.pop_error_scope(); + Box::pin(MakeSendFuture::new( wasm_bindgen_futures::JsFuture::from(error_promise), future_pop_error_scope, - ) + )) } - fn buffer_map_async( - &self, - buffer_data: &Self::BufferData, - mode: crate::MapMode, - range: Range, - callback: crate::context::BufferMapCallback, - ) { - let map_promise = buffer_data.0.buffer.map_async_with_f64_and_f64( - map_map_mode(mode), - range.start as f64, - (range.end - range.start) as f64, - ); + fn start_capture(&self) { + // No capturing api in webgpu + } - buffer_data.0.set_mapped_range(range); + fn stop_capture(&self) { + // No capturing api in webgpu + } - register_then_closures(&map_promise, callback, Ok(()), Err(crate::BufferAsyncError)); + fn poll(&self, _maintain: crate::Maintain) -> crate::MaintainResult { + // Device is polled automatically + crate::MaintainResult::SubmissionQueueEmpty } - fn buffer_get_mapped_range( - &self, - buffer_data: &Self::BufferData, - sub_range: Range, - ) -> Box { - let actual_mapping = buffer_data.0.get_mapped_range(sub_range); - let temporary_mapping = actual_mapping.to_vec(); - Box::new(BufferMappedRange { - actual_mapping, - temporary_mapping, - }) + fn get_internal_counters(&self) -> crate::InternalCounters { + crate::InternalCounters::default() + } + + fn generate_allocator_report(&self) -> Option { + None } - fn buffer_unmap(&self, buffer_data: &Self::BufferData) { - buffer_data.0.buffer.unmap(); - buffer_data.0.mapping.borrow_mut().mapped_buffer = None; + fn destroy(&self) { + self.inner.destroy(); + } +} +impl Drop for WebDevice { + fn drop(&mut self) { + // no-op } +} - fn shader_get_compilation_info( +impl dispatch::QueueInterface for WebQueue { + fn write_buffer( &self, - shader_data: &Self::ShaderModuleData, - ) -> Self::CompilationInfoFuture { - let compilation_info_promise = shader_data.0.module.get_compilation_info(); - let map_future = Box::new({ - let compilation_info = shader_data.0.compilation_info.clone(); - move |result| future_compilation_info(result, &compilation_info) - }); - MakeSendFuture::new( - wasm_bindgen_futures::JsFuture::from(compilation_info_promise), - map_future, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + data: &[u8], + ) { + let buffer = buffer.as_webgpu(); + /* Skip the copy once gecko allows BufferSource instead of ArrayBuffer + self.inner.write_buffer_with_f64_and_u8_array_and_f64_and_f64( + &buffer.buffer, + offset as f64, + data, + 0f64, + data.len() as f64, + ); + */ + self.inner + .write_buffer_with_f64_and_buffer_source_and_f64_and_f64( + &buffer.inner, + offset as f64, + &js_sys::Uint8Array::from(data).buffer(), + 0f64, + data.len() as f64, + ) + .unwrap(); + } + + fn create_staging_buffer( + &self, + size: crate::BufferSize, + ) -> Option { + Some( + WebQueueWriteBuffer { + inner: vec![0; size.get() 
as usize].into_boxed_slice(), + ident: crate::cmp::Identifier::create(), + } + .into(), ) } - fn texture_create_view( + fn validate_write_buffer( &self, - texture_data: &Self::TextureData, - desc: &crate::TextureViewDescriptor<'_>, - ) -> Self::TextureViewData { - let mapped = webgpu_sys::GpuTextureViewDescriptor::new(); - if let Some(dim) = desc.dimension { - mapped.set_dimension(map_texture_view_dimension(dim)); - } - if let Some(format) = desc.format { - mapped.set_format(map_texture_format(format)); + buffer: &dispatch::DispatchBuffer, + offset: wgt::BufferAddress, + size: wgt::BufferSize, + ) -> Option<()> { + let buffer = buffer.as_webgpu(); + + let usage = wgt::BufferUsages::from_bits_truncate(buffer.inner.usage()); + // TODO: actually send this down the error scope + if !usage.contains(wgt::BufferUsages::COPY_DST) { + log::error!("Destination buffer is missing the `COPY_DST` usage flag"); + return None; } - mapped.set_aspect(map_texture_aspect(desc.aspect)); - mapped.set_base_array_layer(desc.base_array_layer); - if let Some(count) = desc.array_layer_count { - mapped.set_array_layer_count(count); + let write_size = u64::from(size); + if write_size % wgt::COPY_BUFFER_ALIGNMENT != 0 { + log::error!( + "Copy size {} does not respect `COPY_BUFFER_ALIGNMENT`", + size + ); + return None; } - mapped.set_base_mip_level(desc.base_mip_level); - if let Some(count) = desc.mip_level_count { - mapped.set_mip_level_count(count); + if offset % wgt::COPY_BUFFER_ALIGNMENT != 0 { + log::error!( + "Buffer offset {} is not aligned to block size or `COPY_BUFFER_ALIGNMENT`", + offset + ); + return None; } - if let Some(label) = desc.label { - mapped.set_label(label); + if write_size + offset > buffer.inner.size() as u64 { + log::error!("copy of {}..{} would end up overrunning the bounds of the destination buffer of size {}", offset, offset + write_size, buffer.inner.size()); + return None; } - Sendable( - texture_data - .0 - .create_view_with_descriptor(&mapped) - .expect_throw("could not create view with descriptor"), - ) + Some(()) } - fn surface_drop(&self, _surface_data: &Self::SurfaceData) { - // Dropped automatically - } + fn write_staging_buffer( + &self, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + staging_buffer: &dispatch::DispatchQueueWriteBuffer, + ) { + let staging_buffer = staging_buffer.as_webgpu(); - fn adapter_drop(&self, _adapter_data: &Self::AdapterData) { - // Dropped automatically + dispatch::QueueInterface::write_buffer(self, buffer, offset, &staging_buffer.inner) } - fn buffer_destroy(&self, buffer_data: &Self::BufferData) { - buffer_data.0.buffer.destroy(); - } + fn write_texture( + &self, + texture: crate::TexelCopyTextureInfo<'_>, + data: &[u8], + data_layout: crate::TexelCopyBufferLayout, + size: crate::Extent3d, + ) { + let mapped_data_layout = webgpu_sys::GpuTexelCopyBufferLayout::new(); + if let Some(bytes_per_row) = data_layout.bytes_per_row { + mapped_data_layout.set_bytes_per_row(bytes_per_row); + } + if let Some(rows_per_image) = data_layout.rows_per_image { + mapped_data_layout.set_rows_per_image(rows_per_image); + } + mapped_data_layout.set_offset(data_layout.offset as f64); - fn buffer_drop(&self, _buffer_data: &Self::BufferData) { - // Dropped automatically + /* Skip the copy once gecko allows BufferSource instead of ArrayBuffer + self.inner.write_texture_with_u8_array_and_gpu_extent_3d_dict( + &map_texture_copy_view(texture), + data, + &mapped_data_layout, + &map_extent_3d(size), + ); + */ + self.inner + 
.write_texture_with_buffer_source_and_gpu_extent_3d_dict( + &map_texture_copy_view(texture), + &js_sys::Uint8Array::from(data).buffer(), + &mapped_data_layout, + &map_extent_3d(size), + ) + .unwrap(); } - fn texture_destroy(&self, texture_data: &Self::TextureData) { - texture_data.0.destroy(); + fn copy_external_image_to_texture( + &self, + source: &wgt::CopyExternalImageSourceInfo, + dest: wgt::CopyExternalImageDestInfo<&crate::api::Texture>, + size: crate::Extent3d, + ) { + self.inner + .copy_external_image_to_texture_with_gpu_extent_3d_dict( + &map_external_texture_copy_view(source), + &map_tagged_texture_copy_view(dest), + &map_extent_3d(size), + ) + .unwrap(); } - fn texture_drop(&self, _texture_data: &Self::TextureData) { - // Dropped automatically - } + fn submit( + &self, + command_buffers: &mut dyn Iterator, + ) -> u64 { + let temp_command_buffers = command_buffers.collect::>(); - fn texture_view_drop(&self, _texture_view_data: &Self::TextureViewData) { - // Dropped automatically - } + let array = temp_command_buffers + .iter() + .map(|buffer| &buffer.as_webgpu().inner) + .collect::(); - fn sampler_drop(&self, _sampler_data: &Self::SamplerData) { - // Dropped automatically - } + self.inner.submit(&array); - fn query_set_drop(&self, _query_set_data: &Self::QuerySetData) { - // Dropped automatically + 0 } - fn bind_group_drop(&self, _bind_group_data: &Self::BindGroupData) { - // Dropped automatically + fn get_timestamp_period(&self) -> f32 { + // Timestamp values are always in nanoseconds, see https://gpuweb.github.io/gpuweb/#timestamp + 1.0 } - fn bind_group_layout_drop(&self, _bind_group_layout_data: &Self::BindGroupLayoutData) { - // Dropped automatically + fn on_submitted_work_done(&self, _callback: dispatch::BoxSubmittedWorkDoneCallback) { + unimplemented!("on_submitted_work_done is not yet implemented"); } - - fn pipeline_layout_drop(&self, _pipeline_layout_data: &Self::PipelineLayoutData) { - // Dropped automatically +} +impl Drop for WebQueue { + fn drop(&mut self) { + // no-op } +} - fn shader_module_drop(&self, _shader_module_data: &Self::ShaderModuleData) { - // Dropped automatically +impl dispatch::ShaderModuleInterface for WebShaderModule { + fn get_compilation_info(&self) -> Pin> { + let compilation_info_promise = self.module.get_compilation_info(); + let map_future = Box::new({ + let compilation_info = self.compilation_info.clone(); + move |result| future_compilation_info(result, &compilation_info) + }); + Box::pin(MakeSendFuture::new( + wasm_bindgen_futures::JsFuture::from(compilation_info_promise), + map_future, + )) } - - fn command_encoder_drop(&self, _command_encoder_data: &Self::CommandEncoderData) { - // Dropped automatically +} +impl Drop for WebShaderModule { + fn drop(&mut self) { + // no-op } +} - fn command_buffer_drop(&self, _command_buffer_data: &Self::CommandBufferData) { - // Dropped automatically +impl dispatch::BindGroupLayoutInterface for WebBindGroupLayout {} +impl Drop for WebBindGroupLayout { + fn drop(&mut self) { + // no-op } +} - fn render_bundle_drop(&self, _render_bundle_data: &Self::RenderBundleData) { - // Dropped automatically +impl dispatch::BindGroupInterface for WebBindGroup {} +impl Drop for WebBindGroup { + fn drop(&mut self) { + // no-op } +} - fn compute_pipeline_drop(&self, _pipeline_data: &Self::ComputePipelineData) { - // Dropped automatically +impl dispatch::TextureViewInterface for WebTextureView {} +impl Drop for WebTextureView { + fn drop(&mut self) { + // no-op } +} - fn render_pipeline_drop(&self, _pipeline_data: 
&Self::RenderPipelineData) { - // Dropped automatically +impl dispatch::SamplerInterface for WebSampler {} +impl Drop for WebSampler { + fn drop(&mut self) { + // no-op } +} - fn compute_pipeline_get_bind_group_layout( +impl dispatch::BufferInterface for WebBuffer { + fn map_async( &self, - pipeline_data: &Self::ComputePipelineData, - index: u32, - ) -> Self::BindGroupLayoutData { - Sendable(pipeline_data.0.get_bind_group_layout(index)) + mode: crate::MapMode, + range: Range, + callback: dispatch::BufferMapCallback, + ) { + let map_promise = self.inner.map_async_with_f64_and_f64( + map_map_mode(mode), + range.start as f64, + (range.end - range.start) as f64, + ); + + self.set_mapped_range(range); + + register_then_closures(&map_promise, callback, Ok(()), Err(crate::BufferAsyncError)); } - fn render_pipeline_get_bind_group_layout( + fn get_mapped_range( &self, - pipeline_data: &Self::RenderPipelineData, - index: u32, - ) -> Self::BindGroupLayoutData { - Sendable(pipeline_data.0.get_bind_group_layout(index)) + sub_range: Range, + ) -> dispatch::DispatchBufferMappedRange { + let actual_mapping = self.get_mapped_range(sub_range); + let temporary_mapping = actual_mapping.to_vec(); + WebBufferMappedRange { + actual_mapping, + temporary_mapping, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn command_encoder_copy_buffer_to_buffer( + fn get_mapped_range_as_array_buffer( &self, - encoder_data: &Self::CommandEncoderData, - source_data: &Self::BufferData, - source_offset: wgt::BufferAddress, - destination_data: &Self::BufferData, - destination_offset: wgt::BufferAddress, - copy_size: wgt::BufferAddress, - ) { - encoder_data - .0 - .copy_buffer_to_buffer_with_f64_and_f64_and_f64( - &source_data.0.buffer, - source_offset as f64, - &destination_data.0.buffer, - destination_offset as f64, - copy_size as f64, - ) - .expect_throw("could not copy buffer to buffer") + sub_range: Range, + ) -> Option { + Some(self.get_mapped_array_buffer(sub_range)) } - fn command_encoder_copy_buffer_to_texture( - &self, - encoder_data: &Self::CommandEncoderData, + fn unmap(&self) { + self.inner.unmap(); + self.mapping.borrow_mut().mapped_buffer = None; + } + + fn destroy(&self) { + self.inner.destroy(); + } +} +impl Drop for WebBuffer { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::TextureInterface for WebTexture { + fn create_view( + &self, + desc: &crate::TextureViewDescriptor<'_>, + ) -> dispatch::DispatchTextureView { + let mapped = webgpu_sys::GpuTextureViewDescriptor::new(); + if let Some(dim) = desc.dimension { + mapped.set_dimension(map_texture_view_dimension(dim)); + } + if let Some(format) = desc.format { + mapped.set_format(map_texture_format(format)); + } + mapped.set_aspect(map_texture_aspect(desc.aspect)); + mapped.set_base_array_layer(desc.base_array_layer); + if let Some(count) = desc.array_layer_count { + mapped.set_array_layer_count(count); + } + mapped.set_base_mip_level(desc.base_mip_level); + if let Some(count) = desc.mip_level_count { + mapped.set_mip_level_count(count); + } + if let Some(label) = desc.label { + mapped.set_label(label); + } + + let view = self.inner.create_view_with_descriptor(&mapped).unwrap(); + + WebTextureView { + inner: view, + ident: crate::cmp::Identifier::create(), + } + .into() + } + + fn destroy(&self) { + self.inner.destroy(); + } +} +impl Drop for WebTexture { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::BlasInterface for WebBlas { + fn destroy(&self) { + unimplemented!("Raytracing not implemented for web"); + } +} +impl Drop for 
WebBlas { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::TlasInterface for WebTlas { + fn destroy(&self) { + unimplemented!("Raytracing not implemented for web"); + } +} +impl Drop for WebTlas { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::QuerySetInterface for WebQuerySet {} +impl Drop for WebQuerySet { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::PipelineLayoutInterface for WebPipelineLayout {} +impl Drop for WebPipelineLayout { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::RenderPipelineInterface for WebRenderPipeline { + fn get_bind_group_layout(&self, index: u32) -> dispatch::DispatchBindGroupLayout { + let bind_group_layout = self.inner.get_bind_group_layout(index); + + WebBindGroupLayout { + inner: bind_group_layout, + ident: crate::cmp::Identifier::create(), + } + .into() + } +} +impl Drop for WebRenderPipeline { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::ComputePipelineInterface for WebComputePipeline { + fn get_bind_group_layout(&self, index: u32) -> dispatch::DispatchBindGroupLayout { + let bind_group_layout = self.inner.get_bind_group_layout(index); + + WebBindGroupLayout { + inner: bind_group_layout, + ident: crate::cmp::Identifier::create(), + } + .into() + } +} +impl Drop for WebComputePipeline { + fn drop(&mut self) { + // no-op + } +} + +impl dispatch::CommandEncoderInterface for WebCommandEncoder { + fn copy_buffer_to_buffer( + &self, + source: &dispatch::DispatchBuffer, + source_offset: crate::BufferAddress, + destination: &dispatch::DispatchBuffer, + destination_offset: crate::BufferAddress, + copy_size: crate::BufferAddress, + ) { + let source = source.as_webgpu(); + let destination = destination.as_webgpu(); + + self.inner + .copy_buffer_to_buffer_with_f64_and_f64_and_f64( + &source.inner, + source_offset as f64, + &destination.inner, + destination_offset as f64, + copy_size as f64, + ) + .unwrap(); + } + + fn copy_buffer_to_texture( + &self, source: crate::TexelCopyBufferInfo<'_>, destination: crate::TexelCopyTextureInfo<'_>, - copy_size: wgt::Extent3d, + copy_size: crate::Extent3d, ) { - encoder_data - .0 + self.inner .copy_buffer_to_texture_with_gpu_extent_3d_dict( &map_buffer_copy_view(source), &map_texture_copy_view(destination), &map_extent_3d(copy_size), ) - .expect_throw("could not copy buffer to texture") + .unwrap(); } - fn command_encoder_copy_texture_to_buffer( + fn copy_texture_to_buffer( &self, - encoder_data: &Self::CommandEncoderData, source: crate::TexelCopyTextureInfo<'_>, destination: crate::TexelCopyBufferInfo<'_>, - copy_size: wgt::Extent3d, + copy_size: crate::Extent3d, ) { - encoder_data - .0 + self.inner .copy_texture_to_buffer_with_gpu_extent_3d_dict( &map_texture_copy_view(source), &map_buffer_copy_view(destination), &map_extent_3d(copy_size), ) - .expect_throw("could not copy texture to buffer") + .unwrap(); } - fn command_encoder_copy_texture_to_texture( + fn copy_texture_to_texture( &self, - encoder_data: &Self::CommandEncoderData, source: crate::TexelCopyTextureInfo<'_>, destination: crate::TexelCopyTextureInfo<'_>, - copy_size: wgt::Extent3d, + copy_size: crate::Extent3d, ) { - encoder_data - .0 + self.inner .copy_texture_to_texture_with_gpu_extent_3d_dict( &map_texture_copy_view(source), &map_texture_copy_view(destination), &map_extent_3d(copy_size), ) - .expect_throw("could not copy texture to texture") + .unwrap(); } - fn command_encoder_begin_compute_pass( + fn begin_compute_pass( &self, - encoder_data: &Self::CommandEncoderData, desc: 
&crate::ComputePassDescriptor<'_>, - ) -> Self::ComputePassData { + ) -> dispatch::DispatchComputePass { let mapped_desc = webgpu_sys::GpuComputePassDescriptor::new(); if let Some(label) = desc.label { mapped_desc.set_label(label); } if let Some(ref timestamp_writes) = desc.timestamp_writes { - let query_set: &::QuerySetData = - downcast_ref(timestamp_writes.query_set.data.as_ref()); - let writes = webgpu_sys::GpuComputePassTimestampWrites::new(&query_set.0); + let query_set = timestamp_writes.query_set.inner.as_webgpu(); + let writes = webgpu_sys::GpuComputePassTimestampWrites::new(&query_set.inner); if let Some(index) = timestamp_writes.beginning_of_pass_write_index { writes.set_beginning_of_pass_write_index(index); } @@ -2456,18 +2852,19 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.set_timestamp_writes(&writes); } - Sendable( - encoder_data - .0 - .begin_compute_pass_with_descriptor(&mapped_desc), - ) + let compute_pass = self.inner.begin_compute_pass_with_descriptor(&mapped_desc); + + WebComputePassEncoder { + inner: compute_pass, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn command_encoder_begin_render_pass( + fn begin_render_pass( &self, - encoder_data: &Self::CommandEncoderData, desc: &crate::RenderPassDescriptor<'_>, - ) -> Self::RenderPassData { + ) -> dispatch::DispatchRenderPass { let mapped_color_attachments = desc .color_attachments .iter() @@ -2482,21 +2879,19 @@ impl crate::context::Context for ContextWebGpu { crate::LoadOp::Load => webgpu_sys::GpuLoadOp::Load, }; - let view: &::TextureViewData = - downcast_ref(ca.view.data.as_ref()); + let view = &ca.view.inner.as_webgpu().inner; let mapped_color_attachment = webgpu_sys::GpuRenderPassColorAttachment::new( load_value, map_store_op(ca.ops.store), - &view.0, + view, ); if let Some(cv) = clear_value { mapped_color_attachment.set_clear_value(&cv); } if let Some(rt) = ca.resolve_target { - let resolve_target_view: &::TextureViewData = - downcast_ref(rt.data.as_ref()); - mapped_color_attachment.set_resolve_target(&resolve_target_view.0); + let resolve_target_view = &rt.inner.as_webgpu().inner; + mapped_color_attachment.set_resolve_target(resolve_target_view); } mapped_color_attachment.set_store_op(map_store_op(ca.ops.store)); @@ -2513,10 +2908,9 @@ impl crate::context::Context for ContextWebGpu { } if let Some(dsa) = &desc.depth_stencil_attachment { - let depth_stencil_attachment: &::TextureViewData = - downcast_ref(dsa.view.data.as_ref()); + let depth_stencil_attachment = &dsa.view.inner.as_webgpu().inner; let mapped_depth_stencil_attachment = - webgpu_sys::GpuRenderPassDepthStencilAttachment::new(&depth_stencil_attachment.0); + webgpu_sys::GpuRenderPassDepthStencilAttachment::new(depth_stencil_attachment); if let Some(ref ops) = dsa.depth_ops { let load_op = match ops.load { crate::LoadOp::Clear(v) => { @@ -2545,9 +2939,8 @@ impl crate::context::Context for ContextWebGpu { } if let Some(ref timestamp_writes) = desc.timestamp_writes { - let query_set: &::QuerySetData = - downcast_ref(timestamp_writes.query_set.data.as_ref()); - let writes = webgpu_sys::GpuRenderPassTimestampWrites::new(&query_set.0); + let query_set = ×tamp_writes.query_set.inner.as_webgpu().inner; + let writes = webgpu_sys::GpuRenderPassTimestampWrites::new(query_set); if let Some(index) = timestamp_writes.beginning_of_pass_write_index { writes.set_beginning_of_pass_write_index(index); } @@ -2557,694 +2950,338 @@ impl crate::context::Context for ContextWebGpu { mapped_desc.set_timestamp_writes(&writes); } - Sendable( - 
encoder_data - .0 - .begin_render_pass(&mapped_desc) - .expect_throw("could not begin render pass"), - ) + let render_pass = self.inner.begin_render_pass(&mapped_desc).unwrap(); + + WebRenderPassEncoder { + inner: render_pass, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn command_encoder_finish( - &self, - encoder_data: &mut Self::CommandEncoderData, - ) -> Self::CommandBufferData { - let label = encoder_data.0.label(); - Sendable(if label.is_empty() { - encoder_data.0.finish() + fn finish(&mut self) -> dispatch::DispatchCommandBuffer { + let label = self.inner.label(); + let buffer = if label.is_empty() { + self.inner.finish() } else { let mapped_desc = webgpu_sys::GpuCommandBufferDescriptor::new(); mapped_desc.set_label(&label); - encoder_data.0.finish_with_descriptor(&mapped_desc) - }) + + self.inner.finish_with_descriptor(&mapped_desc) + }; + + WebCommandBuffer { + inner: buffer, + ident: crate::cmp::Identifier::create(), + } + .into() } - fn command_encoder_clear_texture( + fn clear_texture( &self, - _encoder_data: &Self::CommandEncoderData, - _texture_data: &Self::TextureData, - _subresource_range: &wgt::ImageSubresourceRange, + _texture: &dispatch::DispatchTexture, + _subresource_range: &crate::ImageSubresourceRange, ) { - //TODO + unimplemented!("clear_texture is not yet implemented"); } - fn command_encoder_clear_buffer( + fn clear_buffer( &self, - encoder_data: &Self::CommandEncoderData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: Option, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + size: Option, ) { + let buffer = buffer.as_webgpu(); + match size { - Some(size) => encoder_data.0.clear_buffer_with_f64_and_f64( - &buffer_data.0.buffer, - offset as f64, - size as f64, - ), - None => encoder_data - .0 - .clear_buffer_with_f64(&buffer_data.0.buffer, offset as f64), + Some(size) => { + self.inner + .clear_buffer_with_f64_and_f64(&buffer.inner, offset as f64, size as f64) + } + None => self + .inner + .clear_buffer_with_f64(&buffer.inner, offset as f64), } } - fn command_encoder_insert_debug_marker( - &self, - _encoder_data: &Self::CommandEncoderData, - _label: &str, - ) { + fn insert_debug_marker(&self, _label: &str) { // Not available in gecko yet - // encoder.insert_debug_marker(label); + // self.insert_debug_marker(label); } - fn command_encoder_push_debug_group( - &self, - _encoder_data: &Self::CommandEncoderData, - _label: &str, - ) { + fn push_debug_group(&self, _label: &str) { // Not available in gecko yet - // encoder.push_debug_group(label); + // self.push_debug_group(label); } - fn command_encoder_pop_debug_group(&self, _encoder_data: &Self::CommandEncoderData) { + fn pop_debug_group(&self) { // Not available in gecko yet - // encoder.pop_debug_group(); + // self.pop_debug_group(); } - fn command_encoder_write_timestamp( - &self, - _encoder_data: &Self::CommandEncoderData, - _query_set_data: &Self::QuerySetData, - _query_index: u32, - ) { + fn write_timestamp(&self, _query_set: &dispatch::DispatchQuerySet, _query_index: u32) { // Not available on WebGPU. 
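// Callers can avoid this panic by feature-gating the call; a minimal
// sketch, assuming a `device`, `encoder`, and `query_set` are in scope:
//
//     if device.features().contains(wgpu::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS) {
//         encoder.write_timestamp(&query_set, 0);
//     }
//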
// This was part of the spec originally but got removed, see https://github.com/gpuweb/gpuweb/pull/4370 panic!("TIMESTAMP_QUERY_INSIDE_ENCODERS feature must be enabled to call write_timestamp on a command encoder.") } - fn command_encoder_resolve_query_set( + fn resolve_query_set( &self, - encoder_data: &Self::CommandEncoderData, - query_set_data: &Self::QuerySetData, + query_set: &dispatch::DispatchQuerySet, first_query: u32, query_count: u32, - destination_data: &Self::BufferData, - destination_offset: wgt::BufferAddress, + destination: &dispatch::DispatchBuffer, + destination_offset: crate::BufferAddress, ) { - encoder_data.0.resolve_query_set_with_u32( - &query_set_data.0, + let query_set = &query_set.as_webgpu().inner; + let destination = &destination.as_webgpu().inner; + + self.inner.resolve_query_set_with_u32( + query_set, first_query, query_count, - &destination_data.0.buffer, + destination, destination_offset as u32, ); } - fn render_bundle_encoder_finish( - &self, - encoder_data: Self::RenderBundleEncoderData, - desc: &crate::RenderBundleDescriptor<'_>, - ) -> Self::RenderBundleData { - Sendable(match desc.label { - Some(label) => { - let mapped_desc = webgpu_sys::GpuRenderBundleDescriptor::new(); - mapped_desc.set_label(label); - encoder_data.0.finish_with_descriptor(&mapped_desc) - } - None => encoder_data.0.finish(), - }) - } - - fn queue_write_buffer( - &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - data: &[u8], - ) { - /* Skip the copy once gecko allows BufferSource instead of ArrayBuffer - queue_data.0.write_buffer_with_f64_and_u8_array_and_f64_and_f64( - &buffer_data.0, - offset as f64, - data, - 0f64, - data.len() as f64, - ); - */ - queue_data - .0 - .write_buffer_with_f64_and_buffer_source_and_f64_and_f64( - &buffer_data.0.buffer, - offset as f64, - &js_sys::Uint8Array::from(data).buffer(), - 0f64, - data.len() as f64, - ) - .expect_throw("invalid buffer write"); - } - - fn queue_validate_write_buffer( - &self, - _queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: wgt::BufferSize, - ) -> Option<()> { - let usage = wgt::BufferUsages::from_bits_truncate(buffer_data.0.buffer.usage()); - // TODO: actually send this down the error scope - if !usage.contains(wgt::BufferUsages::COPY_DST) { - log::error!("Destination buffer is missing the `COPY_DST` usage flag"); - return None; - } - let write_size = u64::from(size); - if write_size % wgt::COPY_BUFFER_ALIGNMENT != 0 { - log::error!( - "Copy size {} does not respect `COPY_BUFFER_ALIGNMENT`", - size - ); - return None; - } - if offset % wgt::COPY_BUFFER_ALIGNMENT != 0 { - log::error!( - "Buffer offset {} is not aligned to block size or `COPY_BUFFER_ALIGNMENT`", - offset - ); - return None; - } - if write_size + offset > buffer_data.0.buffer.size() as u64 { - log::error!("copy of {}..{} would end up overrunning the bounds of the destination buffer of size {}", offset, offset + write_size, buffer_data.0.buffer.size()); - return None; - } - Some(()) - } - - fn queue_create_staging_buffer( - &self, - _queue_data: &Self::QueueData, - size: wgt::BufferSize, - ) -> Option> { - Some(Box::new(WebQueueWriteBuffer( - vec![0; size.get() as usize].into_boxed_slice(), - ))) - } - - fn queue_write_staging_buffer( - &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - staging_buffer: &dyn QueueWriteBuffer, - ) { - let staging_buffer = staging_buffer - .as_any() - .downcast_ref::() - 
.unwrap() - .slice(); - self.queue_write_buffer(queue_data, buffer_data, offset, staging_buffer) - } - - fn queue_write_texture( - &self, - queue_data: &Self::QueueData, - texture: crate::TexelCopyTextureInfo<'_>, - data: &[u8], - data_layout: wgt::TexelCopyBufferLayout, - size: wgt::Extent3d, - ) { - let mapped_data_layout = webgpu_sys::GpuTexelCopyBufferLayout::new(); - if let Some(bytes_per_row) = data_layout.bytes_per_row { - mapped_data_layout.set_bytes_per_row(bytes_per_row); - } - if let Some(rows_per_image) = data_layout.rows_per_image { - mapped_data_layout.set_rows_per_image(rows_per_image); - } - mapped_data_layout.set_offset(data_layout.offset as f64); - - /* Skip the copy once gecko allows BufferSource instead of ArrayBuffer - queue_data.0.write_texture_with_u8_array_and_gpu_extent_3d_dict( - &map_texture_copy_view(texture), - data, - &mapped_data_layout, - &map_extent_3d(size), - ); - */ - queue_data - .0 - .write_texture_with_buffer_source_and_gpu_extent_3d_dict( - &map_texture_copy_view(texture), - &js_sys::Uint8Array::from(data).buffer(), - &mapped_data_layout, - &map_extent_3d(size), - ) - .expect_throw("invalid texture write"); - } - - fn queue_copy_external_image_to_texture( + fn build_acceleration_structures_unsafe_tlas<'a>( &self, - queue_data: &Self::QueueData, - source: &wgt::CopyExternalImageSourceInfo, - dest: crate::CopyExternalImageDestInfo<'_>, - size: wgt::Extent3d, + _blas: &mut dyn Iterator>, + _tlas: &mut dyn Iterator>, ) { - queue_data - .0 - .copy_external_image_to_texture_with_gpu_extent_3d_dict( - &map_external_texture_copy_view(source), - &map_tagged_texture_copy_view(dest), - &map_extent_3d(size), - ) - .expect_throw("invalid copy from external image to texture"); - } - - fn queue_submit>( - &self, - queue_data: &Self::QueueData, - command_buffers: I, - ) -> Self::SubmissionIndexData { - let temp_command_buffers = command_buffers - .map(|data| data.0) - .collect::(); - - queue_data.0.submit(&temp_command_buffers); - } - - fn queue_get_timestamp_period(&self, _queue_data: &Self::QueueData) -> f32 { - // Timestamp values are always in nanoseconds, see https://gpuweb.github.io/gpuweb/#timestamp - 1.0 + unimplemented!("Raytracing not implemented for web"); } - fn queue_on_submitted_work_done( + fn build_acceleration_structures<'a>( &self, - _queue_data: &Self::QueueData, - _callback: crate::context::SubmittedWorkDoneCallback, + _blas: &mut dyn Iterator>, + _tlas: &mut dyn Iterator, ) { - unimplemented!() + unimplemented!("Raytracing not implemented for web"); } - - fn device_start_capture(&self, _device_data: &Self::DeviceData) {} - fn device_stop_capture(&self, _device_data: &Self::DeviceData) {} - - fn device_get_internal_counters( - &self, - _device_data: &Self::DeviceData, - ) -> wgt::InternalCounters { - Default::default() +} +impl Drop for WebCommandEncoder { + fn drop(&mut self) { + // no-op } +} - fn device_generate_allocator_report( - &self, - _device_data: &Self::DeviceData, - ) -> Option { - None +impl dispatch::PipelineCacheInterface for WebPipelineCache { + fn get_data(&self) -> Option> { + todo!() } - - fn pipeline_cache_get_data(&self, _: &Self::PipelineCacheData) -> Option> { - None +} +impl Drop for WebPipelineCache { + fn drop(&mut self) { + // no-op } +} - fn compute_pass_set_pipeline( - &self, - pass_data: &mut Self::ComputePassData, - pipeline_data: &Self::ComputePipelineData, - ) { - pass_data.0.set_pipeline(&pipeline_data.0) +impl dispatch::ComputePassInterface for WebComputePassEncoder { + fn set_pipeline(&mut self, pipeline: 
&dispatch::DispatchComputePipeline) { + let pipeline = &pipeline.as_webgpu().inner; + self.inner.set_pipeline(pipeline); } - fn compute_pass_set_bind_group( - &self, - pass_data: &mut Self::ComputePassData, + fn set_bind_group( + &mut self, index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[wgt::DynamicOffset], + bind_group: Option<&dispatch::DispatchBindGroup>, + offsets: &[crate::DynamicOffset], ) { - if bind_group_data.is_none() { - // TODO: Handle the None case. + let Some(bind_group) = bind_group else { return; - } - let bind_group_data = bind_group_data.unwrap(); + }; + let bind_group = &bind_group.as_webgpu().inner; + if offsets.is_empty() { - pass_data.0.set_bind_group(index, Some(&bind_group_data.0)); + self.inner.set_bind_group(index, Some(bind_group)); } else { - pass_data - .0 - .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( + self.inner + .set_bind_group_with_u32_slice_and_f64_and_dynamic_offsets_data_length( index, - Some(&bind_group_data.0), - unsafe { &js_sys::Uint32Array::view(offsets) }, + Some(bind_group), + offsets, 0f64, offsets.len() as u32, ) - .expect_throw("invalid usage when setting bind group"); + .unwrap(); } } - fn compute_pass_set_push_constants( - &self, - _pass_data: &mut Self::ComputePassData, - _offset: u32, - _data: &[u8], - ) { + fn set_push_constants(&mut self, _offset: u32, _data: &[u8]) { panic!("PUSH_CONSTANTS feature must be enabled to call multi_draw_indexed_indirect") } - fn compute_pass_insert_debug_marker( - &self, - _pass_data: &mut Self::ComputePassData, - _label: &str, - ) { + fn insert_debug_marker(&mut self, _label: &str) { // Not available in gecko yet - // self.0.insert_debug_marker(label); + // self.inner.insert_debug_marker(label); } - fn compute_pass_push_debug_group( - &self, - _pass_data: &mut Self::ComputePassData, - _group_label: &str, - ) { + fn push_debug_group(&mut self, _group_label: &str) { // Not available in gecko yet - // self.0.push_debug_group(group_label); + // self.inner.push_debug_group(group_label); } - fn compute_pass_pop_debug_group(&self, _pass_data: &mut Self::ComputePassData) { + fn pop_debug_group(&mut self) { // Not available in gecko yet - // self.0.pop_debug_group(); + // self.inner.pop_debug_group(); } - fn compute_pass_write_timestamp( - &self, - _pass_data: &mut Self::ComputePassData, - _query_set_data: &Self::QuerySetData, - _query_index: u32, - ) { + fn write_timestamp(&mut self, _query_set: &dispatch::DispatchQuerySet, _query_index: u32) { panic!("TIMESTAMP_QUERY_INSIDE_PASSES feature must be enabled to call write_timestamp in a compute pass.") } - fn compute_pass_begin_pipeline_statistics_query( - &self, - _pass_data: &mut Self::ComputePassData, - _query_set_data: &Self::QuerySetData, + fn begin_pipeline_statistics_query( + &mut self, + _query_set: &dispatch::DispatchQuerySet, _query_index: u32, ) { // Not available in gecko yet } - fn compute_pass_end_pipeline_statistics_query(&self, _pass_data: &mut Self::ComputePassData) { + fn end_pipeline_statistics_query(&mut self) { // Not available in gecko yet } - fn compute_pass_dispatch_workgroups( - &self, - pass_data: &mut Self::ComputePassData, - x: u32, - y: u32, - z: u32, - ) { - pass_data - .0 + fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) { + self.inner .dispatch_workgroups_with_workgroup_count_y_and_workgroup_count_z(x, y, z); } - fn compute_pass_dispatch_workgroups_indirect( - &self, - pass_data: &mut Self::ComputePassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: 
wgt::BufferAddress, + fn dispatch_workgroups_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, ) { - pass_data.0.dispatch_workgroups_indirect_with_f64( - &indirect_buffer_data.0.buffer, - indirect_offset as f64, - ); + let indirect_buffer = indirect_buffer.as_webgpu(); + + self.inner + .dispatch_workgroups_indirect_with_f64(&indirect_buffer.inner, indirect_offset as f64); } - fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData) { - pass_data.0.end(); + fn end(&mut self) { + self.inner.end(); + } +} +impl Drop for WebComputePassEncoder { + fn drop(&mut self) { + dispatch::ComputePassInterface::end(self); } +} - fn render_bundle_encoder_set_pipeline( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - pipeline_data: &Self::RenderPipelineData, - ) { - encoder_data.0.set_pipeline(&pipeline_data.0); +impl dispatch::RenderPassInterface for WebRenderPassEncoder { + fn set_pipeline(&mut self, pipeline: &dispatch::DispatchRenderPipeline) { + let pipeline = &pipeline.as_webgpu().inner; + + self.inner.set_pipeline(pipeline); } - fn render_bundle_encoder_set_bind_group( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, + fn set_bind_group( + &mut self, index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[wgt::DynamicOffset], + bind_group: Option<&dispatch::DispatchBindGroup>, + offsets: &[crate::DynamicOffset], ) { - if bind_group_data.is_none() { - // TODO: Handle the None case. + let Some(bind_group) = bind_group else { return; - } - let bind_group_data = bind_group_data.unwrap(); + }; + let bind_group = &bind_group.as_webgpu().inner; + if offsets.is_empty() { - encoder_data - .0 - .set_bind_group(index, Some(&bind_group_data.0)); + self.inner.set_bind_group(index, Some(bind_group)); } else { - encoder_data - .0 - .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( + self.inner + .set_bind_group_with_u32_slice_and_f64_and_dynamic_offsets_data_length( index, - Some(&bind_group_data.0), - unsafe { &js_sys::Uint32Array::view(offsets) }, + Some(bind_group), + offsets, 0f64, offsets.len() as u32, ) - .expect_throw("invalid usage when setting bind group"); + .unwrap(); } } - fn render_bundle_encoder_set_index_buffer( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - buffer_data: &Self::BufferData, - index_format: wgt::IndexFormat, - offset: wgt::BufferAddress, - size: Option, + fn set_index_buffer( + &mut self, + buffer: &dispatch::DispatchBuffer, + index_format: crate::IndexFormat, + offset: crate::BufferAddress, + size: Option, ) { - match size { - Some(s) => { - encoder_data.0.set_index_buffer_with_f64_and_f64( - &buffer_data.0.buffer, - map_index_format(index_format), - offset as f64, - s.get() as f64, - ); - } - None => { - encoder_data.0.set_index_buffer_with_f64( - &buffer_data.0.buffer, - map_index_format(index_format), - offset as f64, - ); - } - }; - } + let buffer = buffer.as_webgpu(); + let index_format = map_index_format(index_format); - fn render_bundle_encoder_set_vertex_buffer( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - slot: u32, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: Option, - ) { - match size { - Some(s) => { - encoder_data.0.set_vertex_buffer_with_f64_and_f64( - slot, - Some(&buffer_data.0.buffer), - offset as f64, - s.get() as f64, - ); - } - None => { - encoder_data.0.set_vertex_buffer_with_f64( - slot, - Some(&buffer_data.0.buffer), - offset as f64, - ); - } - }; - } - - fn 
render_bundle_encoder_set_push_constants( - &self, - _encoder_data: &mut Self::RenderBundleEncoderData, - _stages: wgt::ShaderStages, - _offset: u32, - _data: &[u8], - ) { - panic!("PUSH_CONSTANTS feature must be enabled to call multi_draw_indexed_indirect") - } - - fn render_bundle_encoder_draw( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - vertices: Range, - instances: Range, - ) { - encoder_data - .0 - .draw_with_instance_count_and_first_vertex_and_first_instance( - vertices.end - vertices.start, - instances.end - instances.start, - vertices.start, - instances.start, + if let Some(size) = size { + self.inner.set_index_buffer_with_f64_and_f64( + &buffer.inner, + index_format, + offset as f64, + size.get() as f64, ); + } else { + self.inner + .set_index_buffer_with_f64(&buffer.inner, index_format, offset as f64); + } } - fn render_bundle_encoder_draw_indexed( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indices: Range, - base_vertex: i32, - instances: Range, + fn set_vertex_buffer( + &mut self, + slot: u32, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + size: Option, ) { - encoder_data - .0 - .draw_indexed_with_instance_count_and_first_index_and_base_vertex_and_first_instance( - indices.end - indices.start, - instances.end - instances.start, - indices.start, - base_vertex, - instances.start, - ); - } + let buffer = buffer.as_webgpu(); - fn render_bundle_encoder_draw_indirect( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, - ) { - encoder_data - .0 - .draw_indirect_with_f64(&indirect_buffer_data.0.buffer, indirect_offset as f64); + if let Some(size) = size { + self.inner.set_vertex_buffer_with_f64_and_f64( + slot, + Some(&buffer.inner), + offset as f64, + size.get() as f64, + ); + } else { + self.inner + .set_vertex_buffer_with_f64(slot, Some(&buffer.inner), offset as f64); + } } - fn render_bundle_encoder_draw_indexed_indirect( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, - ) { - encoder_data - .0 - .draw_indexed_indirect_with_f64(&indirect_buffer_data.0.buffer, indirect_offset as f64); + fn set_push_constants(&mut self, _stages: crate::ShaderStages, _offset: u32, _data: &[u8]) { + panic!("PUSH_CONSTANTS feature must be enabled to call multi_draw_indexed_indirect") } - fn render_pass_set_pipeline( - &self, - pass_data: &mut Self::RenderPassData, - pipeline_data: &Self::RenderPipelineData, - ) { - pass_data.0.set_pipeline(&pipeline_data.0); + fn set_blend_constant(&mut self, color: crate::Color) { + self.inner + .set_blend_constant_with_gpu_color_dict(&map_color(color)) + .unwrap(); } - fn render_pass_set_bind_group( - &self, - pass_data: &mut Self::RenderPassData, - index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[wgt::DynamicOffset], - ) { - if bind_group_data.is_none() { - // TODO: Handle the None case. 
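// The reworked trait methods replace this is_none()/unwrap() check with a
// let-else early return, the shape used throughout the new code in this file:
//
//     let Some(bind_group) = bind_group else {
//         return;
//     };
//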
- return; - } - let bind_group_data = bind_group_data.unwrap(); - if offsets.is_empty() { - pass_data.0.set_bind_group(index, Some(&bind_group_data.0)); - } else { - pass_data - .0 - .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( - index, - Some(&bind_group_data.0), - unsafe { &js_sys::Uint32Array::view(offsets) }, - 0f64, - offsets.len() as u32, - ) - .expect_throw("invalid usage when setting bind group"); - } + fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { + self.inner.set_scissor_rect(x, y, width, height); } - fn render_pass_set_index_buffer( - &self, - pass_data: &mut Self::RenderPassData, - buffer_data: &Self::BufferData, - index_format: wgt::IndexFormat, - offset: wgt::BufferAddress, - size: Option, - ) { - match size { - Some(s) => { - pass_data.0.set_index_buffer_with_f64_and_f64( - &buffer_data.0.buffer, - map_index_format(index_format), - offset as f64, - s.get() as f64, - ); - } - None => { - pass_data.0.set_index_buffer_with_f64( - &buffer_data.0.buffer, - map_index_format(index_format), - offset as f64, - ); - } - }; - } - - fn render_pass_set_vertex_buffer( - &self, - pass_data: &mut Self::RenderPassData, - slot: u32, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: Option, + fn set_viewport( + &mut self, + x: f32, + y: f32, + width: f32, + height: f32, + min_depth: f32, + max_depth: f32, ) { - match size { - Some(s) => { - pass_data.0.set_vertex_buffer_with_f64_and_f64( - slot, - Some(&buffer_data.0.buffer), - offset as f64, - s.get() as f64, - ); - } - None => { - pass_data.0.set_vertex_buffer_with_f64( - slot, - Some(&buffer_data.0.buffer), - offset as f64, - ); - } - }; + self.inner + .set_viewport(x, y, width, height, min_depth, max_depth); } - fn render_pass_set_push_constants( - &self, - _pass_data: &mut Self::RenderPassData, - _stages: wgt::ShaderStages, - _offset: u32, - _data: &[u8], - ) { - panic!("PUSH_CONSTANTS feature must be enabled to call multi_draw_indexed_indirect") + fn set_stencil_reference(&mut self, reference: u32) { + self.inner.set_stencil_reference(reference); } - fn render_pass_draw( - &self, - pass_data: &mut Self::RenderPassData, - vertices: Range, - instances: Range, - ) { - pass_data - .0 + fn draw(&mut self, vertices: Range, instances: Range) { + self.inner .draw_with_instance_count_and_first_vertex_and_first_instance( vertices.end - vertices.start, instances.end - instances.start, @@ -3253,73 +3290,73 @@ impl crate::context::Context for ContextWebGpu { ); } - fn render_pass_draw_indexed( - &self, - pass_data: &mut Self::RenderPassData, - indices: Range, - base_vertex: i32, - instances: Range, - ) { - pass_data - .0 + fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { + self.inner .draw_indexed_with_instance_count_and_first_index_and_base_vertex_and_first_instance( indices.end - indices.start, instances.end - instances.start, indices.start, base_vertex, instances.start, - ); + ) } - fn render_pass_draw_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, + fn draw_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, ) { - pass_data - .0 - .draw_indirect_with_f64(&indirect_buffer_data.0.buffer, indirect_offset as f64); + let buffer = indirect_buffer.as_webgpu(); + self.inner + .draw_indirect_with_f64(&buffer.inner, indirect_offset as f64); } - fn render_pass_draw_indexed_indirect( - &self, - 
pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, + fn draw_indexed_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, ) { - pass_data - .0 - .draw_indexed_indirect_with_f64(&indirect_buffer_data.0.buffer, indirect_offset as f64); + let buffer = indirect_buffer.as_webgpu(); + self.inner + .draw_indexed_indirect_with_f64(&buffer.inner, indirect_offset as f64); } - fn render_pass_multi_draw_indirect( - &self, - _pass_data: &mut Self::RenderPassData, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count: u32, + fn multi_draw_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + count: u32, ) { - panic!("MULTI_DRAW_INDIRECT feature must be enabled to call multi_draw_indirect") + let buffer = indirect_buffer.as_webgpu(); + + for i in 0..count { + let offset = indirect_offset + i as crate::BufferAddress * 16; + self.inner + .draw_indirect_with_f64(&buffer.inner, offset as f64); + } } - fn render_pass_multi_draw_indexed_indirect( - &self, - _pass_data: &mut Self::RenderPassData, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count: u32, + fn multi_draw_indexed_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + count: u32, ) { - panic!("MULTI_DRAW_INDIRECT feature must be enabled to call multi_draw_indexed_indirect") + let buffer = indirect_buffer.as_webgpu(); + + for i in 0..count { + let offset = indirect_offset + i as crate::BufferAddress * 20; + self.inner + .draw_indexed_indirect_with_f64(&buffer.inner, offset as f64); + } } - fn render_pass_multi_draw_indirect_count( - &self, - _pass_data: &mut Self::RenderPassData, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count_buffer_data: &Self::BufferData, - _count_buffer_offset: wgt::BufferAddress, + fn multi_draw_indirect_count( + &mut self, + _indirect_buffer: &dispatch::DispatchBuffer, + _indirect_offset: crate::BufferAddress, + _count_buffer: &dispatch::DispatchBuffer, + _count_buffer_offset: crate::BufferAddress, _max_count: u32, ) { panic!( @@ -3327,279 +3364,360 @@ impl crate::context::Context for ContextWebGpu { ) } - fn render_pass_multi_draw_indexed_indirect_count( - &self, - _pass_data: &mut Self::RenderPassData, - _indirect_buffer_data: &Self::BufferData, - _indirect_offset: wgt::BufferAddress, - _count_buffer_data: &Self::BufferData, - _count_buffer_offset: wgt::BufferAddress, + fn multi_draw_indexed_indirect_count( + &mut self, + _indirect_buffer: &dispatch::DispatchBuffer, + _indirect_offset: crate::BufferAddress, + _count_buffer: &dispatch::DispatchBuffer, + _count_buffer_offset: crate::BufferAddress, _max_count: u32, ) { panic!("MULTI_DRAW_INDIRECT_COUNT feature must be enabled to call multi_draw_indexed_indirect_count") } - fn render_pass_set_blend_constant( - &self, - pass_data: &mut Self::RenderPassData, - color: wgt::Color, - ) { - pass_data - .0 - .set_blend_constant_with_gpu_color_dict(&map_color(color)) - .expect_throw("could not set blend constant"); - } - - fn render_pass_set_scissor_rect( - &self, - pass_data: &mut Self::RenderPassData, - x: u32, - y: u32, - width: u32, - height: u32, - ) { - pass_data.0.set_scissor_rect(x, y, width, height); - } - - fn render_pass_set_viewport( - &self, - pass_data: &mut Self::RenderPassData, - x: f32, - y: f32, - width: f32, 
- height: f32, - min_depth: f32, - max_depth: f32, - ) { - pass_data - .0 - .set_viewport(x, y, width, height, min_depth, max_depth); - } - - fn render_pass_set_stencil_reference( - &self, - pass_data: &mut Self::RenderPassData, - reference: u32, - ) { - pass_data.0.set_stencil_reference(reference); - } - - fn render_pass_insert_debug_marker(&self, _pass_data: &mut Self::RenderPassData, _label: &str) { + fn insert_debug_marker(&mut self, _label: &str) { // Not available in gecko yet - // self.0.insert_debug_marker(label); + // self.inner.insert_debug_marker(label); } - fn render_pass_push_debug_group( - &self, - _pass_data: &mut Self::RenderPassData, - _group_label: &str, - ) { + fn push_debug_group(&mut self, _group_label: &str) { // Not available in gecko yet - // self.0.push_debug_group(group_label); + // self.inner.push_debug_group(group_label); } - fn render_pass_pop_debug_group(&self, _pass_data: &mut Self::RenderPassData) { + fn pop_debug_group(&mut self) { // Not available in gecko yet - // self.0.pop_debug_group(); + // self.inner.pop_debug_group(); } - fn render_pass_write_timestamp( - &self, - _pass_data: &mut Self::RenderPassData, - _query_set_data: &Self::QuerySetData, - _query_index: u32, - ) { + fn write_timestamp(&mut self, _query_set: &dispatch::DispatchQuerySet, _query_index: u32) { panic!("TIMESTAMP_QUERY_INSIDE_PASSES feature must be enabled to call write_timestamp in a render pass.") } - fn render_pass_begin_occlusion_query( - &self, - _pass_data: &mut Self::RenderPassData, - _query_index: u32, - ) { + fn begin_occlusion_query(&mut self, _query_index: u32) { // Not available in gecko yet + // self.inner.begin_occlusion_query(query_index); } - fn render_pass_end_occlusion_query(&self, _pass_data: &mut Self::RenderPassData) { + fn end_occlusion_query(&mut self) { // Not available in gecko yet + // self.inner.end_occlusion_query(); } - fn render_pass_begin_pipeline_statistics_query( - &self, - _pass_data: &mut Self::RenderPassData, - _query_set_data: &Self::QuerySetData, + fn begin_pipeline_statistics_query( + &mut self, + _query_set: &dispatch::DispatchQuerySet, _query_index: u32, ) { // Not available in gecko yet + // let query_set = query_set.as_webgpu(); + // self.inner.begin_pipeline_statistics_query(query_set, query_index); } - fn render_pass_end_pipeline_statistics_query(&self, _pass_data: &mut Self::RenderPassData) { + fn end_pipeline_statistics_query(&mut self) { // Not available in gecko yet + // self.inner.end_pipeline_statistics_query(); } - fn render_pass_execute_bundles( - &self, - pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, + fn execute_bundles( + &mut self, + render_bundles: &mut dyn Iterator, ) { let mapped = render_bundles - .map(|bundle_data| &bundle_data.0) + .map(|bundle| &bundle.as_webgpu().inner) .collect::(); - pass_data.0.execute_bundles(&mapped); + self.inner.execute_bundles(&mapped); } - fn render_pass_end(&self, pass_data: &mut Self::RenderPassData) { - pass_data.0.end(); + fn end(&mut self) { + self.inner.end(); } +} +impl Drop for WebRenderPassEncoder { + fn drop(&mut self) { + dispatch::RenderPassInterface::end(self); + } +} - fn device_create_blas( - &self, - _device_data: &Self::DeviceData, - _desc: &crate::CreateBlasDescriptor<'_>, - _sizes: wgt::BlasGeometrySizeDescriptors, - ) -> (Option, Self::BlasData) { - unimplemented!("Raytracing not implemented for web"); +impl dispatch::CommandBufferInterface for WebCommandBuffer {} +impl Drop for WebCommandBuffer { + fn drop(&mut self) { + // no-op } +} - fn 
device_create_tlas( - &self, - _device_data: &Self::DeviceData, - _desc: &crate::CreateTlasDescriptor<'_>, - ) -> Self::TlasData { - unimplemented!("Raytracing not implemented for web"); +impl dispatch::RenderBundleEncoderInterface for WebRenderBundleEncoder { + fn set_pipeline(&mut self, pipeline: &dispatch::DispatchRenderPipeline) { + let pipeline = &pipeline.as_webgpu().inner; + self.inner.set_pipeline(pipeline); } - fn command_encoder_build_acceleration_structures_unsafe_tlas<'a>( - &'a self, - _encoder_data: &Self::CommandEncoderData, - _blas: impl Iterator>, - _tlas: impl Iterator>, + fn set_bind_group( + &mut self, + index: u32, + bind_group: Option<&dispatch::DispatchBindGroup>, + offsets: &[crate::DynamicOffset], ) { - unimplemented!("Raytracing not implemented for web"); + let Some(bind_group) = bind_group else { + return; + }; + let bind_group = &bind_group.as_webgpu().inner; + + if offsets.is_empty() { + self.inner.set_bind_group(index, Some(bind_group)); + } else { + self.inner + .set_bind_group_with_u32_slice_and_f64_and_dynamic_offsets_data_length( + index, + Some(bind_group), + offsets, + 0f64, + offsets.len() as u32, + ) + .unwrap(); + } } - fn command_encoder_build_acceleration_structures<'a>( - &'a self, - _encoder_data: &Self::CommandEncoderData, - _blas: impl Iterator>, - _tlas: impl Iterator>, + fn set_index_buffer( + &mut self, + buffer: &dispatch::DispatchBuffer, + index_format: crate::IndexFormat, + offset: crate::BufferAddress, + size: Option, ) { - unimplemented!("Raytracing not implemented for web"); - } + let buffer = buffer.as_webgpu(); + let index_format = map_index_format(index_format); - fn blas_destroy(&self, _blas_data: &Self::BlasData) { - unimplemented!("Raytracing not implemented for web"); + if let Some(size) = size { + self.inner.set_index_buffer_with_f64_and_f64( + &buffer.inner, + index_format, + offset as f64, + size.get() as f64, + ); + } else { + self.inner + .set_index_buffer_with_f64(&buffer.inner, index_format, offset as f64); + } } - fn blas_drop(&self, _blas_data: &Self::BlasData) { - unimplemented!("Raytracing not implemented for web"); - } + fn set_vertex_buffer( + &mut self, + slot: u32, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + size: Option, + ) { + let buffer = buffer.as_webgpu(); - fn tlas_destroy(&self, _tlas_data: &Self::TlasData) { - unimplemented!("Raytracing not implemented for web"); + if let Some(size) = size { + self.inner.set_vertex_buffer_with_f64_and_f64( + slot, + Some(&buffer.inner), + offset as f64, + size.get() as f64, + ); + } else { + self.inner + .set_vertex_buffer_with_f64(slot, Some(&buffer.inner), offset as f64); + } } - fn tlas_drop(&self, _tlas_data: &Self::TlasData) { - unimplemented!("Raytracing not implemented for web"); + fn set_push_constants(&mut self, _stages: crate::ShaderStages, _offset: u32, _data: &[u8]) { + panic!("PUSH_CONSTANTS feature must be enabled to call multi_draw_indexed_indirect") } -} -pub(crate) type SurfaceOutputDetail = (); + fn draw(&mut self, vertices: Range, instances: Range) { + self.inner + .draw_with_instance_count_and_first_vertex_and_first_instance( + vertices.end - vertices.start, + instances.end - instances.start, + vertices.start, + instances.start, + ); + } -#[derive(Debug)] -pub struct WebQueueWriteBuffer(Box<[u8]>); + fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { + self.inner + .draw_indexed_with_instance_count_and_first_index_and_base_vertex_and_first_instance( + indices.end - indices.start, + instances.end - 
instances.start, + indices.start, + base_vertex, + instances.start, + ) + } -impl QueueWriteBuffer for WebQueueWriteBuffer { - fn slice(&self) -> &[u8] { - &self.0 + fn draw_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + ) { + let buffer = indirect_buffer.as_webgpu(); + self.inner + .draw_indirect_with_f64(&buffer.inner, indirect_offset as f64); } - #[inline] - fn slice_mut(&mut self) -> &mut [u8] { - &mut self.0 + fn draw_indexed_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + ) { + let buffer = indirect_buffer.as_webgpu(); + self.inner + .draw_indexed_indirect_with_f64(&buffer.inner, indirect_offset as f64); } - fn as_any(&self) -> &dyn Any { - self + fn finish(self, desc: &crate::RenderBundleDescriptor<'_>) -> dispatch::DispatchRenderBundle + where + Self: Sized, + { + let bundle = match desc.label { + Some(label) => { + let mapped_desc = webgpu_sys::GpuRenderBundleDescriptor::new(); + mapped_desc.set_label(label); + self.inner.finish_with_descriptor(&mapped_desc) + } + None => self.inner.finish(), + }; + + WebRenderBundle { + inner: bundle, + ident: crate::cmp::Identifier::create(), + } + .into() + } +} +impl Drop for WebRenderBundleEncoder { + fn drop(&mut self) { + // no-op } } -/// Stores the state of a GPU buffer and a reference to its mapped `ArrayBuffer` (if any). -/// The WebGPU specification forbids calling `getMappedRange` on a `webgpu_sys::GpuBuffer` more than -/// once, so this struct stores the initial mapped range and re-uses it, allowing for multiple `get_mapped_range` -/// calls on the Rust-side. -#[derive(Debug)] -pub struct WebBuffer { - /// The associated GPU buffer. - buffer: webgpu_sys::GpuBuffer, - /// The mapped array buffer and mapped range. - mapping: RefCell, +impl dispatch::RenderBundleInterface for WebRenderBundle {} +impl Drop for WebRenderBundle { + fn drop(&mut self) { + // no-op + } } -impl WebBuffer { - /// Creates a new web buffer for the given Javascript object and description. - fn new(buffer: webgpu_sys::GpuBuffer, desc: &crate::BufferDescriptor<'_>) -> Self { - Self { - buffer, - mapping: RefCell::new(WebBufferMapState { - mapped_buffer: None, - range: 0..desc.size, - }), +impl dispatch::SurfaceInterface for WebSurface { + fn get_capabilities(&self, _adapter: &dispatch::DispatchAdapter) -> wgt::SurfaceCapabilities { + let mut formats = vec![ + wgt::TextureFormat::Rgba8Unorm, + wgt::TextureFormat::Bgra8Unorm, + wgt::TextureFormat::Rgba16Float, + ]; + let mut mapped_formats = formats.iter().map(|format| map_texture_format(*format)); + // Preferred canvas format will only be either "rgba8unorm" or "bgra8unorm". + // https://www.w3.org/TR/webgpu/#dom-gpu-getpreferredcanvasformat + let preferred_format = self + .gpu + .as_ref() + .expect("Caller could not have created an adapter if gpu is undefined.") + .get_preferred_canvas_format(); + if let Some(index) = mapped_formats.position(|format| format == preferred_format) { + formats.swap(0, index); + } + + wgt::SurfaceCapabilities { + // https://gpuweb.github.io/gpuweb/#supported-context-formats + formats, + // Doesn't really have meaning on the web. + present_modes: vec![wgt::PresentMode::Fifo], + alpha_modes: vec![wgt::CompositeAlphaMode::Opaque], + // Statically set to RENDER_ATTACHMENT for now. 
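// Downstream code that needs other usages (e.g. COPY_SRC) should check this
// field rather than assume support; a sketch, assuming `caps` holds the
// returned wgt::SurfaceCapabilities:
//
//     if caps.usages.contains(wgt::TextureUsages::COPY_SRC) { /* ... */ }
//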
See https://gpuweb.github.io/gpuweb/#dom-gpucanvasconfiguration-usage + usages: wgt::TextureUsages::RENDER_ATTACHMENT, } } - /// Creates a raw Javascript array buffer over the provided range. - fn get_mapped_array_buffer(&self, sub_range: Range) -> js_sys::ArrayBuffer { - self.buffer - .get_mapped_range_with_f64_and_f64( - sub_range.start as f64, - (sub_range.end - sub_range.start) as f64, - ) - .expect_throw("invalid mapped range") + fn configure(&self, device: &dispatch::DispatchDevice, config: &crate::SurfaceConfiguration) { + let device = device.as_webgpu(); + + match self.canvas { + Canvas::Canvas(ref canvas) => { + canvas.set_width(config.width); + canvas.set_height(config.height); + } + Canvas::Offscreen(ref canvas) => { + canvas.set_width(config.width); + canvas.set_height(config.height); + } + } + + if let wgt::PresentMode::Mailbox | wgt::PresentMode::Immediate = config.present_mode { + panic!("Only FIFO/Auto* is supported on web"); + } + if let wgt::CompositeAlphaMode::PostMultiplied | wgt::CompositeAlphaMode::Inherit = + config.alpha_mode + { + panic!("Only Opaque/Auto or PreMultiplied alpha mode are supported on web"); + } + let alpha_mode = match config.alpha_mode { + wgt::CompositeAlphaMode::PreMultiplied => webgpu_sys::GpuCanvasAlphaMode::Premultiplied, + _ => webgpu_sys::GpuCanvasAlphaMode::Opaque, + }; + let mapped = webgpu_sys::GpuCanvasConfiguration::new( + &device.inner, + map_texture_format(config.format), + ); + mapped.set_usage(config.usage.bits()); + mapped.set_alpha_mode(alpha_mode); + let mapped_view_formats = config + .view_formats + .iter() + .map(|format| JsValue::from(map_texture_format(*format))) + .collect::(); + mapped.set_view_formats(&mapped_view_formats); + self.context.configure(&mapped).unwrap(); } - /// Obtains a reference to the re-usable buffer mapping as a Javascript array view. - fn get_mapped_range(&self, sub_range: Range) -> js_sys::Uint8Array { - let mut mapping = self.mapping.borrow_mut(); - let range = mapping.range.clone(); - let array_buffer = mapping.mapped_buffer.get_or_insert_with(|| { - self.buffer - .get_mapped_range_with_f64_and_f64( - range.start as f64, - (range.end - range.start) as f64, - ) - .expect_throw("invalid mapped range") - }); - js_sys::Uint8Array::new_with_byte_offset_and_length( - array_buffer, - (sub_range.start - range.start) as u32, - (sub_range.end - sub_range.start) as u32, + fn get_current_texture( + &self, + ) -> ( + Option, + crate::SurfaceStatus, + dispatch::DispatchSurfaceOutputDetail, + ) { + let surface_texture = self.context.get_current_texture().unwrap(); + + let web_surface_texture = WebTexture { + inner: surface_texture, + ident: crate::cmp::Identifier::create(), + }; + + ( + Some(web_surface_texture.into()), + crate::SurfaceStatus::Good, + WebSurfaceOutputDetail { + ident: crate::cmp::Identifier::create(), + } + .into(), ) } - - /// Sets the range of the buffer which is presently mapped. - fn set_mapped_range(&self, range: Range) { - self.mapping.borrow_mut().range = range; +} +impl Drop for WebSurface { + fn drop(&mut self) { + // no-op } } -/// Remembers which portion of a buffer has been mapped, along with a reference -/// to the mapped portion. -#[derive(Debug)] -struct WebBufferMapState { - /// The mapped memory of the buffer. - pub mapped_buffer: Option, - /// The total range which has been mapped in the buffer overall. - pub range: Range, -} +impl dispatch::SurfaceOutputDetailInterface for WebSurfaceOutputDetail { + fn present(&self) { + // Swapchain is presented automatically on the web. 
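// The portable calling pattern is unchanged across backends; a rough sketch,
// assuming a configured `surface` (native backends do real work in present(),
// the web backend relies on the browser presenting the canvas):
//
//     let frame = surface.get_current_texture()?;
//     // ... encode and submit passes targeting frame.texture ...
//     frame.present();
//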
+ } -#[derive(Debug)] -pub struct BufferMappedRange { - actual_mapping: js_sys::Uint8Array, - temporary_mapping: Vec, + fn texture_discard(&self) { + // Can't really discard the texture on the web. + } +} +impl Drop for WebSurfaceOutputDetail { + fn drop(&mut self) { + // no-op + } } -impl crate::context::BufferMappedRange for BufferMappedRange { +impl dispatch::BufferMappedRangeInterface for WebBufferMappedRange { #[inline] fn slice(&self) -> &[u8] { &self.temporary_mapping @@ -3610,8 +3728,7 @@ impl crate::context::BufferMappedRange for BufferMappedRange { &mut self.temporary_mapping } } - -impl Drop for BufferMappedRange { +impl Drop for WebBufferMappedRange { fn drop(&mut self) { // Copy from the temporary mapping back into the array buffer that was // originally provided by the browser @@ -3625,6 +3742,24 @@ impl Drop for BufferMappedRange { } } +impl dispatch::QueueWriteBufferInterface for WebQueueWriteBuffer { + fn slice(&self) -> &[u8] { + &self.inner + } + + #[inline] + fn slice_mut(&mut self) -> &mut [u8] { + &mut self.inner + } +} +impl Drop for WebQueueWriteBuffer { + fn drop(&mut self) { + // The api struct calls write_staging_buffer + + // no-op + } +} + /// Adds the constants map to the given pipeline descriptor if the map is nonempty. /// Panics if the map cannot be set. /// diff --git a/wgpu/src/backend/webgpu/defined_non_null_js_value.rs b/wgpu/src/backend/webgpu/defined_non_null_js_value.rs index fc5a8737ef..3761028810 100644 --- a/wgpu/src/backend/webgpu/defined_non_null_js_value.rs +++ b/wgpu/src/backend/webgpu/defined_non_null_js_value.rs @@ -3,7 +3,7 @@ use std::ops::{Deref, DerefMut}; use wasm_bindgen::JsValue; /// Derefs to a [`JsValue`] that's known not to be `undefined` or `null`. -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct DefinedNonNullJsValue(T); impl DefinedNonNullJsValue diff --git a/wgpu/src/backend/wgpu_core.rs b/wgpu/src/backend/wgpu_core.rs index 7de1dc7e23..41ef582129 100644 --- a/wgpu/src/backend/wgpu_core.rs +++ b/wgpu/src/backend/wgpu_core.rs @@ -1,33 +1,23 @@ use crate::{ - context::downcast_ref, AdapterInfo, BindGroupDescriptor, BindGroupLayoutDescriptor, - BindingResource, BufferBinding, BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, - CompilationMessage, CompilationMessageType, ComputePassDescriptor, ComputePipelineDescriptor, - DownlevelCapabilities, ErrorSource, Features, Label, Limits, LoadOp, MapMode, Operations, - PipelineCacheDescriptor, PipelineLayoutDescriptor, RenderBundleEncoderDescriptor, - RenderPipelineDescriptor, SamplerDescriptor, ShaderModuleDescriptor, - ShaderModuleDescriptorSpirV, ShaderSource, StoreOp, SurfaceStatus, SurfaceTargetUnsafe, - TextureDescriptor, TextureViewDescriptor, UncapturedErrorHandler, + api, + dispatch::{self, BufferMappedRangeInterface, InterfaceTypes}, + BindingResource, BufferBinding, BufferDescriptor, CompilationInfo, CompilationMessage, + CompilationMessageType, ErrorSource, Features, Label, LoadOp, MapMode, Operations, + ShaderSource, StoreOp, SurfaceTargetUnsafe, TextureDescriptor, }; use arrayvec::ArrayVec; use parking_lot::Mutex; use smallvec::SmallVec; use std::{ - any::Any, - borrow::Cow::Borrowed, - error::Error, - fmt, - future::{ready, Ready}, - ops::Range, - ptr::NonNull, - slice, - sync::Arc, + borrow::Cow::Borrowed, error::Error, fmt, future::ready, ops::Range, pin::Pin, ptr::NonNull, + slice, sync::Arc, }; -use wgc::error::ContextErrorSource; -use wgc::{command::bundle_ffi::*, pipeline::CreateShaderModuleError}; +use 
wgc::{command::bundle_ffi::*, error::ContextErrorSource, pipeline::CreateShaderModuleError}; use wgt::WasmNotSendSync; -pub struct ContextWgpuCore(wgc::global::Global); +#[derive(Clone)] +pub struct ContextWgpuCore(Arc); impl Drop for ContextWgpuCore { fn drop(&mut self) { @@ -45,7 +35,12 @@ impl fmt::Debug for ContextWgpuCore { impl ContextWgpuCore { pub unsafe fn from_hal_instance(hal_instance: A::Instance) -> Self { - Self(unsafe { wgc::global::Global::from_hal_instance::("wgpu", hal_instance) }) + Self(unsafe { + Arc::new(wgc::global::Global::from_hal_instance::( + "wgpu", + hal_instance, + )) + }) } /// # Safety @@ -56,7 +51,7 @@ impl ContextWgpuCore { } pub unsafe fn from_core_instance(core_instance: wgc::instance::Instance) -> Self { - Self(unsafe { wgc::global::Global::from_instance(core_instance) }) + Self(unsafe { Arc::new(wgc::global::Global::from_instance(core_instance)) }) } #[cfg(native)] @@ -77,18 +72,18 @@ impl ContextWgpuCore { R, >( &self, - adapter: &wgc::id::AdapterId, + adapter: &CoreAdapter, hal_adapter_callback: F, ) -> R { unsafe { self.0 - .adapter_as_hal::(*adapter, hal_adapter_callback) + .adapter_as_hal::(adapter.id, hal_adapter_callback) } } pub unsafe fn buffer_as_hal) -> R, R>( &self, - buffer: &Buffer, + buffer: &CoreBuffer, hal_buffer_callback: F, ) -> R { unsafe { @@ -99,17 +94,17 @@ impl ContextWgpuCore { pub unsafe fn create_device_from_hal( &self, - adapter: &wgc::id::AdapterId, + adapter: &CoreAdapter, hal_device: hal::OpenDevice, desc: &crate::DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, - ) -> Result<(Device, Queue), crate::RequestDeviceError> { + ) -> Result<(CoreDevice, CoreQueue), crate::RequestDeviceError> { if trace_dir.is_some() { log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974"); } let (device_id, queue_id) = unsafe { self.0.create_device_from_hal( - *adapter, + adapter.id, hal_device.into(), &desc.map_label(|l| l.map(Borrowed)), None, @@ -118,12 +113,14 @@ impl ContextWgpuCore { ) }?; let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); - let device = Device { + let device = CoreDevice { + context: self.clone(), id: device_id, error_sink: error_sink.clone(), features: desc.required_features, }; - let queue = Queue { + let queue = CoreQueue { + context: self.clone(), id: queue_id, error_sink, }; @@ -133,9 +130,9 @@ impl ContextWgpuCore { pub unsafe fn create_texture_from_hal( &self, hal_texture: A::Texture, - device: &Device, + device: &CoreDevice, desc: &TextureDescriptor<'_>, - ) -> Texture { + ) -> CoreTexture { let descriptor = desc.map_label_and_view_formats(|l| l.map(Borrowed), |v| v.to_vec()); let (id, error) = unsafe { self.0 @@ -149,7 +146,8 @@ impl ContextWgpuCore { "Device::create_texture_from_hal", ); } - Texture { + CoreTexture { + context: self.clone(), id, error_sink: Arc::clone(&device.error_sink), } @@ -158,9 +156,9 @@ impl ContextWgpuCore { pub unsafe fn create_buffer_from_hal( &self, hal_buffer: A::Buffer, - device: &Device, + device: &CoreDevice, desc: &BufferDescriptor<'_>, - ) -> Buffer { + ) -> CoreBuffer { let (id, error) = unsafe { self.0.create_buffer_from_hal::( hal_buffer, @@ -177,7 +175,8 @@ impl ContextWgpuCore { "Device::create_buffer_from_hal", ); } - Buffer { + CoreBuffer { + context: self.clone(), id, error_sink: Arc::clone(&device.error_sink), } @@ -185,7 +184,7 @@ impl ContextWgpuCore { pub unsafe fn device_as_hal) -> R, R>( &self, - device: &Device, + device: &CoreDevice, hal_device_callback: F, ) -> R { unsafe { @@ -200,7 +199,7 
@@ impl ContextWgpuCore { R, >( &self, - surface: &Surface, + surface: &CoreSurface, hal_surface_callback: F, ) -> R { unsafe { @@ -215,7 +214,7 @@ impl ContextWgpuCore { R, >( &self, - texture: &Texture, + texture: &CoreTexture, hal_texture_callback: F, ) -> R { unsafe { @@ -230,12 +229,12 @@ impl ContextWgpuCore { R, >( &self, - texture_view_data: &wgc::id::TextureViewId, + texture_view: &CoreTextureView, hal_texture_view_callback: F, ) -> R { unsafe { self.0 - .texture_view_as_hal::(*texture_view_data, hal_texture_view_callback) + .texture_view_as_hal::(texture_view.id, hal_texture_view_callback) } } @@ -246,7 +245,7 @@ impl ContextWgpuCore { R, >( &self, - command_encoder: &CommandEncoder, + command_encoder: &CoreCommandEncoder, hal_command_encoder_callback: F, ) -> R { unsafe { @@ -365,7 +364,7 @@ impl ContextWgpuCore { fn map_buffer_copy_view(view: crate::TexelCopyBufferInfo<'_>) -> wgc::command::TexelCopyBufferInfo { wgc::command::TexelCopyBufferInfo { - buffer: downcast_buffer(view.buffer).id, + buffer: view.buffer.inner.as_core().id, layout: view.layout, } } @@ -374,7 +373,7 @@ fn map_texture_copy_view( view: crate::TexelCopyTextureInfo<'_>, ) -> wgc::command::TexelCopyTextureInfo { wgc::command::TexelCopyTextureInfo { - texture: downcast_texture(view.texture).id, + texture: view.texture.inner.as_core().id, mip_level: view.mip_level, origin: view.origin, aspect: view.aspect, @@ -386,10 +385,10 @@ fn map_texture_copy_view( allow(unused) )] fn map_texture_tagged_copy_view( - view: crate::CopyExternalImageDestInfo<'_>, + view: wgt::CopyExternalImageDestInfo<&api::Texture>, ) -> wgc::command::CopyExternalImageDestInfo { wgc::command::CopyExternalImageDestInfo { - texture: downcast_texture(view.texture).id, + texture: view.texture.inner.as_core().id, mip_level: view.mip_level, origin: view.origin, aspect: view.aspect, @@ -437,7 +436,8 @@ fn map_pass_channel( } #[derive(Debug)] -pub struct Surface { +pub struct CoreSurface { + pub(crate) context: ContextWgpuCore, id: wgc::id::SurfaceId, /// Configured device is needed to know which backend /// code to execute when acquiring a new frame. 
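The hunk below renames the wgpu-core resource wrappers to `Core*` and embeds a cloned `ContextWgpuCore` in each of them. Since `ContextWgpuCore` now wraps an `Arc<wgc::global::Global>`, each clone is a reference-count bump rather than a copy of global state. A minimal sketch of the ownership pattern, with hypothetical `GlobalState`/`CoreResource` stand-ins for the real types:

use std::sync::Arc;

#[derive(Clone)]
struct Context(Arc<GlobalState>); // cloning bumps a refcount, nothing more

struct GlobalState { /* instance-wide resource tables */ }

struct CoreResource {
    context: Context, // keeps the global state alive for the resource's lifetime
    id: u32,          // identifier into the global tables
}

impl CoreResource {
    fn destroy(&self) {
        // Methods reach the global state through the embedded context,
        // instead of taking a context argument as the old trait methods did.
        let _global: &GlobalState = &self.context.0;
    }
}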
@@ -445,125 +445,352 @@ pub struct Surface {
 }

 #[derive(Debug)]
-pub struct Device {
+pub struct CoreAdapter {
+    pub(crate) context: ContextWgpuCore,
+    pub(crate) id: wgc::id::AdapterId,
+}
+
+#[derive(Debug)]
+pub struct CoreDevice {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::DeviceId,
     error_sink: ErrorSink,
     features: Features,
 }

 #[derive(Debug)]
-pub struct Buffer {
+pub struct CoreBuffer {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::BufferId,
     error_sink: ErrorSink,
 }

 #[derive(Debug)]
-pub struct ShaderModule {
+pub struct CoreShaderModule {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::ShaderModuleId,
     compilation_info: CompilationInfo,
 }

 #[derive(Debug)]
-pub struct Texture {
+pub struct CoreBindGroupLayout {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::BindGroupLayoutId,
+}
+
+#[derive(Debug)]
+pub struct CoreBindGroup {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::BindGroupId,
+}
+
+#[derive(Debug)]
+pub struct CoreTexture {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::TextureId,
     error_sink: ErrorSink,
 }

 #[derive(Debug)]
-pub struct Queue {
+pub struct CoreTextureView {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::TextureViewId,
+}
+
+#[derive(Debug)]
+pub struct CoreSampler {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::SamplerId,
+}
+
+#[derive(Debug)]
+pub struct CoreQuerySet {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::QuerySetId,
+}
+
+#[derive(Debug)]
+pub struct CorePipelineLayout {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::PipelineLayoutId,
+}
+
+#[derive(Debug)]
+pub struct CorePipelineCache {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::PipelineCacheId,
+}
+
+#[derive(Debug)]
+pub struct CoreCommandBuffer {
+    pub(crate) context: ContextWgpuCore,
+    id: wgc::id::CommandBufferId,
+}
+
+#[derive(Debug)]
+pub struct CoreRenderBundleEncoder {
+    pub(crate) context: ContextWgpuCore,
+    encoder: wgc::command::RenderBundleEncoder,
+    id: crate::cmp::Identifier,
+}
+
+#[derive(Debug)]
+pub struct CoreRenderBundle {
+    id: wgc::id::RenderBundleId,
+}
+
+#[derive(Debug)]
+pub struct CoreQueue {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::QueueId,
     error_sink: ErrorSink,
 }

 #[derive(Debug)]
-pub struct ComputePipeline {
+pub struct CoreComputePipeline {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::ComputePipelineId,
     error_sink: ErrorSink,
 }

 #[derive(Debug)]
-pub struct RenderPipeline {
+pub struct CoreRenderPipeline {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::RenderPipelineId,
     error_sink: ErrorSink,
 }

 #[derive(Debug)]
-pub struct ComputePass {
+pub struct CoreComputePass {
+    pub(crate) context: ContextWgpuCore,
     pass: wgc::command::ComputePass,
     error_sink: ErrorSink,
+    id: crate::cmp::Identifier,
 }

 #[derive(Debug)]
-pub struct RenderPass {
+pub struct CoreRenderPass {
+    pub(crate) context: ContextWgpuCore,
     pass: wgc::command::RenderPass,
     error_sink: ErrorSink,
+    id: crate::cmp::Identifier,
 }

 #[derive(Debug)]
-pub struct CommandEncoder {
+pub struct CoreCommandEncoder {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::CommandEncoderId,
     error_sink: ErrorSink,
     open: bool,
 }

 #[derive(Debug)]
-pub struct Blas {
+pub struct CoreBlas {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::BlasId,
     // error_sink: ErrorSink,
 }

 #[derive(Debug)]
-pub struct Tlas {
+pub struct CoreTlas {
+    pub(crate) context: ContextWgpuCore,
     id: wgc::id::TlasId,
     // error_sink: ErrorSink,
 }

-impl crate::Context for ContextWgpuCore {
-    type AdapterData = wgc::id::AdapterId;
-    type DeviceData = Device;
-    type QueueData = Queue;
-    type ShaderModuleData = ShaderModule;
-    type BindGroupLayoutData = wgc::id::BindGroupLayoutId;
-    type BindGroupData = wgc::id::BindGroupId;
-    type TextureViewData = wgc::id::TextureViewId;
-    type SamplerData = wgc::id::SamplerId;
-    type BufferData = Buffer;
-    type TextureData = Texture;
-    type QuerySetData = wgc::id::QuerySetId;
-    type PipelineLayoutData = wgc::id::PipelineLayoutId;
-    type RenderPipelineData = RenderPipeline;
-    type ComputePipelineData = ComputePipeline;
-    type PipelineCacheData = wgc::id::PipelineCacheId;
-    type CommandEncoderData = CommandEncoder;
-    type ComputePassData = ComputePass;
-    type RenderPassData = RenderPass;
-    type CommandBufferData = wgc::id::CommandBufferId;
-    type RenderBundleEncoderData = wgc::command::RenderBundleEncoder;
-    type RenderBundleData = wgc::id::RenderBundleId;
-
-    type SurfaceData = Surface;
-    type SurfaceOutputDetail = SurfaceOutputDetail;
-    type SubmissionIndexData = wgc::SubmissionIndex;
-
-    type RequestAdapterFuture = Ready<Option<Self::AdapterData>>;
-    type BlasData = Blas;
-    type TlasData = Tlas;
-
-    #[allow(clippy::type_complexity)]
-    type RequestDeviceFuture =
-        Ready<Result<(Self::DeviceData, Self::QueueData), crate::RequestDeviceError>>;
-
-    type PopErrorScopeFuture = Ready<Option<crate::Error>>;
-    type CompilationInfoFuture = Ready<CompilationInfo>;
-
-    fn init(instance_desc: wgt::InstanceDescriptor) -> Self {
-        Self(wgc::global::Global::new("wgpu", instance_desc))
-    }
-
-    unsafe fn instance_create_surface(
+#[derive(Debug)]
+pub struct CoreSurfaceOutputDetail {
+    context: ContextWgpuCore,
+    surface_id: wgc::id::SurfaceId,
+}
+
+type ErrorSink = Arc<Mutex<ErrorSinkRaw>>;
+
+struct ErrorScope {
+    error: Option<crate::Error>,
+    filter: crate::ErrorFilter,
+}
+
+struct ErrorSinkRaw {
+    scopes: Vec<ErrorScope>,
+    uncaptured_handler: Option<Box<dyn UncapturedErrorHandler>>,
+}
+
+impl ErrorSinkRaw {
+    fn new() -> ErrorSinkRaw {
+        ErrorSinkRaw {
+            scopes: Vec::new(),
+            uncaptured_handler: None,
+        }
+    }
+
+    #[track_caller]
+    fn handle_error(&mut self, err: crate::Error) {
+        let filter = match err {
+            crate::Error::OutOfMemory { .. } => crate::ErrorFilter::OutOfMemory,
+            crate::Error::Validation { .. } => crate::ErrorFilter::Validation,
+            crate::Error::Internal { .. } => crate::ErrorFilter::Internal,
+        };
+        match self
+            .scopes
+            .iter_mut()
+            .rev()
+            .find(|scope| scope.filter == filter)
+        {
+            Some(scope) => {
+                if scope.error.is_none() {
+                    scope.error = Some(err);
+                }
+            }
+            None => {
+                if let Some(custom_handler) = self.uncaptured_handler.as_ref() {
+                    (custom_handler)(err);
+                } else {
+                    // direct call preserves #[track_caller] where dyn can't
+                    default_error_handler(err);
+                }
+            }
+        }
+    }
+}
+
+impl fmt::Debug for ErrorSinkRaw {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "ErrorSink")
+    }
+}
+
+#[track_caller]
+fn default_error_handler(err: crate::Error) {
+    log::error!("Handling wgpu errors as fatal by default");
+    panic!("wgpu error: {err}\n");
+}
+
+impl From<CreateShaderModuleError> for CompilationInfo {
+    fn from(value: CreateShaderModuleError) -> Self {
+        match value {
+            #[cfg(feature = "wgsl")]
+            CreateShaderModuleError::Parsing(v) => v.into(),
+            #[cfg(feature = "glsl")]
+            CreateShaderModuleError::ParsingGlsl(v) => v.into(),
+            #[cfg(feature = "spirv")]
+            CreateShaderModuleError::ParsingSpirV(v) => v.into(),
+            CreateShaderModuleError::Validation(v) => v.into(),
+            // Device errors are reported through the error sink, and are not compilation errors.
+            // Same goes for native shader module generation errors.
+            CreateShaderModuleError::Device(_) | CreateShaderModuleError::Generation => {
+                CompilationInfo {
+                    messages: Vec::new(),
+                }
+            }
+            // Everything else is an error message without location information.
+            _ => CompilationInfo {
+                messages: vec![CompilationMessage {
+                    message: value.to_string(),
+                    message_type: CompilationMessageType::Error,
+                    location: None,
+                }],
+            },
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct CoreQueueWriteBuffer {
+    buffer_id: wgc::id::StagingBufferId,
+    mapping: CoreBufferMappedRange,
+}
+
+#[derive(Debug)]
+pub struct CoreBufferMappedRange {
+    ptr: NonNull<u8>,
+    size: usize,
+}
+
+#[cfg(send_sync)]
+unsafe impl Send for CoreBufferMappedRange {}
+#[cfg(send_sync)]
+unsafe impl Sync for CoreBufferMappedRange {}
+
+impl Drop for CoreBufferMappedRange {
+    fn drop(&mut self) {
+        // Intentionally left blank so that `BufferMappedRange` still
+        // implements `Drop`, to match the web backend
+    }
+}
+
+crate::cmp::impl_eq_ord_hash_arc_address!(ContextWgpuCore => .0);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreAdapter => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreDevice => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreQueue => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreShaderModule => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreBindGroupLayout => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreBindGroup => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreTextureView => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreSampler => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreBuffer => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreTexture => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreBlas => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreTlas => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreQuerySet => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CorePipelineLayout => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreRenderPipeline => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreComputePipeline => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CorePipelineCache => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreCommandEncoder => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreComputePass => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreRenderPass => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreCommandBuffer => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreRenderBundleEncoder => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreRenderBundle => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreSurface => .id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreSurfaceOutputDetail => .surface_id);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreQueueWriteBuffer => .mapping.ptr);
+crate::cmp::impl_eq_ord_hash_proxy!(CoreBufferMappedRange => .ptr);
+
+impl InterfaceTypes for ContextWgpuCore {
+    type Instance = ContextWgpuCore;
+    type Adapter = CoreAdapter;
+    type Device = CoreDevice;
+    type Queue = CoreQueue;
+    type ShaderModule = CoreShaderModule;
+    type BindGroupLayout = CoreBindGroupLayout;
+    type BindGroup = CoreBindGroup;
+    type TextureView = CoreTextureView;
+    type Sampler = CoreSampler;
+    type Buffer = CoreBuffer;
+    type Texture = CoreTexture;
+    type Blas = CoreBlas;
+    type Tlas = CoreTlas;
+    type QuerySet = CoreQuerySet;
+    type PipelineLayout = CorePipelineLayout;
+    type RenderPipeline = CoreRenderPipeline;
+    type ComputePipeline = CoreComputePipeline;
+    type PipelineCache = CorePipelineCache;
+    type CommandEncoder = CoreCommandEncoder;
+    type ComputePass = CoreComputePass;
+    type RenderPass = CoreRenderPass;
+    type CommandBuffer = CoreCommandBuffer;
+    type RenderBundleEncoder =
CoreRenderBundleEncoder; + type RenderBundle = CoreRenderBundle; + type Surface = CoreSurface; + type SurfaceOutputDetail = CoreSurfaceOutputDetail; + type QueueWriteBuffer = CoreQueueWriteBuffer; + type BufferMappedRange = CoreBufferMappedRange; +} + +impl dispatch::InstanceInterface for ContextWgpuCore { + fn new(desc: wgt::InstanceDescriptor) -> Self + where + Self: Sized, + { + Self(Arc::new(wgc::global::Global::new("wgpu", desc))) + } + + unsafe fn create_surface( &self, - target: SurfaceTargetUnsafe, - ) -> Result { + target: crate::api::SurfaceTargetUnsafe, + ) -> Result { let id = match target { SurfaceTargetUnsafe::RawHandle { raw_display_handle, @@ -596,43 +823,58 @@ impl crate::Context for ContextWgpuCore { }, }?; - Ok(Surface { + Ok(dispatch::DispatchSurface::Core(CoreSurface { + context: self.clone(), id, configured_device: Mutex::default(), - }) + })) } - fn instance_request_adapter( + fn request_adapter( &self, - options: &crate::RequestAdapterOptions<'_, '_>, - ) -> Self::RequestAdapterFuture { + options: &crate::api::RequestAdapterOptions<'_, '_>, + ) -> Pin> { let id = self.0.request_adapter( &wgc::instance::RequestAdapterOptions { power_preference: options.power_preference, force_fallback_adapter: options.force_fallback_adapter, - compatible_surface: options.compatible_surface.map(|surface| { - let surface: &::SurfaceData = - downcast_ref(surface.surface_data.as_ref()); - surface.id - }), + compatible_surface: options + .compatible_surface + .map(|surface| surface.inner.as_core().id), }, wgt::Backends::all(), None, ); - ready(id.ok()) + let adapter = id.map(|id| { + let core = CoreAdapter { + context: self.clone(), + id, + }; + let generic: dispatch::DispatchAdapter = core.into(); + generic + }); + Box::pin(ready(adapter.ok())) + } + + fn poll_all_devices(&self, force_wait: bool) -> bool { + match self.0.poll_all_devices(force_wait) { + Ok(all_queue_empty) => all_queue_empty, + Err(err) => self.handle_error_fatal(err, "Instance::poll_all_devices"), + } } +} - fn adapter_request_device( +impl dispatch::AdapterInterface for CoreAdapter { + fn request_device( &self, - adapter_data: &Self::AdapterData, desc: &crate::DeviceDescriptor<'_>, trace_dir: Option<&std::path::Path>, - ) -> Self::RequestDeviceFuture { + ) -> Pin> { if trace_dir.is_some() { log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974"); } - let res = self.0.adapter_request_device( - *adapter_data, + let res = self.context.0.adapter_request_device( + self.id, &desc.map_label(|l| l.map(Borrowed)), None, None, @@ -641,148 +883,78 @@ impl crate::Context for ContextWgpuCore { let (device_id, queue_id) = match res { Ok(ids) => ids, Err(err) => { - return ready(Err(err.into())); + return Box::pin(ready(Err(err.into()))); } }; let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); - let device = Device { + let device = CoreDevice { + context: self.context.clone(), id: device_id, error_sink: error_sink.clone(), features: desc.required_features, }; - let queue = Queue { + let queue = CoreQueue { + context: self.context.clone(), id: queue_id, error_sink, }; - ready(Ok((device, queue))) - } - - fn instance_poll_all_devices(&self, force_wait: bool) -> bool { - match self.0.poll_all_devices(force_wait) { - Ok(all_queue_empty) => all_queue_empty, - Err(err) => self.handle_error_fatal(err, "Device::poll"), - } - } - - fn adapter_is_surface_supported( - &self, - adapter_data: &Self::AdapterData, - surface_data: &Self::SurfaceData, - ) -> bool { - self.0 - 
.adapter_is_surface_supported(*adapter_data, surface_data.id) - } - - fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features { - self.0.adapter_features(*adapter_data) + Box::pin(ready(Ok((device.into(), queue.into())))) } - fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits { - self.0.adapter_limits(*adapter_data) - } + fn is_surface_supported(&self, surface: &dispatch::DispatchSurface) -> bool { + let surface = surface.as_core(); - fn adapter_downlevel_capabilities( - &self, - adapter_data: &Self::AdapterData, - ) -> DownlevelCapabilities { - self.0.adapter_downlevel_capabilities(*adapter_data) + self.context + .0 + .adapter_is_surface_supported(self.id, surface.id) } - fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo { - self.0.adapter_get_info(*adapter_data) + fn features(&self) -> crate::Features { + self.context.0.adapter_features(self.id) } - fn adapter_get_texture_format_features( - &self, - adapter_data: &Self::AdapterData, - format: wgt::TextureFormat, - ) -> wgt::TextureFormatFeatures { - self.0 - .adapter_get_texture_format_features(*adapter_data, format) + fn limits(&self) -> crate::Limits { + self.context.0.adapter_limits(self.id) } - fn adapter_get_presentation_timestamp( - &self, - adapter_data: &Self::AdapterData, - ) -> wgt::PresentationTimestamp { - self.0.adapter_get_presentation_timestamp(*adapter_data) + fn downlevel_capabilities(&self) -> crate::DownlevelCapabilities { + self.context.0.adapter_downlevel_capabilities(self.id) } - fn surface_get_capabilities( - &self, - surface_data: &Self::SurfaceData, - adapter_data: &Self::AdapterData, - ) -> wgt::SurfaceCapabilities { - self.0 - .surface_get_capabilities(surface_data.id, *adapter_data) - .unwrap_or_default() + fn get_info(&self) -> crate::AdapterInfo { + self.context.0.adapter_get_info(self.id) } - fn surface_configure( + fn get_texture_format_features( &self, - surface_data: &Self::SurfaceData, - device_data: &Self::DeviceData, - config: &crate::SurfaceConfiguration, - ) { - let error = self + format: crate::TextureFormat, + ) -> crate::TextureFormatFeatures { + self.context .0 - .surface_configure(surface_data.id, device_data.id, config); - if let Some(e) = error { - self.handle_error_fatal(e, "Surface::configure"); - } else { - *surface_data.configured_device.lock() = Some(device_data.id); - } - } - - fn surface_get_current_texture( - &self, - surface_data: &Self::SurfaceData, - ) -> ( - Option, - SurfaceStatus, - Self::SurfaceOutputDetail, - ) { - match self.0.surface_get_current_texture(surface_data.id, None) { - Ok(wgc::present::SurfaceOutput { status, texture_id }) => { - let data = texture_id.map(|id| Texture { - id, - error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), - }); - - ( - data, - status, - SurfaceOutputDetail { - surface_id: surface_data.id, - }, - ) - } - Err(err) => self.handle_error_fatal(err, "Surface::get_current_texture_view"), - } + .adapter_get_texture_format_features(self.id, format) } - fn surface_present(&self, detail: &Self::SurfaceOutputDetail) { - match self.0.surface_present(detail.surface_id) { - Ok(_status) => (), - Err(err) => self.handle_error_fatal(err, "Surface::present"), - } + fn get_presentation_timestamp(&self) -> crate::PresentationTimestamp { + self.context.0.adapter_get_presentation_timestamp(self.id) } +} - fn surface_texture_discard(&self, detail: &Self::SurfaceOutputDetail) { - match self.0.surface_texture_discard(detail.surface_id) { - Ok(_status) => (), - Err(err) => self.handle_error_fatal(err, 
"Surface::discard_texture"), - } +impl Drop for CoreAdapter { + fn drop(&mut self) { + self.context.0.adapter_drop(self.id) } +} - fn device_features(&self, device_data: &Self::DeviceData) -> Features { - self.0.device_features(device_data.id) +impl dispatch::DeviceInterface for CoreDevice { + fn features(&self) -> crate::Features { + self.context.0.device_features(self.id) } - fn device_limits(&self, device_data: &Self::DeviceData) -> Limits { - self.0.device_limits(device_data.id) + fn limits(&self) -> crate::Limits { + self.context.0.device_limits(self.id) } + // If we have no way to create a shader module, we can't return one, and so most of the function is unreachable. #[cfg_attr( not(any( feature = "spirv", @@ -790,14 +962,13 @@ impl crate::Context for ContextWgpuCore { feature = "wgsl", feature = "naga-ir" )), - allow(unreachable_code, unused_variables) + allow(unreachable_code, unused) )] - fn device_create_shader_module( + fn create_shader_module( &self, - device_data: &Self::DeviceData, - desc: ShaderModuleDescriptor<'_>, + desc: crate::ShaderModuleDescriptor<'_>, shader_bound_checks: wgt::ShaderBoundChecks, - ) -> Self::ShaderModuleData { + ) -> dispatch::DispatchShaderModule { let descriptor = wgc::pipeline::ShaderModuleDescriptor { label: desc.label.map(Borrowed), shader_bound_checks, @@ -829,12 +1000,13 @@ impl crate::Context for ContextWgpuCore { ShaderSource::Dummy(_) => panic!("found `ShaderSource::Dummy`"), }; let (id, error) = - self.0 - .device_create_shader_module(device_data.id, &descriptor, source, None); + self.context + .0 + .device_create_shader_module(self.id, &descriptor, source, None); let compilation_info = match error { Some(cause) => { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause.clone(), desc.label, "Device::create_shader_module", @@ -844,17 +1016,18 @@ impl crate::Context for ContextWgpuCore { None => CompilationInfo { messages: vec![] }, }; - ShaderModule { + CoreShaderModule { + context: self.context.clone(), id, compilation_info, } + .into() } - unsafe fn device_create_shader_module_spirv( + unsafe fn create_shader_module_spirv( &self, - device_data: &Self::DeviceData, - desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> Self::ShaderModuleData { + desc: &crate::ShaderModuleDescriptorSpirV<'_>, + ) -> dispatch::DispatchShaderModule { let descriptor = wgc::pipeline::ShaderModuleDescriptor { label: desc.label.map(Borrowed), // Doesn't matter the value since spirv shaders aren't mutated to include @@ -862,8 +1035,8 @@ impl crate::Context for ContextWgpuCore { shader_bound_checks: unsafe { wgt::ShaderBoundChecks::unchecked() }, }; let (id, error) = unsafe { - self.0.device_create_shader_module_spirv( - device_data.id, + self.context.0.device_create_shader_module_spirv( + self.id, &descriptor, Borrowed(&desc.source), None, @@ -871,8 +1044,8 @@ impl crate::Context for ContextWgpuCore { }; let compilation_info = match error { Some(cause) => { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause.clone(), desc.label, "Device::create_shader_module_spirv", @@ -881,55 +1054,57 @@ impl crate::Context for ContextWgpuCore { } None => CompilationInfo { messages: vec![] }, }; - ShaderModule { + CoreShaderModule { + context: self.context.clone(), id, compilation_info, } + .into() } - fn device_create_bind_group_layout( + fn create_bind_group_layout( &self, - device_data: &Self::DeviceData, - desc: &BindGroupLayoutDescriptor<'_>, - ) -> Self::BindGroupLayoutData { + 
desc: &crate::BindGroupLayoutDescriptor<'_>, + ) -> dispatch::DispatchBindGroupLayout { let descriptor = wgc::binding_model::BindGroupLayoutDescriptor { label: desc.label.map(Borrowed), entries: Borrowed(desc.entries), }; - let (id, error) = self - .0 - .device_create_bind_group_layout(device_data.id, &descriptor, None); + let (id, error) = + self.context + .0 + .device_create_bind_group_layout(self.id, &descriptor, None); if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::create_bind_group_layout", ); } - id + CoreBindGroupLayout { + context: self.context.clone(), + id, + } + .into() } - fn device_create_bind_group( + + fn create_bind_group( &self, - device_data: &Self::DeviceData, - desc: &BindGroupDescriptor<'_>, - ) -> Self::BindGroupData { + desc: &crate::BindGroupDescriptor<'_>, + ) -> dispatch::DispatchBindGroup { use wgc::binding_model as bm; let mut arrayed_texture_views = Vec::new(); let mut arrayed_samplers = Vec::new(); - if device_data - .features - .contains(Features::TEXTURE_BINDING_ARRAY) - { + if self.features.contains(Features::TEXTURE_BINDING_ARRAY) { // gather all the array view IDs first for entry in desc.entries.iter() { if let BindingResource::TextureViewArray(array) = entry.resource { - arrayed_texture_views - .extend(array.iter().map(|view| *downcast_texture_view(view))); + arrayed_texture_views.extend(array.iter().map(|view| view.inner.as_core().id)); } if let BindingResource::SamplerArray(array) = entry.resource { - arrayed_samplers.extend(array.iter().map(|sampler| *downcast_sampler(sampler))); + arrayed_samplers.extend(array.iter().map(|sampler| sampler.inner.as_core().id)); } } } @@ -937,15 +1112,12 @@ impl crate::Context for ContextWgpuCore { let mut remaining_arrayed_samplers = &arrayed_samplers[..]; let mut arrayed_buffer_bindings = Vec::new(); - if device_data - .features - .contains(Features::BUFFER_BINDING_ARRAY) - { + if self.features.contains(Features::BUFFER_BINDING_ARRAY) { // gather all the buffers first for entry in desc.entries.iter() { if let BindingResource::BufferArray(array) = entry.resource { arrayed_buffer_bindings.extend(array.iter().map(|binding| bm::BufferBinding { - buffer_id: downcast_buffer(binding.buffer).id, + buffer_id: binding.buffer.inner.as_core().id, offset: binding.offset, size: binding.size, })); @@ -965,7 +1137,7 @@ impl crate::Context for ContextWgpuCore { offset, size, }) => bm::BindingResource::Buffer(bm::BufferBinding { - buffer_id: downcast_buffer(buffer).id, + buffer_id: buffer.inner.as_core().id, offset, size, }), @@ -976,7 +1148,7 @@ impl crate::Context for ContextWgpuCore { bm::BindingResource::BufferArray(Borrowed(slice)) } BindingResource::Sampler(sampler) => { - bm::BindingResource::Sampler(*downcast_sampler(sampler)) + bm::BindingResource::Sampler(sampler.inner.as_core().id) } BindingResource::SamplerArray(array) => { let slice = &remaining_arrayed_samplers[..array.len()]; @@ -984,7 +1156,7 @@ impl crate::Context for ContextWgpuCore { bm::BindingResource::SamplerArray(Borrowed(slice)) } BindingResource::TextureView(texture_view) => { - bm::BindingResource::TextureView(*downcast_texture_view(texture_view)) + bm::BindingResource::TextureView(texture_view.inner.as_core().id) } BindingResource::TextureViewArray(array) => { let slice = &remaining_arrayed_texture_views[..array.len()]; @@ -994,7 +1166,7 @@ impl crate::Context for ContextWgpuCore { } BindingResource::AccelerationStructure(acceleration_structure) => { 
bm::BindingResource::AccelerationStructure( - downcast_tlas(acceleration_structure).id, + acceleration_structure.inner.as_core().id, ) } }, @@ -1002,28 +1174,33 @@ impl crate::Context for ContextWgpuCore { .collect::>(); let descriptor = bm::BindGroupDescriptor { label: desc.label.as_ref().map(|label| Borrowed(&label[..])), - layout: *downcast_bind_group_layout(desc.layout), + layout: desc.layout.inner.as_core().id, entries: Borrowed(&entries), }; let (id, error) = self + .context .0 - .device_create_bind_group(device_data.id, &descriptor, None); + .device_create_bind_group(self.id, &descriptor, None); if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::create_bind_group", ); } - id + CoreBindGroup { + context: self.context.clone(), + id, + } + .into() } - fn device_create_pipeline_layout( + + fn create_pipeline_layout( &self, - device_data: &Self::DeviceData, - desc: &PipelineLayoutDescriptor<'_>, - ) -> Self::PipelineLayoutData { + desc: &crate::PipelineLayoutDescriptor<'_>, + ) -> dispatch::DispatchPipelineLayout { // Limit is always less or equal to hal::MAX_BIND_GROUPS, so this is always right // Guards following ArrayVec assert!( @@ -1036,7 +1213,7 @@ impl crate::Context for ContextWgpuCore { let temp_layouts = desc .bind_group_layouts .iter() - .map(|bgl| *downcast_bind_group_layout(bgl)) + .map(|bgl| bgl.inner.as_core().id) .collect::>(); let descriptor = wgc::binding_model::PipelineLayoutDescriptor { label: desc.label.map(Borrowed), @@ -1045,23 +1222,28 @@ impl crate::Context for ContextWgpuCore { }; let (id, error) = self + .context .0 - .device_create_pipeline_layout(device_data.id, &descriptor, None); + .device_create_pipeline_layout(self.id, &descriptor, None); if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::create_pipeline_layout", ); } - id + CorePipelineLayout { + context: self.context.clone(), + id, + } + .into() } - fn device_create_render_pipeline( + + fn create_render_pipeline( &self, - device_data: &Self::DeviceData, - desc: &RenderPipelineDescriptor<'_>, - ) -> Self::RenderPipelineData { + desc: &crate::RenderPipelineDescriptor<'_>, + ) -> dispatch::DispatchRenderPipeline { use wgc::pipeline as pipe; let vertex_buffers: ArrayVec<_, { wgc::MAX_VERTEX_BUFFERS }> = desc @@ -1077,10 +1259,10 @@ impl crate::Context for ContextWgpuCore { let descriptor = pipe::RenderPipelineDescriptor { label: desc.label.map(Borrowed), - layout: desc.layout.map(downcast_pipeline_layout).copied(), + layout: desc.layout.map(|layout| layout.inner.as_core().id), vertex: pipe::VertexState { stage: pipe::ProgrammableStageDescriptor { - module: downcast_shader_module(desc.vertex.module).id, + module: desc.vertex.module.inner.as_core().id, entry_point: desc.vertex.entry_point.map(Borrowed), constants: Borrowed(desc.vertex.compilation_options.constants), zero_initialize_workgroup_memory: desc @@ -1095,7 +1277,7 @@ impl crate::Context for ContextWgpuCore { multisample: desc.multisample, fragment: desc.fragment.as_ref().map(|frag| pipe::FragmentState { stage: pipe::ProgrammableStageDescriptor { - module: downcast_shader_module(frag.module).id, + module: frag.module.inner.as_core().id, entry_point: frag.entry_point.map(Borrowed), constants: Borrowed(frag.compilation_options.constants), zero_initialize_workgroup_memory: frag @@ -1105,53 +1287,57 @@ impl crate::Context for ContextWgpuCore { targets: 
Borrowed(frag.targets), }), multiview: desc.multiview, - cache: desc.cache.map(downcast_pipeline_cache).copied(), + cache: desc.cache.map(|cache| cache.inner.as_core().id), }; let (id, error) = - self.0 - .device_create_render_pipeline(device_data.id, &descriptor, None, None); + self.context + .0 + .device_create_render_pipeline(self.id, &descriptor, None, None); if let Some(cause) = error { if let wgc::pipeline::CreateRenderPipelineError::Internal { stage, ref error } = cause { log::error!("Shader translation error for stage {:?}: {}", stage, error); log::error!("Please report it to https://github.com/gfx-rs/wgpu"); } - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::create_render_pipeline", ); } - RenderPipeline { + CoreRenderPipeline { + context: self.context.clone(), id, - error_sink: Arc::clone(&device_data.error_sink), + error_sink: Arc::clone(&self.error_sink), } + .into() } - fn device_create_compute_pipeline( + + fn create_compute_pipeline( &self, - device_data: &Self::DeviceData, - desc: &ComputePipelineDescriptor<'_>, - ) -> Self::ComputePipelineData { + desc: &crate::ComputePipelineDescriptor<'_>, + ) -> dispatch::DispatchComputePipeline { use wgc::pipeline as pipe; let descriptor = pipe::ComputePipelineDescriptor { label: desc.label.map(Borrowed), - layout: desc.layout.map(downcast_pipeline_layout).copied(), + layout: desc.layout.map(|pll| pll.inner.as_core().id), stage: pipe::ProgrammableStageDescriptor { - module: downcast_shader_module(desc.module).id, + module: desc.module.inner.as_core().id, entry_point: desc.entry_point.map(Borrowed), constants: Borrowed(desc.compilation_options.constants), zero_initialize_workgroup_memory: desc .compilation_options .zero_initialize_workgroup_memory, }, - cache: desc.cache.map(downcast_pipeline_cache).copied(), + cache: desc.cache.map(|cache| cache.inner.as_core().id), }; let (id, error) = - self.0 - .device_create_compute_pipeline(device_data.id, &descriptor, None, None); + self.context + .0 + .device_create_compute_pipeline(self.id, &descriptor, None, None); if let Some(cause) = error { if let wgc::pipeline::CreateComputePipelineError::Internal(ref error) = cause { log::error!( @@ -1161,24 +1347,25 @@ impl crate::Context for ContextWgpuCore { ); log::error!("Please report it to https://github.com/gfx-rs/wgpu"); } - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::create_compute_pipeline", ); } - ComputePipeline { + CoreComputePipeline { + context: self.context.clone(), id, - error_sink: Arc::clone(&device_data.error_sink), + error_sink: Arc::clone(&self.error_sink), } + .into() } - unsafe fn device_create_pipeline_cache( + unsafe fn create_pipeline_cache( &self, - device_data: &Self::DeviceData, - desc: &PipelineCacheDescriptor<'_>, - ) -> Self::PipelineCacheData { + desc: &crate::PipelineCacheDescriptor<'_>, + ) -> dispatch::DispatchPipelineCache { use wgc::pipeline as pipe; let descriptor = pipe::PipelineCacheDescriptor { @@ -1187,80 +1374,117 @@ impl crate::Context for ContextWgpuCore { fallback: desc.fallback, }; let (id, error) = unsafe { - self.0 - .device_create_pipeline_cache(device_data.id, &descriptor, None) + self.context + .0 + .device_create_pipeline_cache(self.id, &descriptor, None) }; if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::device_create_pipeline_cache_init", ); 
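
[Editor's note: a hedged sketch of how user code would drive the `create_pipeline_cache`/`get_data` pair implemented in this hunk. `previous_blob` and `path` are assumed inputs supplied by the caller, and error handling is elided; the descriptor fields match those shown above (`label`, `data`, `fallback`).]

```rust
use std::path::Path;

// Create a pipeline cache from a previously saved blob (if any), then
// persist the refreshed blob for the next run. `device` is assumed in scope.
fn build_cache(device: &wgpu::Device, previous_blob: Option<&[u8]>, path: &Path) {
    let cache = unsafe {
        device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
            label: Some("shader-cache"),
            data: previous_blob,
            // Start from an empty cache if the blob is stale or invalid.
            fallback: true,
        })
    };
    // The cache would then be passed as the `cache` field of a pipeline
    // descriptor during pipeline creation.
    if let Some(blob) = cache.get_data() {
        let _ = std::fs::write(path, blob);
    }
}
```
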
        }
-        id
+        CorePipelineCache {
+            context: self.context.clone(),
+            id,
+        }
+        .into()
     }

-    fn device_create_buffer(
-        &self,
-        device_data: &Self::DeviceData,
-        desc: &crate::BufferDescriptor<'_>,
-    ) -> Self::BufferData {
-        let (id, error) =
-            self.0
-                .device_create_buffer(device_data.id, &desc.map_label(|l| l.map(Borrowed)), None);
+    fn create_buffer(&self, desc: &crate::BufferDescriptor<'_>) -> dispatch::DispatchBuffer {
+        let (id, error) = self.context.0.device_create_buffer(
+            self.id,
+            &desc.map_label(|l| l.map(Borrowed)),
+            None,
+        );
         if let Some(cause) = error {
-            self.handle_error(
-                &device_data.error_sink,
-                cause,
-                desc.label,
-                "Device::create_buffer",
-            );
+            self.context
+                .handle_error(&self.error_sink, cause, desc.label, "Device::create_buffer");
         }
-        Buffer {
+        CoreBuffer {
+            context: self.context.clone(),
             id,
-            error_sink: Arc::clone(&device_data.error_sink),
+            error_sink: Arc::clone(&self.error_sink),
         }
+        .into()
     }

-    fn device_create_texture(
-        &self,
-        device_data: &Self::DeviceData,
-        desc: &TextureDescriptor<'_>,
-    ) -> Self::TextureData {
+
+    fn create_texture(&self, desc: &crate::TextureDescriptor<'_>) -> dispatch::DispatchTexture {
         let wgt_desc = desc.map_label_and_view_formats(|l| l.map(Borrowed), |v| v.to_vec());
         let (id, error) = self
+            .context
             .0
-            .device_create_texture(device_data.id, &wgt_desc, None);
+            .device_create_texture(self.id, &wgt_desc, None);
         if let Some(cause) = error {
-            self.handle_error(
-                &device_data.error_sink,
+            self.context.handle_error(
+                &self.error_sink,
                 cause,
                 desc.label,
                 "Device::create_texture",
             );
         }
-        Texture {
+        CoreTexture {
+            context: self.context.clone(),
             id,
-            error_sink: Arc::clone(&device_data.error_sink),
+            error_sink: Arc::clone(&self.error_sink),
         }
+        .into()
     }

-    fn device_create_sampler(
+
+    fn create_blas(
         &self,
-        device_data: &Self::DeviceData,
-        desc: &SamplerDescriptor<'_>,
-    ) -> Self::SamplerData {
-        let descriptor = wgc::resource::SamplerDescriptor {
-            label: desc.label.map(Borrowed),
-            address_modes: [
-                desc.address_mode_u,
-                desc.address_mode_v,
-                desc.address_mode_w,
-            ],
-            mag_filter: desc.mag_filter,
-            min_filter: desc.min_filter,
-            mipmap_filter: desc.mipmap_filter,
+        desc: &crate::CreateBlasDescriptor<'_>,
+        sizes: crate::BlasGeometrySizeDescriptors,
+    ) -> (Option<u64>, dispatch::DispatchBlas) {
+        let global = &self.context.0;
+        let (id, handle, error) =
+            global.device_create_blas(self.id, &desc.map_label(|l| l.map(Borrowed)), sizes, None);
+        if let Some(cause) = error {
+            self.context
+                .handle_error(&self.error_sink, cause, desc.label, "Device::create_blas");
+        }
+        (
+            handle,
+            CoreBlas {
+                context: self.context.clone(),
+                id,
+                // error_sink: Arc::clone(&self.error_sink),
+            }
+            .into(),
+        )
+    }
+
+    fn create_tlas(&self, desc: &crate::CreateTlasDescriptor<'_>) -> dispatch::DispatchTlas {
+        let global = &self.context.0;
+        let (id, error) =
+            global.device_create_tlas(self.id, &desc.map_label(|l| l.map(Borrowed)), None);
+        if let Some(cause) = error {
+            self.context
+                .handle_error(&self.error_sink, cause, desc.label, "Device::create_tlas");
+        }
+        CoreTlas {
+            context: self.context.clone(),
+            id,
+            // error_sink: Arc::clone(&self.error_sink),
+        }
+        .into()
+    }
+
+    fn create_sampler(&self, desc: &crate::SamplerDescriptor<'_>) -> dispatch::DispatchSampler {
+        let descriptor = wgc::resource::SamplerDescriptor {
+            label: desc.label.map(Borrowed),
+            address_modes: [
+                desc.address_mode_u,
+                desc.address_mode_v,
+                desc.address_mode_w,
+            ],
+            mag_filter: desc.mag_filter,
+            min_filter: desc.min_filter,
+            mipmap_filter:
desc.mipmap_filter, lod_min_clamp: desc.lod_min_clamp, lod_max_clamp: desc.lod_max_clamp, compare: desc.compare, @@ -1269,63 +1493,72 @@ impl crate::Context for ContextWgpuCore { }; let (id, error) = self + .context .0 - .device_create_sampler(device_data.id, &descriptor, None); + .device_create_sampler(self.id, &descriptor, None); if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::create_sampler", ); } - id + CoreSampler { + context: self.context.clone(), + id, + } + .into() } - fn device_create_query_set( - &self, - device_data: &Self::DeviceData, - desc: &wgt::QuerySetDescriptor>, - ) -> Self::QuerySetData { - let (id, error) = self.0.device_create_query_set( - device_data.id, + + fn create_query_set(&self, desc: &crate::QuerySetDescriptor<'_>) -> dispatch::DispatchQuerySet { + let (id, error) = self.context.0.device_create_query_set( + self.id, &desc.map_label(|l| l.map(Borrowed)), None, ); if let Some(cause) = error { - self.handle_error_nolabel(&device_data.error_sink, cause, "Device::create_query_set"); + self.context + .handle_error_nolabel(&self.error_sink, cause, "Device::create_query_set"); } - id + CoreQuerySet { + context: self.context.clone(), + id, + } + .into() } - fn device_create_command_encoder( + + fn create_command_encoder( &self, - device_data: &Self::DeviceData, - desc: &CommandEncoderDescriptor<'_>, - ) -> Self::CommandEncoderData { - let (id, error) = self.0.device_create_command_encoder( - device_data.id, + desc: &crate::CommandEncoderDescriptor<'_>, + ) -> dispatch::DispatchCommandEncoder { + let (id, error) = self.context.0.device_create_command_encoder( + self.id, &desc.map_label(|l| l.map(Borrowed)), None, ); if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "Device::create_command_encoder", ); } - CommandEncoder { + CoreCommandEncoder { + context: self.context.clone(), id, - error_sink: Arc::clone(&device_data.error_sink), + error_sink: Arc::clone(&self.error_sink), open: true, } + .into() } - fn device_create_render_bundle_encoder( + + fn create_render_bundle_encoder( &self, - device_data: &Self::DeviceData, - desc: &RenderBundleEncoderDescriptor<'_>, - ) -> Self::RenderBundleEncoderData { + desc: &crate::RenderBundleEncoderDescriptor<'_>, + ) -> dispatch::DispatchRenderBundleEncoder { let descriptor = wgc::command::RenderBundleEncoderDescriptor { label: desc.label.map(Borrowed), color_formats: Borrowed(desc.color_formats), @@ -1333,74 +1566,321 @@ impl crate::Context for ContextWgpuCore { sample_count: desc.sample_count, multiview: desc.multiview, }; - match wgc::command::RenderBundleEncoder::new(&descriptor, device_data.id, None) { + let encoder = match wgc::command::RenderBundleEncoder::new(&descriptor, self.id, None) { Ok(encoder) => encoder, Err(e) => panic!("Error in Device::create_render_bundle_encoder: {e}"), + }; + + CoreRenderBundleEncoder { + context: self.context.clone(), + encoder, + id: crate::cmp::Identifier::create(), } + .into() } - #[cfg_attr(not(any(native, Emscripten)), allow(unused))] - fn device_drop(&self, device_data: &Self::DeviceData) { - #[cfg(any(native, Emscripten))] - { - self.0.device_drop(device_data.id); - } + + fn set_device_lost_callback(&self, device_lost_callback: dispatch::BoxDeviceLostCallback) { + self.context + .0 + .device_set_device_lost_closure(self.id, device_lost_callback); } - #[cfg_attr(target_arch = "wasm32", 
allow(unused))] - fn queue_drop(&self, queue_data: &Self::QueueData) { - self.0.queue_drop(queue_data.id); + + fn on_uncaptured_error(&self, handler: Box) { + let mut error_sink = self.error_sink.lock(); + error_sink.uncaptured_handler = Some(handler); } - fn device_set_device_lost_callback( - &self, - device_data: &Self::DeviceData, - device_lost_callback: crate::context::DeviceLostCallback, - ) { - self.0 - .device_set_device_lost_closure(device_data.id, device_lost_callback); + + fn push_error_scope(&self, filter: crate::ErrorFilter) { + let mut error_sink = self.error_sink.lock(); + error_sink.scopes.push(ErrorScope { + error: None, + filter, + }); } - fn device_destroy(&self, device_data: &Self::DeviceData) { - self.0.device_destroy(device_data.id); + + fn pop_error_scope(&self) -> Pin> { + let mut error_sink = self.error_sink.lock(); + let scope = error_sink.scopes.pop().unwrap(); + Box::pin(ready(scope.error)) } - fn device_poll( - &self, - device_data: &Self::DeviceData, - maintain: crate::Maintain, - ) -> wgt::MaintainResult { - let maintain_inner = maintain.map_index(|i| *i.data.as_ref().downcast_ref().unwrap()); - match self.0.device_poll(device_data.id, maintain_inner) { + + fn start_capture(&self) { + self.context.0.device_start_capture(self.id); + } + + fn stop_capture(&self) { + self.context.0.device_stop_capture(self.id); + } + + fn poll(&self, maintain: crate::Maintain) -> crate::MaintainResult { + let maintain_inner = maintain.map_index(|i| i.index); + match self.context.0.device_poll(self.id, maintain_inner) { Ok(done) => match done { true => wgt::MaintainResult::SubmissionQueueEmpty, false => wgt::MaintainResult::Ok, }, - Err(err) => self.handle_error_fatal(err, "Device::poll"), + Err(err) => self.context.handle_error_fatal(err, "Device::poll"), } } - fn device_on_uncaptured_error( + + fn get_internal_counters(&self) -> crate::InternalCounters { + self.context.0.device_get_internal_counters(self.id) + } + + fn generate_allocator_report(&self) -> Option { + self.context.0.device_generate_allocator_report(self.id) + } + + fn destroy(&self) { + self.context.0.device_destroy(self.id); + } +} + +impl Drop for CoreDevice { + fn drop(&mut self) { + self.context.0.device_drop(self.id) + } +} + +impl dispatch::QueueInterface for CoreQueue { + fn write_buffer( &self, - device_data: &Self::DeviceData, - handler: Box, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + data: &[u8], ) { - let mut error_sink = device_data.error_sink.lock(); - error_sink.uncaptured_handler = Some(handler); + let buffer = buffer.as_core(); + + match self + .context + .0 + .queue_write_buffer(self.id, buffer.id, offset, data) + { + Ok(()) => (), + Err(err) => { + self.context + .handle_error_nolabel(&self.error_sink, err, "Queue::write_buffer") + } + } } - fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: crate::ErrorFilter) { - let mut error_sink = device_data.error_sink.lock(); - error_sink.scopes.push(ErrorScope { - error: None, - filter, - }); + + fn create_staging_buffer( + &self, + size: crate::BufferSize, + ) -> Option { + match self + .context + .0 + .queue_create_staging_buffer(self.id, size, None) + { + Ok((buffer_id, ptr)) => Some( + CoreQueueWriteBuffer { + buffer_id, + mapping: CoreBufferMappedRange { + ptr, + size: size.get() as usize, + }, + } + .into(), + ), + Err(err) => { + self.context.handle_error_nolabel( + &self.error_sink, + err, + "Queue::write_buffer_with", + ); + None + } + } } - fn device_pop_error_scope(&self, device_data: 
&Self::DeviceData) -> Self::PopErrorScopeFuture { - let mut error_sink = device_data.error_sink.lock(); - let scope = error_sink.scopes.pop().unwrap(); - ready(scope.error) + + fn validate_write_buffer( + &self, + buffer: &dispatch::DispatchBuffer, + offset: wgt::BufferAddress, + size: wgt::BufferSize, + ) -> Option<()> { + let buffer = buffer.as_core(); + + match self + .context + .0 + .queue_validate_write_buffer(self.id, buffer.id, offset, size) + { + Ok(()) => Some(()), + Err(err) => { + self.context.handle_error_nolabel( + &self.error_sink, + err, + "Queue::write_buffer_with", + ); + None + } + } + } + + fn write_staging_buffer( + &self, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + staging_buffer: &dispatch::DispatchQueueWriteBuffer, + ) { + let buffer = buffer.as_core(); + let staging_buffer = staging_buffer.as_core(); + + match self.context.0.queue_write_staging_buffer( + self.id, + buffer.id, + offset, + staging_buffer.buffer_id, + ) { + Ok(()) => (), + Err(err) => { + self.context.handle_error_nolabel( + &self.error_sink, + err, + "Queue::write_buffer_with", + ); + } + } } - fn buffer_map_async( + fn write_texture( &self, - buffer_data: &Self::BufferData, - mode: MapMode, - range: Range, - callback: crate::context::BufferMapCallback, + texture: crate::TexelCopyTextureInfo<'_>, + data: &[u8], + data_layout: crate::TexelCopyBufferLayout, + size: crate::Extent3d, + ) { + match self.context.0.queue_write_texture( + self.id, + &map_texture_copy_view(texture), + data, + &data_layout, + &size, + ) { + Ok(()) => (), + Err(err) => { + self.context + .handle_error_nolabel(&self.error_sink, err, "Queue::write_texture") + } + } + } + + #[cfg(any(webgpu, webgl))] + fn copy_external_image_to_texture( + &self, + source: &wgt::CopyExternalImageSourceInfo, + dest: wgt::CopyExternalImageDestInfo<&crate::api::Texture>, + size: crate::Extent3d, + ) { + match self.context.0.queue_copy_external_image_to_texture( + self.id, + source, + map_texture_tagged_copy_view(dest), + size, + ) { + Ok(()) => (), + Err(err) => self.context.handle_error_nolabel( + &self.error_sink, + err, + "Queue::copy_external_image_to_texture", + ), + } + } + + fn submit( + &self, + command_buffers: &mut dyn Iterator, + ) -> u64 { + let temp_command_buffers = command_buffers.collect::>(); + let command_buffer_ids = temp_command_buffers + .iter() + .map(|cmdbuf| cmdbuf.as_core().id) + .collect::>(); + + let index = match self.context.0.queue_submit(self.id, &command_buffer_ids) { + Ok(index) => index, + Err((index, err)) => { + self.context + .handle_error_nolabel(&self.error_sink, err, "Queue::submit"); + index + } + }; + + drop(temp_command_buffers); + + index + } + + fn get_timestamp_period(&self) -> f32 { + self.context.0.queue_get_timestamp_period(self.id) + } + + fn on_submitted_work_done(&self, callback: dispatch::BoxSubmittedWorkDoneCallback) { + self.context + .0 + .queue_on_submitted_work_done(self.id, callback); + } +} + +impl Drop for CoreQueue { + fn drop(&mut self) { + self.context.0.queue_drop(self.id) + } +} + +impl dispatch::ShaderModuleInterface for CoreShaderModule { + fn get_compilation_info(&self) -> Pin> { + Box::pin(ready(self.compilation_info.clone())) + } +} + +impl Drop for CoreShaderModule { + fn drop(&mut self) { + self.context.0.shader_module_drop(self.id) + } +} + +impl dispatch::BindGroupLayoutInterface for CoreBindGroupLayout {} + +impl Drop for CoreBindGroupLayout { + fn drop(&mut self) { + self.context.0.bind_group_layout_drop(self.id) + } +} + +impl 
dispatch::BindGroupInterface for CoreBindGroup {} + +impl Drop for CoreBindGroup { + fn drop(&mut self) { + self.context.0.bind_group_drop(self.id) + } +} + +impl dispatch::TextureViewInterface for CoreTextureView {} + +impl Drop for CoreTextureView { + fn drop(&mut self) { + // TODO: We don't use this error at all? + let _ = self.context.0.texture_view_drop(self.id); + } +} + +impl dispatch::SamplerInterface for CoreSampler {} + +impl Drop for CoreSampler { + fn drop(&mut self) { + self.context.0.sampler_drop(self.id) + } +} + +impl dispatch::BufferInterface for CoreBuffer { + fn map_async( + &self, + mode: crate::MapMode, + range: Range, + callback: dispatch::BufferMapCallback, ) { let operation = wgc::resource::BufferMapOperation { host: match mode { @@ -1413,57 +1893,76 @@ impl crate::Context for ContextWgpuCore { })), }; - match self.0.buffer_map_async( - buffer_data.id, + match self.context.0.buffer_map_async( + self.id, range.start, Some(range.end - range.start), operation, ) { Ok(_) => (), Err(cause) => { - self.handle_error_nolabel(&buffer_data.error_sink, cause, "Buffer::map_async") + self.context + .handle_error_nolabel(&self.error_sink, cause, "Buffer::map_async") } } } - fn buffer_get_mapped_range( + + fn get_mapped_range( &self, - buffer_data: &Self::BufferData, - sub_range: Range, - ) -> Box { + sub_range: Range, + ) -> dispatch::DispatchBufferMappedRange { let size = sub_range.end - sub_range.start; match self + .context .0 - .buffer_get_mapped_range(buffer_data.id, sub_range.start, Some(size)) + .buffer_get_mapped_range(self.id, sub_range.start, Some(size)) { - Ok((ptr, size)) => Box::new(BufferMappedRange { + Ok((ptr, size)) => CoreBufferMappedRange { ptr, size: size as usize, - }), - Err(err) => self.handle_error_fatal(err, "Buffer::get_mapped_range"), + } + .into(), + Err(err) => self + .context + .handle_error_fatal(err, "Buffer::get_mapped_range"), } } - fn buffer_unmap(&self, buffer_data: &Self::BufferData) { - match self.0.buffer_unmap(buffer_data.id) { + #[cfg(webgpu)] + fn get_mapped_range_as_array_buffer( + &self, + _sub_range: Range, + ) -> Option { + None + } + + fn unmap(&self) { + match self.context.0.buffer_unmap(self.id) { Ok(()) => (), Err(cause) => { - self.handle_error_nolabel(&buffer_data.error_sink, cause, "Buffer::buffer_unmap") + self.context + .handle_error_nolabel(&self.error_sink, cause, "Buffer::buffer_unmap") } } } - fn shader_get_compilation_info( - &self, - shader_data: &Self::ShaderModuleData, - ) -> Self::CompilationInfoFuture { - ready(shader_data.compilation_info.clone()) + fn destroy(&self) { + // Per spec, no error to report. Even calling destroy multiple times is valid. 
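
[Editor's note: the `map_async`/`get_mapped_range`/`unmap` implementations above back the public buffer-mapping flow. A hedged sketch of that flow at the `wgpu` API level, assuming a buffer created with `MAP_READ` usage and a device to poll:]

```rust
// Read back the contents of a mappable buffer. `device` and `buffer`
// are assumed to be in scope; error handling is deliberately minimal.
fn read_back(device: &wgpu::Device, buffer: &wgpu::Buffer) -> Vec<u8> {
    let slice = buffer.slice(..);
    // The result is delivered through the callback, mirroring
    // `BufferMapCallback` in the implementation above.
    slice.map_async(wgpu::MapMode::Read, |result| {
        result.expect("map_async failed");
    });
    // Mapping only completes once the device is polled.
    let _ = device.poll(wgpu::Maintain::Wait);
    let data = slice.get_mapped_range().to_vec();
    // Unmapping invalidates any outstanding mapped ranges.
    buffer.unmap();
    data
}
```
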
+ let _ = self.context.0.buffer_destroy(self.id); + } +} + +impl Drop for CoreBuffer { + fn drop(&mut self) { + self.context.0.buffer_drop(self.id) } +} - fn texture_create_view( +impl dispatch::TextureInterface for CoreTexture { + fn create_view( &self, - texture_data: &Self::TextureData, - desc: &TextureViewDescriptor<'_>, - ) -> Self::TextureViewData { + desc: &crate::TextureViewDescriptor<'_>, + ) -> dispatch::DispatchTextureView { let descriptor = wgc::resource::TextureViewDescriptor { label: desc.label.map(Borrowed), format: desc.format, @@ -1477,237 +1976,243 @@ impl crate::Context for ContextWgpuCore { }, }; let (id, error) = self + .context .0 - .texture_create_view(texture_data.id, &descriptor, None); + .texture_create_view(self.id, &descriptor, None); if let Some(cause) = error { - self.handle_error( - &texture_data.error_sink, - cause, - desc.label, - "Texture::create_view", - ); + self.context + .handle_error(&self.error_sink, cause, desc.label, "Texture::create_view"); + } + CoreTextureView { + context: self.context.clone(), + id, } - id + .into() } - fn surface_drop(&self, surface_data: &Self::SurfaceData) { - self.0.surface_drop(surface_data.id) + fn destroy(&self) { + // Per spec, no error to report. Even calling destroy multiple times is valid. + let _ = self.context.0.texture_destroy(self.id); } +} - fn adapter_drop(&self, adapter_data: &Self::AdapterData) { - self.0.adapter_drop(*adapter_data) +impl Drop for CoreTexture { + fn drop(&mut self) { + self.context.0.texture_drop(self.id) } +} - fn buffer_destroy(&self, buffer_data: &Self::BufferData) { +impl dispatch::BlasInterface for CoreBlas { + fn destroy(&self) { // Per spec, no error to report. Even calling destroy multiple times is valid. - let _ = self.0.buffer_destroy(buffer_data.id); + let _ = self.context.0.blas_destroy(self.id); } +} - fn buffer_drop(&self, buffer_data: &Self::BufferData) { - self.0.buffer_drop(buffer_data.id) +impl Drop for CoreBlas { + fn drop(&mut self) { + self.context.0.blas_drop(self.id) } +} - fn texture_destroy(&self, texture_data: &Self::TextureData) { +impl dispatch::TlasInterface for CoreTlas { + fn destroy(&self) { // Per spec, no error to report. Even calling destroy multiple times is valid. 
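
[Editor's note: the infallible `destroy` implementations here are distinct from `Drop`: `destroy` releases the GPU allocation early while the handle stays valid, and dropping the value releases the id itself. A hedged sketch at the public API level; the descriptor values are placeholders.]

```rust
// Free a transient texture's GPU memory before the Rust value goes away.
fn discard_early(device: &wgpu::Device) {
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("transient"),
        size: wgpu::Extent3d { width: 64, height: 64, depth_or_array_layers: 1 },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8Unorm,
        usage: wgpu::TextureUsages::TEXTURE_BINDING,
        view_formats: &[],
    });
    texture.destroy();
    texture.destroy(); // per spec, calling destroy repeatedly is valid
    // `texture` is dropped here, which releases the id via `Drop`.
}
```
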
- let _ = self.0.texture_destroy(texture_data.id); + let _ = self.context.0.tlas_destroy(self.id); } +} - fn texture_drop(&self, texture_data: &Self::TextureData) { - self.0.texture_drop(texture_data.id) +impl Drop for CoreTlas { + fn drop(&mut self) { + self.context.0.tlas_drop(self.id) } +} - fn texture_view_drop(&self, texture_view_data: &Self::TextureViewData) { - let _ = self.0.texture_view_drop(*texture_view_data); - } +impl dispatch::QuerySetInterface for CoreQuerySet {} - fn sampler_drop(&self, sampler_data: &Self::SamplerData) { - self.0.sampler_drop(*sampler_data) +impl Drop for CoreQuerySet { + fn drop(&mut self) { + self.context.0.query_set_drop(self.id) } +} - fn query_set_drop(&self, query_set_data: &Self::QuerySetData) { - self.0.query_set_drop(*query_set_data) - } +impl dispatch::PipelineLayoutInterface for CorePipelineLayout {} - fn bind_group_drop(&self, bind_group_data: &Self::BindGroupData) { - self.0.bind_group_drop(*bind_group_data) +impl Drop for CorePipelineLayout { + fn drop(&mut self) { + self.context.0.pipeline_layout_drop(self.id) } +} - fn bind_group_layout_drop(&self, bind_group_layout_data: &Self::BindGroupLayoutData) { - self.0.bind_group_layout_drop(*bind_group_layout_data) +impl dispatch::RenderPipelineInterface for CoreRenderPipeline { + fn get_bind_group_layout(&self, index: u32) -> dispatch::DispatchBindGroupLayout { + let (id, error) = self + .context + .0 + .render_pipeline_get_bind_group_layout(self.id, index, None); + if let Some(err) = error { + self.context.handle_error_nolabel( + &self.error_sink, + err, + "RenderPipeline::get_bind_group_layout", + ) + } + CoreBindGroupLayout { + context: self.context.clone(), + id, + } + .into() } +} - fn pipeline_layout_drop(&self, pipeline_layout_data: &Self::PipelineLayoutData) { - self.0.pipeline_layout_drop(*pipeline_layout_data) - } - fn shader_module_drop(&self, shader_module_data: &Self::ShaderModuleData) { - self.0.shader_module_drop(shader_module_data.id) - } - fn command_encoder_drop(&self, command_encoder_data: &Self::CommandEncoderData) { - if command_encoder_data.open { - self.0.command_encoder_drop(command_encoder_data.id) - } - } - - fn command_buffer_drop(&self, command_buffer_data: &Self::CommandBufferData) { - self.0.command_buffer_drop(*command_buffer_data) - } - - fn render_bundle_drop(&self, render_bundle_data: &Self::RenderBundleData) { - self.0.render_bundle_drop(*render_bundle_data) - } - - fn compute_pipeline_drop(&self, pipeline_data: &Self::ComputePipelineData) { - self.0.compute_pipeline_drop(pipeline_data.id) - } - - fn render_pipeline_drop(&self, pipeline_data: &Self::RenderPipelineData) { - self.0.render_pipeline_drop(pipeline_data.id) - } - - fn pipeline_cache_drop(&self, cache_data: &Self::PipelineCacheData) { - self.0.pipeline_cache_drop(*cache_data) +impl Drop for CoreRenderPipeline { + fn drop(&mut self) { + self.context.0.render_pipeline_drop(self.id) } +} - fn compute_pipeline_get_bind_group_layout( - &self, - pipeline_data: &Self::ComputePipelineData, - index: u32, - ) -> Self::BindGroupLayoutData { - let (id, error) = - self.0 - .compute_pipeline_get_bind_group_layout(pipeline_data.id, index, None); +impl dispatch::ComputePipelineInterface for CoreComputePipeline { + fn get_bind_group_layout(&self, index: u32) -> dispatch::DispatchBindGroupLayout { + let (id, error) = self + .context + .0 + .compute_pipeline_get_bind_group_layout(self.id, index, None); if let Some(err) = error { - self.handle_error_nolabel( - &pipeline_data.error_sink, + self.context.handle_error_nolabel( 
+ &self.error_sink, err, "ComputePipeline::get_bind_group_layout", ) } - id + CoreBindGroupLayout { + context: self.context.clone(), + id, + } + .into() } +} - fn render_pipeline_get_bind_group_layout( - &self, - pipeline_data: &Self::RenderPipelineData, - index: u32, - ) -> Self::BindGroupLayoutData { - let (id, error) = - self.0 - .render_pipeline_get_bind_group_layout(pipeline_data.id, index, None); - if let Some(err) = error { - self.handle_error_nolabel( - &pipeline_data.error_sink, - err, - "RenderPipeline::get_bind_group_layout", - ) - } - id +impl Drop for CoreComputePipeline { + fn drop(&mut self) { + self.context.0.compute_pipeline_drop(self.id) } +} + +impl dispatch::PipelineCacheInterface for CorePipelineCache { + fn get_data(&self) -> Option> { + self.context.0.pipeline_cache_get_data(self.id) + } +} + +impl Drop for CorePipelineCache { + fn drop(&mut self) { + self.context.0.pipeline_cache_drop(self.id) + } +} - fn command_encoder_copy_buffer_to_buffer( +impl dispatch::CommandEncoderInterface for CoreCommandEncoder { + fn copy_buffer_to_buffer( &self, - encoder_data: &Self::CommandEncoderData, - source_data: &Self::BufferData, - source_offset: wgt::BufferAddress, - destination_data: &Self::BufferData, - destination_offset: wgt::BufferAddress, - copy_size: wgt::BufferAddress, + source: &dispatch::DispatchBuffer, + source_offset: crate::BufferAddress, + destination: &dispatch::DispatchBuffer, + destination_offset: crate::BufferAddress, + copy_size: crate::BufferAddress, ) { - if let Err(cause) = self.0.command_encoder_copy_buffer_to_buffer( - encoder_data.id, - source_data.id, + let source = source.as_core(); + let destination = destination.as_core(); + + if let Err(cause) = self.context.0.command_encoder_copy_buffer_to_buffer( + self.id, + source.id, source_offset, - destination_data.id, + destination.id, destination_offset, copy_size, ) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::copy_buffer_to_buffer", ); } } - fn command_encoder_copy_buffer_to_texture( + fn copy_buffer_to_texture( &self, - encoder_data: &Self::CommandEncoderData, source: crate::TexelCopyBufferInfo<'_>, destination: crate::TexelCopyTextureInfo<'_>, - copy_size: wgt::Extent3d, + copy_size: crate::Extent3d, ) { - if let Err(cause) = self.0.command_encoder_copy_buffer_to_texture( - encoder_data.id, + if let Err(cause) = self.context.0.command_encoder_copy_buffer_to_texture( + self.id, &map_buffer_copy_view(source), &map_texture_copy_view(destination), ©_size, ) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::copy_buffer_to_texture", ); } } - fn command_encoder_copy_texture_to_buffer( + fn copy_texture_to_buffer( &self, - encoder_data: &Self::CommandEncoderData, source: crate::TexelCopyTextureInfo<'_>, destination: crate::TexelCopyBufferInfo<'_>, - copy_size: wgt::Extent3d, + copy_size: crate::Extent3d, ) { - if let Err(cause) = self.0.command_encoder_copy_texture_to_buffer( - encoder_data.id, + if let Err(cause) = self.context.0.command_encoder_copy_texture_to_buffer( + self.id, &map_texture_copy_view(source), &map_buffer_copy_view(destination), ©_size, ) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::copy_texture_to_buffer", ); } } - fn command_encoder_copy_texture_to_texture( + fn copy_texture_to_texture( &self, - encoder_data: 
&Self::CommandEncoderData, source: crate::TexelCopyTextureInfo<'_>, destination: crate::TexelCopyTextureInfo<'_>, - copy_size: wgt::Extent3d, + copy_size: crate::Extent3d, ) { - if let Err(cause) = self.0.command_encoder_copy_texture_to_texture( - encoder_data.id, + if let Err(cause) = self.context.0.command_encoder_copy_texture_to_texture( + self.id, &map_texture_copy_view(source), &map_texture_copy_view(destination), &copy_size, ) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::copy_texture_to_texture", ); } } - fn command_encoder_begin_compute_pass( + fn begin_compute_pass( &self, - encoder_data: &Self::CommandEncoderData, - desc: &ComputePassDescriptor<'_>, - ) -> Self::ComputePassData { + desc: &crate::ComputePassDescriptor<'_>, + ) -> dispatch::DispatchComputePass { let timestamp_writes = desc.timestamp_writes .as_ref() .map(|tw| wgc::command::PassTimestampWrites { - query_set: *downcast_query_set(tw.query_set), + query_set: tw.query_set.inner.as_core().id, beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }); - let (pass, err) = self.0.command_encoder_create_compute_pass( - encoder_data.id, + let (pass, err) = self.context.0.command_encoder_create_compute_pass( + self.id, &wgc::command::ComputePassDescriptor { label: desc.label.map(Borrowed), timestamp_writes: timestamp_writes.as_ref(), @@ -1715,33 +2220,35 @@ impl crate::Context for ContextWgpuCore { ); if let Some(cause) = err { - self.handle_error( - &encoder_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "CommandEncoder::begin_compute_pass", ); } - Self::ComputePassData { + CoreComputePass { + context: self.context.clone(), pass, - error_sink: encoder_data.error_sink.clone(), + error_sink: self.error_sink.clone(), + id: crate::cmp::Identifier::create(), } + .into() } - fn command_encoder_begin_render_pass( + fn begin_render_pass( &self, - encoder_data: &Self::CommandEncoderData, desc: &crate::RenderPassDescriptor<'_>, - ) -> Self::RenderPassData { + ) -> dispatch::DispatchRenderPass { let colors = desc .color_attachments .iter() .map(|ca| { ca.as_ref() .map(|at| wgc::command::RenderPassColorAttachment { - view: *downcast_texture_view(at.view), - resolve_target: at.resolve_target.map(downcast_texture_view).copied(), + view: at.view.inner.as_core().id, + resolve_target: at.resolve_target.map(|view| view.inner.as_core().id), channel: map_pass_channel(Some(&at.ops)), }) }) @@ -1749,7 +2256,7 @@ impl crate::Context for ContextWgpuCore { let depth_stencil = desc.depth_stencil_attachment.as_ref().map(|dsa| { wgc::command::RenderPassDepthStencilAttachment { - view: *downcast_texture_view(dsa.view), + view: dsa.view.inner.as_core().id, depth: map_pass_channel(dsa.depth_ops.as_ref()), stencil: map_pass_channel(dsa.stencil_ops.as_ref()), } }); @@ -1759,1558 +2266,1265 @@ impl crate::Context for ContextWgpuCore { desc.timestamp_writes .as_ref() .map(|tw| wgc::command::PassTimestampWrites { - query_set: *downcast_query_set(tw.query_set), + query_set: tw.query_set.inner.as_core().id, beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }); - let (pass, err) = self.0.command_encoder_create_render_pass( - encoder_data.id, + let (pass, err) = self.context.0.command_encoder_create_render_pass( + self.id, &wgc::command::RenderPassDescriptor { label: desc.label.map(Borrowed), timestamp_writes:
timestamp_writes.as_ref(), color_attachments: std::borrow::Cow::Borrowed(&colors), depth_stencil_attachment: depth_stencil.as_ref(), - occlusion_query_set: desc.occlusion_query_set.map(downcast_query_set).copied(), + occlusion_query_set: desc.occlusion_query_set.map(|qs| qs.inner.as_core().id), }, ); if let Some(cause) = err { - self.handle_error( - &encoder_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, desc.label, "CommandEncoder::begin_render_pass", ); } - Self::RenderPassData { + CoreRenderPass { + context: self.context.clone(), pass, - error_sink: encoder_data.error_sink.clone(), + error_sink: self.error_sink.clone(), + id: crate::cmp::Identifier::create(), } + .into() } - fn command_encoder_finish( - &self, - encoder_data: &mut Self::CommandEncoderData, - ) -> Self::CommandBufferData { + fn finish(&mut self) -> dispatch::DispatchCommandBuffer { let descriptor = wgt::CommandBufferDescriptor::default(); - encoder_data.open = false; // prevent the drop - let (id, error) = self.0.command_encoder_finish(encoder_data.id, &descriptor); + self.open = false; // prevent the drop + let (id, error) = self.context.0.command_encoder_finish(self.id, &descriptor); if let Some(cause) = error { - self.handle_error_nolabel(&encoder_data.error_sink, cause, "a CommandEncoder"); + self.context + .handle_error_nolabel(&self.error_sink, cause, "a CommandEncoder"); } - id + CoreCommandBuffer { + context: self.context.clone(), + id, + } + .into() } - fn command_encoder_clear_texture( + fn clear_texture( &self, - encoder_data: &Self::CommandEncoderData, - texture_data: &Self::TextureData, - subresource_range: &wgt::ImageSubresourceRange, + texture: &dispatch::DispatchTexture, + subresource_range: &crate::ImageSubresourceRange, ) { - if let Err(cause) = self.0.command_encoder_clear_texture( - encoder_data.id, - texture_data.id, - subresource_range, - ) { - self.handle_error_nolabel( - &encoder_data.error_sink, + let texture = texture.as_core(); + + if let Err(cause) = + self.context + .0 + .command_encoder_clear_texture(self.id, texture.id, subresource_range) + { + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::clear_texture", ); } } - fn command_encoder_clear_buffer( + fn clear_buffer( &self, - encoder_data: &Self::CommandEncoderData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: Option<wgt::BufferAddress>, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + size: Option<crate::BufferAddress>, ) { - if let Err(cause) = - self.0 - .command_encoder_clear_buffer(encoder_data.id, buffer_data.id, offset, size) + let buffer = buffer.as_core(); + + if let Err(cause) = self + .context + .0 + .command_encoder_clear_buffer(self.id, buffer.id, offset, size) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::fill_buffer", ); } } - fn command_encoder_insert_debug_marker( - &self, - encoder_data: &Self::CommandEncoderData, - label: &str, - ) { + fn insert_debug_marker(&self, label: &str) { if let Err(cause) = self + .context .0 - .command_encoder_insert_debug_marker(encoder_data.id, label) + .command_encoder_insert_debug_marker(self.id, label) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::insert_debug_marker", ); } } - fn command_encoder_push_debug_group( - &self, - encoder_data: &Self::CommandEncoderData, - label: &str, - ) { + fn push_debug_group(&self, label: &str) { if let
Err(cause) = self + .context .0 - .command_encoder_push_debug_group(encoder_data.id, label) + .command_encoder_push_debug_group(self.id, label) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::push_debug_group", ); } } - fn command_encoder_pop_debug_group(&self, encoder_data: &Self::CommandEncoderData) { - if let Err(cause) = self.0.command_encoder_pop_debug_group(encoder_data.id) { - self.handle_error_nolabel( - &encoder_data.error_sink, + fn pop_debug_group(&self) { + if let Err(cause) = self.context.0.command_encoder_pop_debug_group(self.id) { + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::pop_debug_group", ); } } - fn command_encoder_write_timestamp( - &self, - encoder_data: &Self::CommandEncoderData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ) { + fn write_timestamp(&self, query_set: &dispatch::DispatchQuerySet, query_index: u32) { + let query_set = query_set.as_core(); + if let Err(cause) = - self.0 - .command_encoder_write_timestamp(encoder_data.id, *query_set_data, query_index) + self.context + .0 + .command_encoder_write_timestamp(self.id, query_set.id, query_index) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::write_timestamp", ); } } - fn command_encoder_resolve_query_set( + fn resolve_query_set( &self, - encoder_data: &Self::CommandEncoderData, - query_set_data: &Self::QuerySetData, + query_set: &dispatch::DispatchQuerySet, first_query: u32, query_count: u32, - destination_data: &Self::BufferData, - destination_offset: wgt::BufferAddress, + destination: &dispatch::DispatchBuffer, + destination_offset: crate::BufferAddress, ) { - if let Err(cause) = self.0.command_encoder_resolve_query_set( - encoder_data.id, - *query_set_data, + let query_set = query_set.as_core(); + let destination = destination.as_core(); + + if let Err(cause) = self.context.0.command_encoder_resolve_query_set( + self.id, + query_set.id, first_query, query_count, - destination_data.id, + destination.id, destination_offset, ) { - self.handle_error_nolabel( - &encoder_data.error_sink, + self.context.handle_error_nolabel( + &self.error_sink, cause, "CommandEncoder::resolve_query_set", ); } } - fn render_bundle_encoder_finish( - &self, - encoder_data: Self::RenderBundleEncoderData, - desc: &crate::RenderBundleDescriptor<'_>, - ) -> Self::RenderBundleData { - let (id, error) = self.0.render_bundle_encoder_finish( - encoder_data, - &desc.map_label(|l| l.map(Borrowed)), - None, - ); - if let Some(err) = error { - self.handle_error_fatal(err, "RenderBundleEncoder::finish"); - } - id - } - - fn queue_write_buffer( + fn build_acceleration_structures_unsafe_tlas<'a>( &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - data: &[u8], + blas: &mut dyn Iterator<Item = &'a crate::BlasBuildEntry<'a>>, + tlas: &mut dyn Iterator<Item = &'a crate::TlasBuildEntry<'a>>, ) { - match self - .0 - .queue_write_buffer(queue_data.id, buffer_data.id, offset, data) - { - Ok(()) => (), - Err(err) => { - self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer") + let blas = blas.map(|e: &crate::BlasBuildEntry<'_>| { + let geometries = match e.geometry { + crate::BlasGeometries::TriangleGeometries(ref triangle_geometries) => { + let iter = triangle_geometries.iter().map(|tg| { + wgc::ray_tracing::BlasTriangleGeometry { + vertex_buffer: tg.vertex_buffer.inner.as_core().id, + index_buffer:
tg.index_buffer.map(|buf| buf.inner.as_core().id), + transform_buffer: tg.transform_buffer.map(|buf| buf.inner.as_core().id), + size: tg.size, + transform_buffer_offset: tg.transform_buffer_offset, + first_vertex: tg.first_vertex, + vertex_stride: tg.vertex_stride, + index_buffer_offset: tg.index_buffer_offset, + } + }); + wgc::ray_tracing::BlasGeometries::TriangleGeometries(Box::new(iter)) + } + }; + wgc::ray_tracing::BlasBuildEntry { + blas_id: e.blas.shared.inner.as_core().id, + geometries, } - } - } + }); - fn queue_validate_write_buffer( - &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: wgt::BufferSize, - ) -> Option<()> { - match self - .0 - .queue_validate_write_buffer(queue_data.id, buffer_data.id, offset, size) - { - Ok(()) => Some(()), - Err(err) => { - self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer_with"); - None + let tlas = tlas.into_iter().map(|e: &crate::TlasBuildEntry<'a>| { + wgc::ray_tracing::TlasBuildEntry { + tlas_id: e.tlas.inner.as_core().id, + instance_buffer_id: e.instance_buffer.inner.as_core().id, + instance_count: e.instance_count, } - } - } + }); - fn queue_create_staging_buffer( - &self, - queue_data: &Self::QueueData, - size: wgt::BufferSize, - ) -> Option<Box<dyn crate::context::QueueWriteBuffer>> { - match self + if let Err(cause) = self + .context .0 - .queue_create_staging_buffer(queue_data.id, size, None) + .command_encoder_build_acceleration_structures_unsafe_tlas(self.id, blas, tlas) { - Ok((buffer_id, ptr)) => Some(Box::new(QueueWriteBuffer { - buffer_id, - mapping: BufferMappedRange { - ptr, - size: size.get() as usize, - }, - })), - Err(err) => { - self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer_with"); - None - } + self.context.handle_error_nolabel( + &self.error_sink, + cause, + "CommandEncoder::build_acceleration_structures_unsafe_tlas", + ); } } - fn queue_write_staging_buffer( + fn build_acceleration_structures<'a>( &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - staging_buffer: &dyn crate::context::QueueWriteBuffer, + blas: &mut dyn Iterator<Item = &'a crate::BlasBuildEntry<'a>>, + tlas: &mut dyn Iterator<Item = &'a crate::TlasPackage>, ) { - let staging_buffer = staging_buffer - .as_any() - .downcast_ref::<QueueWriteBuffer>() - .unwrap(); - match self.0.queue_write_staging_buffer( - queue_data.id, - buffer_data.id, - offset, - staging_buffer.buffer_id, - ) { - Ok(()) => (), - Err(err) => { - self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_buffer_with"); + let blas = blas.map(|e: &crate::BlasBuildEntry<'_>| { + let geometries = match e.geometry { + crate::BlasGeometries::TriangleGeometries(ref triangle_geometries) => { + let iter = triangle_geometries.iter().map(|tg| { + wgc::ray_tracing::BlasTriangleGeometry { + vertex_buffer: tg.vertex_buffer.inner.as_core().id, + index_buffer: tg.index_buffer.map(|buf| buf.inner.as_core().id), + transform_buffer: tg.transform_buffer.map(|buf| buf.inner.as_core().id), + size: tg.size, + transform_buffer_offset: tg.transform_buffer_offset, + first_vertex: tg.first_vertex, + vertex_stride: tg.vertex_stride, + index_buffer_offset: tg.index_buffer_offset, + } + }); + wgc::ray_tracing::BlasGeometries::TriangleGeometries(Box::new(iter)) + } + }; + wgc::ray_tracing::BlasBuildEntry { + blas_id: e.blas.shared.inner.as_core().id, + geometries, } - } - } + }); - fn queue_write_texture( - &self, - queue_data: &Self::QueueData, - texture: crate::TexelCopyTextureInfo<'_>, - data: &[u8], - data_layout: wgt::TexelCopyBufferLayout, - size: wgt::Extent3d, -
) { - match self.0.queue_write_texture( - queue_data.id, - &map_texture_copy_view(texture), - data, - &data_layout, - &size, - ) { - Ok(()) => (), - Err(err) => { - self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::write_texture") + let tlas = tlas.into_iter().map(|e| { + let instances = e + .instances + .iter() + .map(|instance: &Option<crate::TlasInstance>| { + instance + .as_ref() + .map(|instance| wgc::ray_tracing::TlasInstance { + blas_id: instance.blas.inner.as_core().id, + transform: &instance.transform, + custom_index: instance.custom_index, + mask: instance.mask, + }) + }); + wgc::ray_tracing::TlasPackage { + tlas_id: e.tlas.inner.as_core().id, + instances: Box::new(instances), + lowest_unmodified: e.lowest_unmodified, } - } - } + }); - #[cfg(any(webgpu, webgl))] - fn queue_copy_external_image_to_texture( - &self, - queue_data: &Self::QueueData, - source: &wgt::CopyExternalImageSourceInfo, - dest: crate::CopyExternalImageDestInfo<'_>, - size: wgt::Extent3d, - ) { - match self.0.queue_copy_external_image_to_texture( - queue_data.id, - source, - map_texture_tagged_copy_view(dest), - size, - ) { - Ok(()) => (), - Err(err) => self.handle_error_nolabel( - &queue_data.error_sink, - err, - "Queue::copy_external_image_to_texture", - ), + if let Err(cause) = self + .context + .0 + .command_encoder_build_acceleration_structures(self.id, blas, tlas) + { + self.context.handle_error_nolabel( + &self.error_sink, + cause, + "CommandEncoder::build_acceleration_structures", + ); } } +} - fn queue_submit<I: Iterator<Item = Self::CommandBufferData>>( - &self, - queue_data: &Self::QueueData, - command_buffers: I, - ) -> Self::SubmissionIndexData { - let temp_command_buffers = command_buffers.collect::<SmallVec<[_; 4]>>(); - - let index = match self.0.queue_submit(queue_data.id, &temp_command_buffers) { - Ok(index) => index, - Err((index, err)) => { - self.handle_error_nolabel(&queue_data.error_sink, err, "Queue::submit"); - index - } - }; - - for cmdbuf in &temp_command_buffers { - self.0.command_buffer_drop(*cmdbuf); +impl Drop for CoreCommandEncoder { + fn drop(&mut self) { + if self.open { + self.context.0.command_encoder_drop(self.id) } - - index - } - - fn queue_get_timestamp_period(&self, queue_data: &Self::QueueData) -> f32 { - self.0.queue_get_timestamp_period(queue_data.id) - } - - fn queue_on_submitted_work_done( - &self, - queue_data: &Self::QueueData, - callback: crate::context::SubmittedWorkDoneCallback, - ) { - self.0.queue_on_submitted_work_done(queue_data.id, callback); - } - - fn device_start_capture(&self, device_data: &Self::DeviceData) { - self.0.device_start_capture(device_data.id); - } - - fn device_stop_capture(&self, device_data: &Self::DeviceData) { - self.0.device_stop_capture(device_data.id); } +} - fn device_get_internal_counters( - &self, - device_data: &Self::DeviceData, - ) -> wgt::InternalCounters { - self.0.device_get_internal_counters(device_data.id) - } +impl dispatch::CommandBufferInterface for CoreCommandBuffer {} - fn device_generate_allocator_report( - &self, - device_data: &Self::DeviceData, - ) -> Option<wgt::AllocatorReport> { - self.0.device_generate_allocator_report(device_data.id) +impl Drop for CoreCommandBuffer { + fn drop(&mut self) { + self.context.0.command_buffer_drop(self.id) } +} - fn pipeline_cache_get_data( - &self, - // TODO: Used for error handling?
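// ---- [Editor's illustration; not part of the patch] ----------------------
// `finish()` (earlier in this file) sets `self.open = false` before handing
// the id to a CoreCommandBuffer, and the `Drop for CoreCommandEncoder` above
// frees the encoder only while `open` is still true. A reduced sketch of
// that guard, with hypothetical names:
struct Encoder {
    open: bool,
}

impl Encoder {
    // Consumes the encoder; the returned id now owns the resource.
    fn finish(mut self) -> u64 {
        self.open = false; // prevent the drop below from freeing it
        42 // stand-in for the wgpu-core command-buffer id
    }
}

impl Drop for Encoder {
    fn drop(&mut self) {
        if self.open {
            // only encoders that were never finished are freed here
        }
    }
}
// ---- [end editor's illustration] -----------------------------------------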
- cache_data: &Self::PipelineCacheData, - ) -> Option<Vec<u8>> { - self.0.pipeline_cache_get_data(*cache_data) - } +impl dispatch::ComputePassInterface for CoreComputePass { + fn set_pipeline(&mut self, pipeline: &dispatch::DispatchComputePipeline) { + let pipeline = pipeline.as_core(); - fn compute_pass_set_pipeline( - &self, - pass_data: &mut Self::ComputePassData, - pipeline_data: &Self::ComputePipelineData, - ) { if let Err(cause) = self + .context .0 - .compute_pass_set_pipeline(&mut pass_data.pass, pipeline_data.id) + .compute_pass_set_pipeline(&mut self.pass, pipeline.id) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::set_pipeline", ); } } - fn compute_pass_set_bind_group( - &self, - pass_data: &mut Self::ComputePassData, + fn set_bind_group( + &mut self, index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[wgt::DynamicOffset], + bind_group: Option<&dispatch::DispatchBindGroup>, + offsets: &[crate::DynamicOffset], ) { - let bg = bind_group_data.cloned(); + let bg = bind_group.map(|bg| bg.as_core().id); + if let Err(cause) = - self.0 - .compute_pass_set_bind_group(&mut pass_data.pass, index, bg, offsets) + self.context + .0 + .compute_pass_set_bind_group(&mut self.pass, index, bg, offsets) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::set_bind_group", ); } } - fn compute_pass_set_push_constants( - &self, - pass_data: &mut Self::ComputePassData, - offset: u32, - data: &[u8], - ) { + fn set_push_constants(&mut self, offset: u32, data: &[u8]) { if let Err(cause) = - self.0 - .compute_pass_set_push_constants(&mut pass_data.pass, offset, data) + self.context + .0 + .compute_pass_set_push_constants(&mut self.pass, offset, data) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::set_push_constant", ); } } - fn compute_pass_insert_debug_marker(&self, pass_data: &mut Self::ComputePassData, label: &str) { - if let Err(cause) = self - .0 - .compute_pass_insert_debug_marker(&mut pass_data.pass, label, 0) + fn insert_debug_marker(&mut self, label: &str) { + if let Err(cause) = + self.context + .0 + .compute_pass_insert_debug_marker(&mut self.pass, label, 0) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::insert_debug_marker", ); } } - fn compute_pass_push_debug_group( - &self, - pass_data: &mut Self::ComputePassData, - group_label: &str, - ) { + fn push_debug_group(&mut self, group_label: &str) { if let Err(cause) = - self.0 - .compute_pass_push_debug_group(&mut pass_data.pass, group_label, 0) + self.context + .0 + .compute_pass_push_debug_group(&mut self.pass, group_label, 0) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::push_debug_group", ); } } - fn compute_pass_pop_debug_group(&self, pass_data: &mut Self::ComputePassData) { - if let Err(cause) = self.0.compute_pass_pop_debug_group(&mut pass_data.pass) { - self.handle_error( - &pass_data.error_sink, + fn pop_debug_group(&mut self) { + if let Err(cause) = self.context.0.compute_pass_pop_debug_group(&mut self.pass) { + self.context.handle_error( +
&self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::pop_debug_group", ); } } - fn compute_pass_write_timestamp( - &self, - pass_data: &mut Self::ComputePassData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ) { + fn write_timestamp(&mut self, query_set: &dispatch::DispatchQuerySet, query_index: u32) { + let query_set = query_set.as_core(); + if let Err(cause) = - self.0 - .compute_pass_write_timestamp(&mut pass_data.pass, *query_set_data, query_index) + self.context + .0 + .compute_pass_write_timestamp(&mut self.pass, query_set.id, query_index) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::write_timestamp", ); } } - fn compute_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut Self::ComputePassData, - query_set_data: &Self::QuerySetData, + fn begin_pipeline_statistics_query( + &mut self, + query_set: &dispatch::DispatchQuerySet, query_index: u32, ) { - if let Err(cause) = self.0.compute_pass_begin_pipeline_statistics_query( - &mut pass_data.pass, - *query_set_data, + let query_set = query_set.as_core(); + + if let Err(cause) = self.context.0.compute_pass_begin_pipeline_statistics_query( + &mut self.pass, + query_set.id, query_index, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::begin_pipeline_statistics_query", ); } } - fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::ComputePassData) { + fn end_pipeline_statistics_query(&mut self) { if let Err(cause) = self + .context .0 - .compute_pass_end_pipeline_statistics_query(&mut pass_data.pass) + .compute_pass_end_pipeline_statistics_query(&mut self.pass) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::end_pipeline_statistics_query", ); } } - fn compute_pass_dispatch_workgroups( - &self, - pass_data: &mut Self::ComputePassData, - x: u32, - y: u32, - z: u32, - ) { + fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) { if let Err(cause) = self + .context .0 - .compute_pass_dispatch_workgroups(&mut pass_data.pass, x, y, z) + .compute_pass_dispatch_workgroups(&mut self.pass, x, y, z) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::dispatch_workgroups", ); } } - fn compute_pass_dispatch_workgroups_indirect( - &self, - pass_data: &mut Self::ComputePassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, + fn dispatch_workgroups_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, ) { - if let Err(cause) = self.0.compute_pass_dispatch_workgroups_indirect( - &mut pass_data.pass, - indirect_buffer_data.id, + let indirect_buffer = indirect_buffer.as_core(); + + if let Err(cause) = self.context.0.compute_pass_dispatch_workgroups_indirect( + &mut self.pass, + indirect_buffer.id, indirect_offset, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::dispatch_workgroups_indirect", ); } } - fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData) { - if let Err(cause) = 
self.0.compute_pass_end(&mut pass_data.pass) { - self.handle_error( - &pass_data.error_sink, + fn end(&mut self) { + if let Err(cause) = self.context.0.compute_pass_end(&mut self.pass) { + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "ComputePass::end", ); } } +} - fn render_bundle_encoder_set_pipeline( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - pipeline_data: &Self::RenderPipelineData, - ) { - wgpu_render_bundle_set_pipeline(encoder_data, pipeline_data.id) - } - - fn render_bundle_encoder_set_bind_group( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[wgt::DynamicOffset], - ) { - let bg = bind_group_data.cloned(); - unsafe { - wgpu_render_bundle_set_bind_group( - encoder_data, - index, - bg, - offsets.as_ptr(), - offsets.len(), - ) - } - } - - fn render_bundle_encoder_set_index_buffer( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - buffer_data: &Self::BufferData, - index_format: wgt::IndexFormat, - offset: wgt::BufferAddress, - size: Option<wgt::BufferSize>, - ) { - encoder_data.set_index_buffer(buffer_data.id, index_format, offset, size) - } - - fn render_bundle_encoder_set_vertex_buffer( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - slot: u32, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: Option<wgt::BufferSize>, - ) { - wgpu_render_bundle_set_vertex_buffer(encoder_data, slot, buffer_data.id, offset, size) - } - - fn render_bundle_encoder_set_push_constants( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - stages: wgt::ShaderStages, - offset: u32, - data: &[u8], - ) { - unsafe { - wgpu_render_bundle_set_push_constants( - encoder_data, - stages, - offset, - data.len().try_into().unwrap(), - data.as_ptr(), - ) - } - } - - fn render_bundle_encoder_draw( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - vertices: Range<u32>, - instances: Range<u32>, - ) { - wgpu_render_bundle_draw( - encoder_data, - vertices.end - vertices.start, - instances.end - instances.start, - vertices.start, - instances.start, - ) - } - - fn render_bundle_encoder_draw_indexed( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indices: Range<u32>, - base_vertex: i32, - instances: Range<u32>, - ) { - wgpu_render_bundle_draw_indexed( - encoder_data, - indices.end - indices.start, - instances.end - instances.start, - indices.start, - base_vertex, - instances.start, - ) - } - - fn render_bundle_encoder_draw_indirect( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, - ) { - wgpu_render_bundle_draw_indirect(encoder_data, indirect_buffer_data.id, indirect_offset) +impl Drop for CoreComputePass { + fn drop(&mut self) { + dispatch::ComputePassInterface::end(self); } +} - fn render_bundle_encoder_draw_indexed_indirect( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, - ) { - wgpu_render_bundle_draw_indexed_indirect( - encoder_data, - indirect_buffer_data.id, - indirect_offset, - ) - } +impl dispatch::RenderPassInterface for CoreRenderPass { + fn set_pipeline(&mut self, pipeline: &dispatch::DispatchRenderPipeline) { + let pipeline = pipeline.as_core(); if let Err(cause) = self + .context .0 -
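// ---- [Editor's illustration; not part of the patch] ----------------------
// Note the `Drop for CoreComputePass` impl above: it routes through the same
// `ComputePassInterface::end` used by explicit calls, so dropping a pass and
// ending it share one code path, matching WebGPU's implicit pass end. A
// reduced sketch with hypothetical trait/struct names:
trait PassInterface {
    fn end(&mut self);
}

struct Pass {
    ended: bool,
}

impl PassInterface for Pass {
    fn end(&mut self) {
        // the real impl forwards wgpu-core's result to the error sink
        self.ended = true;
    }
}

impl Drop for Pass {
    fn drop(&mut self) {
        PassInterface::end(self); // same path as an explicit end
    }
}
// ---- [end editor's illustration] -----------------------------------------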
.render_pass_set_pipeline(&mut pass_data.pass, pipeline_data.id) + .render_pass_set_pipeline(&mut self.pass, pipeline.id) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::set_pipeline", ); } } - fn render_pass_set_bind_group( - &self, - pass_data: &mut Self::RenderPassData, + fn set_bind_group( + &mut self, index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[wgt::DynamicOffset], + bind_group: Option<&dispatch::DispatchBindGroup>, + offsets: &[crate::DynamicOffset], ) { - let bg = bind_group_data.cloned(); + let bg = bind_group.map(|bg| bg.as_core().id); + if let Err(cause) = - self.0 - .render_pass_set_bind_group(&mut pass_data.pass, index, bg, offsets) + self.context + .0 + .render_pass_set_bind_group(&mut self.pass, index, bg, offsets) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::set_bind_group", ); } } - fn render_pass_set_index_buffer( - &self, - pass_data: &mut Self::RenderPassData, - buffer_data: &Self::BufferData, - index_format: wgt::IndexFormat, - offset: wgt::BufferAddress, - size: Option<wgt::BufferSize>, + fn set_index_buffer( + &mut self, + buffer: &dispatch::DispatchBuffer, + index_format: crate::IndexFormat, + offset: crate::BufferAddress, + size: Option<crate::BufferSize>, ) { - if let Err(cause) = self.0.render_pass_set_index_buffer( - &mut pass_data.pass, - buffer_data.id, + let buffer = buffer.as_core(); + + if let Err(cause) = self.context.0.render_pass_set_index_buffer( + &mut self.pass, + buffer.id, index_format, offset, size, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::set_index_buffer", ); } } - fn render_pass_set_vertex_buffer( - &self, - pass_data: &mut Self::RenderPassData, + fn set_vertex_buffer( + &mut self, slot: u32, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: Option<wgt::BufferSize>, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + size: Option<crate::BufferSize>, ) { - if let Err(cause) = self.0.render_pass_set_vertex_buffer( - &mut pass_data.pass, + let buffer = buffer.as_core(); + + if let Err(cause) = self.context.0.render_pass_set_vertex_buffer( + &mut self.pass, slot, - buffer_data.id, + buffer.id, offset, size, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::set_vertex_buffer", ); } } - fn render_pass_set_push_constants( - &self, - pass_data: &mut Self::RenderPassData, - stages: wgt::ShaderStages, - offset: u32, - data: &[u8], - ) { + fn set_push_constants(&mut self, stages: crate::ShaderStages, offset: u32, data: &[u8]) { if let Err(cause) = - self.0 - .render_pass_set_push_constants(&mut pass_data.pass, stages, offset, data) + self.context + .0 + .render_pass_set_push_constants(&mut self.pass, stages, offset, data) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::set_push_constants", ); } } - fn render_pass_draw( - &self, - pass_data: &mut Self::RenderPassData, - vertices: Range<u32>, - instances: Range<u32>, + fn set_blend_constant(&mut self, color: crate::Color) { + if let Err(cause) = self + .context + .0 + .render_pass_set_blend_constant(&mut self.pass, color) + { +
self.context.handle_error( + &self.error_sink, + cause, + self.pass.label(), + "RenderPass::set_blend_constant", + ); + } + } + + fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { + if let Err(cause) = + self.context + .0 + .render_pass_set_scissor_rect(&mut self.pass, x, y, width, height) + { + self.context.handle_error( + &self.error_sink, + cause, + self.pass.label(), + "RenderPass::set_scissor_rect", + ); + } + } + + fn set_viewport( + &mut self, + x: f32, + y: f32, + width: f32, + height: f32, + min_depth: f32, + max_depth: f32, ) { - if let Err(cause) = self.0.render_pass_draw( - &mut pass_data.pass, + if let Err(cause) = self.context.0.render_pass_set_viewport( + &mut self.pass, + x, + y, + width, + height, + min_depth, + max_depth, + ) { + self.context.handle_error( + &self.error_sink, + cause, + self.pass.label(), + "RenderPass::set_viewport", + ); + } + } + + fn set_stencil_reference(&mut self, reference: u32) { + if let Err(cause) = self + .context + .0 + .render_pass_set_stencil_reference(&mut self.pass, reference) + { + self.context.handle_error( + &self.error_sink, + cause, + self.pass.label(), + "RenderPass::set_stencil_reference", + ); + } + } + + fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) { + if let Err(cause) = self.context.0.render_pass_draw( + &mut self.pass, vertices.end - vertices.start, instances.end - instances.start, vertices.start, instances.start, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::draw", ); } } - fn render_pass_draw_indexed( - &self, - pass_data: &mut Self::RenderPassData, - indices: Range<u32>, - base_vertex: i32, - instances: Range<u32>, - ) { - if let Err(cause) = self.0.render_pass_draw_indexed( - &mut pass_data.pass, + fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) { + if let Err(cause) = self.context.0.render_pass_draw_indexed( + &mut self.pass, indices.end - indices.start, instances.end - instances.start, indices.start, base_vertex, instances.start, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::draw_indexed", ); } } - fn render_pass_draw_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, + fn draw_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, ) { - if let Err(cause) = self.0.render_pass_draw_indirect( - &mut pass_data.pass, - indirect_buffer_data.id, + let indirect_buffer = indirect_buffer.as_core(); + + if let Err(cause) = self.context.0.render_pass_draw_indirect( + &mut self.pass, + indirect_buffer.id, indirect_offset, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::draw_indirect", ); } } - fn render_pass_draw_indexed_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, + fn draw_indexed_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, ) { - if let Err(cause) = self.0.render_pass_draw_indexed_indirect( - &mut pass_data.pass, - indirect_buffer_data.id, + let indirect_buffer = indirect_buffer.as_core(); + + if let Err(cause) =
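// ---- [Editor's illustration; not part of the patch] ----------------------
// `draw`/`draw_indexed` above hide a calling-convention change: the public
// API speaks Rust ranges, while wgpu-core wants (first, count) pairs, hence
// the `end - start` arithmetic. A worked example with concrete numbers:
fn range_to_first_count(vertices: std::ops::Range<u32>) -> (u32, u32) {
    (vertices.start, vertices.end - vertices.start)
}

#[test]
fn draw_range_conversion() {
    // drawing vertices 3..9 means first_vertex = 3, vertex_count = 6
    assert_eq!(range_to_first_count(3..9), (3, 6));
}
// ---- [end editor's illustration] -----------------------------------------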
self.context.0.render_pass_draw_indexed_indirect( + &mut self.pass, + indirect_buffer.id, indirect_offset, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::draw_indexed_indirect", ); } } - fn render_pass_multi_draw_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, + fn multi_draw_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, count: u32, ) { - if let Err(cause) = self.0.render_pass_multi_draw_indirect( - &mut pass_data.pass, - indirect_buffer_data.id, + let indirect_buffer = indirect_buffer.as_core(); + + if let Err(cause) = self.context.0.render_pass_multi_draw_indirect( + &mut self.pass, + indirect_buffer.id, indirect_offset, count, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::multi_draw_indirect", ); } } - fn render_pass_multi_draw_indexed_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, + fn multi_draw_indexed_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, count: u32, ) { - if let Err(cause) = self.0.render_pass_multi_draw_indexed_indirect( - &mut pass_data.pass, - indirect_buffer_data.id, + let indirect_buffer = indirect_buffer.as_core(); + + if let Err(cause) = self.context.0.render_pass_multi_draw_indexed_indirect( + &mut self.pass, + indirect_buffer.id, indirect_offset, count, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::multi_draw_indexed_indirect", ); } } - fn render_pass_multi_draw_indirect_count( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, - count_buffer_data: &Self::BufferData, - count_buffer_offset: wgt::BufferAddress, + fn multi_draw_indirect_count( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + count_buffer: &dispatch::DispatchBuffer, + count_buffer_offset: crate::BufferAddress, max_count: u32, ) { - if let Err(cause) = self.0.render_pass_multi_draw_indirect_count( - &mut pass_data.pass, - indirect_buffer_data.id, + let indirect_buffer = indirect_buffer.as_core(); + let count_buffer = count_buffer.as_core(); + + if let Err(cause) = self.context.0.render_pass_multi_draw_indirect_count( + &mut self.pass, + indirect_buffer.id, indirect_offset, - count_buffer_data.id, + count_buffer.id, count_buffer_offset, max_count, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::multi_draw_indirect_count", ); } } - fn render_pass_multi_draw_indexed_indirect_count( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: wgt::BufferAddress, - count_buffer_data: &Self::BufferData, - count_buffer_offset: wgt::BufferAddress, + fn multi_draw_indexed_indirect_count( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + count_buffer: &dispatch::DispatchBuffer, + count_buffer_offset: 
crate::BufferAddress, max_count: u32, ) { - if let Err(cause) = self.0.render_pass_multi_draw_indexed_indirect_count( - &mut pass_data.pass, - indirect_buffer_data.id, - indirect_offset, - count_buffer_data.id, - count_buffer_offset, - max_count, - ) { - self.handle_error( - &pass_data.error_sink, - cause, - pass_data.pass.label(), - "RenderPass::multi_draw_indexed_indirect_count", - ); - } - } - - fn render_pass_set_blend_constant( - &self, - pass_data: &mut Self::RenderPassData, - color: wgt::Color, - ) { - if let Err(cause) = self - .0 - .render_pass_set_blend_constant(&mut pass_data.pass, color) - { - self.handle_error( - &pass_data.error_sink, - cause, - pass_data.pass.label(), - "RenderPass::set_blend_constant", - ); - } - } - - fn render_pass_set_scissor_rect( - &self, - pass_data: &mut Self::RenderPassData, - x: u32, - y: u32, - width: u32, - height: u32, - ) { - if let Err(cause) = - self.0 - .render_pass_set_scissor_rect(&mut pass_data.pass, x, y, width, height) - { - self.handle_error( - &pass_data.error_sink, - cause, - pass_data.pass.label(), - "RenderPass::set_scissor_rect", - ); - } - } - - fn render_pass_set_viewport( - &self, - pass_data: &mut Self::RenderPassData, - x: f32, - y: f32, - width: f32, - height: f32, - min_depth: f32, - max_depth: f32, - ) { - if let Err(cause) = self.0.render_pass_set_viewport( - &mut pass_data.pass, - x, - y, - width, - height, - min_depth, - max_depth, - ) { - self.handle_error( - &pass_data.error_sink, - cause, - pass_data.pass.label(), - "RenderPass::set_viewport", - ); - } - } + let indirect_buffer = indirect_buffer.as_core(); + let count_buffer = count_buffer.as_core(); - fn render_pass_set_stencil_reference( - &self, - pass_data: &mut Self::RenderPassData, - reference: u32, - ) { if let Err(cause) = self + .context .0 - .render_pass_set_stencil_reference(&mut pass_data.pass, reference) + .render_pass_multi_draw_indexed_indirect_count( + &mut self.pass, + indirect_buffer.id, + indirect_offset, + count_buffer.id, + count_buffer_offset, + max_count, + ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), - "RenderPass::set_stencil_reference", + self.pass.label(), + "RenderPass::multi_draw_indexed_indirect_count", ); } } - fn render_pass_insert_debug_marker(&self, pass_data: &mut Self::RenderPassData, label: &str) { + fn insert_debug_marker(&mut self, label: &str) { if let Err(cause) = self + .context .0 - .render_pass_insert_debug_marker(&mut pass_data.pass, label, 0) + .render_pass_insert_debug_marker(&mut self.pass, label, 0) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::insert_debug_marker", ); } } - fn render_pass_push_debug_group( - &self, - pass_data: &mut Self::RenderPassData, - group_label: &str, - ) { - if let Err(cause) = self - .0 - .render_pass_push_debug_group(&mut pass_data.pass, group_label, 0) + fn push_debug_group(&mut self, group_label: &str) { + if let Err(cause) = + self.context + .0 + .render_pass_push_debug_group(&mut self.pass, group_label, 0) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::push_debug_group", ); } } - fn render_pass_pop_debug_group(&self, pass_data: &mut Self::RenderPassData) { - if let Err(cause) = self.0.render_pass_pop_debug_group(&mut pass_data.pass) { - self.handle_error( - 
&pass_data.error_sink, + fn pop_debug_group(&mut self) { + if let Err(cause) = self.context.0.render_pass_pop_debug_group(&mut self.pass) { + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::pop_debug_group", ); } } - fn render_pass_write_timestamp( - &self, - pass_data: &mut Self::RenderPassData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ) { + fn write_timestamp(&mut self, query_set: &dispatch::DispatchQuerySet, query_index: u32) { + let query_set = query_set.as_core(); + if let Err(cause) = - self.0 - .render_pass_write_timestamp(&mut pass_data.pass, *query_set_data, query_index) + self.context + .0 + .render_pass_write_timestamp(&mut self.pass, query_set.id, query_index) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::write_timestamp", ); } } - fn render_pass_begin_occlusion_query( - &self, - pass_data: &mut Self::RenderPassData, - query_index: u32, - ) { + fn begin_occlusion_query(&mut self, query_index: u32) { if let Err(cause) = self + .context .0 - .render_pass_begin_occlusion_query(&mut pass_data.pass, query_index) + .render_pass_begin_occlusion_query(&mut self.pass, query_index) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::begin_occlusion_query", ); } } - fn render_pass_end_occlusion_query(&self, pass_data: &mut Self::RenderPassData) { - if let Err(cause) = self.0.render_pass_end_occlusion_query(&mut pass_data.pass) { - self.handle_error( - &pass_data.error_sink, + fn end_occlusion_query(&mut self) { + if let Err(cause) = self + .context + .0 + .render_pass_end_occlusion_query(&mut self.pass) + { + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::end_occlusion_query", ); } } - fn render_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut Self::RenderPassData, - query_set_data: &Self::QuerySetData, + fn begin_pipeline_statistics_query( + &mut self, + query_set: &dispatch::DispatchQuerySet, query_index: u32, ) { - if let Err(cause) = self.0.render_pass_begin_pipeline_statistics_query( - &mut pass_data.pass, - *query_set_data, + let query_set = query_set.as_core(); + + if let Err(cause) = self.context.0.render_pass_begin_pipeline_statistics_query( + &mut self.pass, + query_set.id, query_index, ) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::begin_pipeline_statistics_query", ); } } - fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::RenderPassData) { + fn end_pipeline_statistics_query(&mut self) { if let Err(cause) = self + .context .0 - .render_pass_end_pipeline_statistics_query(&mut pass_data.pass) + .render_pass_end_pipeline_statistics_query(&mut self.pass) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::end_pipeline_statistics_query", ); } } - fn render_pass_execute_bundles( - &self, - pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator<Item = &Self::RenderBundleData>, + fn execute_bundles( + &mut self, + render_bundles: &mut dyn Iterator<Item = &dispatch::DispatchRenderBundle>, ) { - let temp_render_bundles = render_bundles.copied().collect::<SmallVec<[_; 4]>>(); + let temp_render_bundles =
render_bundles + .map(|rb| rb.as_core().id) + .collect::<SmallVec<[_; 4]>>(); if let Err(cause) = self + .context .0 - .render_pass_execute_bundles(&mut pass_data.pass, &temp_render_bundles) + .render_pass_execute_bundles(&mut self.pass, &temp_render_bundles) { - self.handle_error( - &pass_data.error_sink, + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::execute_bundles", ); } } - fn render_pass_end(&self, pass_data: &mut Self::RenderPassData) { - if let Err(cause) = self.0.render_pass_end(&mut pass_data.pass) { - self.handle_error( - &pass_data.error_sink, + fn end(&mut self) { + if let Err(cause) = self.context.0.render_pass_end(&mut self.pass) { + self.context.handle_error( + &self.error_sink, cause, - pass_data.pass.label(), + self.pass.label(), "RenderPass::end", ); } } +} - fn device_create_blas( - &self, - device_data: &Self::DeviceData, - desc: &crate::CreateBlasDescriptor<'_>, - sizes: wgt::BlasGeometrySizeDescriptors, - ) -> (Option<u64>, Self::BlasData) { - let global = &self.0; - let (id, handle, error) = global.device_create_blas( - device_data.id, - &desc.map_label(|l| l.map(Borrowed)), - sizes, - None, - ); - if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, - cause, - desc.label, - "Device::create_blas", - ); - } - ( - handle, - Blas { - id, - // error_sink: Arc::clone(&device_data.error_sink), - }, - ) +impl Drop for CoreRenderPass { + fn drop(&mut self) { + dispatch::RenderPassInterface::end(self); } +} - fn device_create_tlas( - &self, - device_data: &Self::DeviceData, - desc: &crate::CreateTlasDescriptor<'_>, - ) -> Self::TlasData { - let global = &self.0; - let (id, error) = - global.device_create_tlas(device_data.id, &desc.map_label(|l| l.map(Borrowed)), None); - if let Some(cause) = error { - self.handle_error( - &device_data.error_sink, - cause, - desc.label, - "Device::create_blas", - ); - } - Tlas { - id, - // error_sink: Arc::clone(&device_data.error_sink), - } +impl dispatch::RenderBundleEncoderInterface for CoreRenderBundleEncoder { + fn set_pipeline(&mut self, pipeline: &dispatch::DispatchRenderPipeline) { + let pipeline = pipeline.as_core(); + + wgpu_render_bundle_set_pipeline(&mut self.encoder, pipeline.id) } - fn command_encoder_build_acceleration_structures_unsafe_tlas<'a>( - &'a self, - encoder_data: &Self::CommandEncoderData, - blas: impl Iterator<Item = crate::ContextBlasBuildEntry<'a, Self>>, - tlas: impl Iterator<Item = crate::ContextTlasBuildEntry<'a, Self>>, + fn set_bind_group( + &mut self, + index: u32, + bind_group: Option<&dispatch::DispatchBindGroup>, + offsets: &[crate::DynamicOffset], ) { - let global = &self.0; + let bg = bind_group.map(|bg| bg.as_core().id); - let blas = blas.map(|e: crate::ContextBlasBuildEntry<'_, Self>| { - let geometries = match e.geometries { - crate::ContextBlasGeometries::TriangleGeometries(triangle_geometries) => { - let iter = triangle_geometries.into_iter().map(|tg| { - wgc::ray_tracing::BlasTriangleGeometry { - vertex_buffer: tg.vertex_buffer.id, - index_buffer: tg.index_buffer.map(|buf| buf.id), - transform_buffer: tg.transform_buffer.map(|buf| buf.id), - size: tg.size, - transform_buffer_offset: tg.transform_buffer_offset, - first_vertex: tg.first_vertex, - vertex_stride: tg.vertex_stride, - index_buffer_offset: tg.index_buffer_offset, - } - }); - wgc::ray_tracing::BlasGeometries::TriangleGeometries(Box::new(iter)) - } - }; - wgc::ray_tracing::BlasBuildEntry { - blas_id: e.blas_data.id, - geometries, - } - }); - - let tlas = tlas - .into_iter() - .map(|e: crate::ContextTlasBuildEntry<'a, ContextWgpuCore>| {
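// ---- [Editor's illustration; not part of the patch] ----------------------
// The `as_core()` calls threaded through these impls replace the old free
// `downcast_*` functions: a Dispatch* handle wraps the per-backend type, and
// each wgpu-core impl unwraps its own variant. A reduced sketch of that
// dispatch shape (hypothetical names; the real types live in dispatch.rs):
struct CoreBufferSketch {
    id: u64,
}

enum DispatchBufferSketch {
    Core(CoreBufferSketch),
    // a WebGPU variant exists behind the webgpu cfg in the real code
}

impl DispatchBufferSketch {
    fn as_core(&self) -> &CoreBufferSketch {
        match self {
            DispatchBufferSketch::Core(b) => b,
        }
    }
}
// ---- [end editor's illustration] -----------------------------------------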
wgc::ray_tracing::TlasBuildEntry { - tlas_id: e.tlas_data.id, - instance_buffer_id: e.instance_buffer_data.id, - instance_count: e.instance_count, - } - }); - - if let Err(cause) = global.command_encoder_build_acceleration_structures_unsafe_tlas( - encoder_data.id, - blas, - tlas, - ) { - self.handle_error_nolabel( - &encoder_data.error_sink, - cause, - "CommandEncoder::build_acceleration_structures_unsafe_tlas", - ); + unsafe { + wgpu_render_bundle_set_bind_group( + &mut self.encoder, + index, + bg, + offsets.as_ptr(), + offsets.len(), + ) } } - fn command_encoder_build_acceleration_structures<'a>( - &'a self, - encoder_data: &Self::CommandEncoderData, - blas: impl Iterator<Item = crate::ContextBlasBuildEntry<'a, Self>>, - tlas: impl Iterator<Item = crate::ContextTlasPackage<'a, Self>>, + fn set_index_buffer( + &mut self, + buffer: &dispatch::DispatchBuffer, + index_format: crate::IndexFormat, + offset: crate::BufferAddress, + size: Option<crate::BufferSize>, ) { - let global = &self.0; + let buffer = buffer.as_core(); - let blas = blas.map(|e: crate::ContextBlasBuildEntry<'_, Self>| { - let geometries = match e.geometries { - crate::ContextBlasGeometries::TriangleGeometries(triangle_geometries) => { - let iter = triangle_geometries.into_iter().map(|tg| { - wgc::ray_tracing::BlasTriangleGeometry { - vertex_buffer: tg.vertex_buffer.id, - index_buffer: tg.index_buffer.map(|buf| buf.id), - transform_buffer: tg.transform_buffer.map(|buf| buf.id), - size: tg.size, - transform_buffer_offset: tg.transform_buffer_offset, - first_vertex: tg.first_vertex, - vertex_stride: tg.vertex_stride, - index_buffer_offset: tg.index_buffer_offset, - } - }); - wgc::ray_tracing::BlasGeometries::TriangleGeometries(Box::new(iter)) - } - }; - wgc::ray_tracing::BlasBuildEntry { - blas_id: e.blas_data.id, - geometries, - } - }); + self.encoder + .set_index_buffer(buffer.id, index_format, offset, size) + } - let tlas = tlas.into_iter().map(|e| { - let instances = - e.instances - .map(|instance: Option<crate::ContextTlasInstance<'a, ContextWgpuCore>>| { - instance.map(|instance| wgc::ray_tracing::TlasInstance { - blas_id: instance.blas_data.id, - transform: instance.transform, - custom_index: instance.custom_index, - mask: instance.mask, - }) - }); - wgc::ray_tracing::TlasPackage { - tlas_id: e.tlas_data.id, - instances: Box::new(instances), - lowest_unmodified: e.lowest_unmodified, - } - }); + fn set_vertex_buffer( + &mut self, + slot: u32, + buffer: &dispatch::DispatchBuffer, + offset: crate::BufferAddress, + size: Option<crate::BufferSize>, + ) { + let buffer = buffer.as_core(); - if let Err(cause) = - global.command_encoder_build_acceleration_structures(encoder_data.id, blas, tlas) - { - self.handle_error_nolabel( - &encoder_data.error_sink, - cause, - "CommandEncoder::build_acceleration_structures_unsafe_tlas", - ); + wgpu_render_bundle_set_vertex_buffer(&mut self.encoder, slot, buffer.id, offset, size) + } + + fn set_push_constants(&mut self, stages: crate::ShaderStages, offset: u32, data: &[u8]) { + unsafe { + wgpu_render_bundle_set_push_constants( + &mut self.encoder, + stages, + offset, + data.len().try_into().unwrap(), + data.as_ptr(), + ) } } - fn blas_destroy(&self, blas_data: &Self::BlasData) { - let global = &self.0; - let _ = global.blas_destroy(blas_data.id); + fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) { + wgpu_render_bundle_draw( + &mut self.encoder, + vertices.end - vertices.start, + instances.end - instances.start, + vertices.start, + instances.start, + ) } - fn blas_drop(&self, blas_data: &Self::BlasData) { - let global = &self.0; - global.blas_drop(blas_data.id) + fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
wgpu_render_bundle_draw_indexed( + &mut self.encoder, + indices.end - indices.start, + instances.end - instances.start, + indices.start, + base_vertex, + instances.start, + ) } - fn tlas_destroy(&self, tlas_data: &Self::TlasData) { - let global = &self.0; - let _ = global.tlas_destroy(tlas_data.id); + fn draw_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + ) { + let indirect_buffer = indirect_buffer.as_core(); + + wgpu_render_bundle_draw_indirect(&mut self.encoder, indirect_buffer.id, indirect_offset) } - fn tlas_drop(&self, tlas_data: &Self::TlasData) { - let global = &self.0; - global.tlas_drop(tlas_data.id) + fn draw_indexed_indirect( + &mut self, + indirect_buffer: &dispatch::DispatchBuffer, + indirect_offset: crate::BufferAddress, + ) { + let indirect_buffer = indirect_buffer.as_core(); + + wgpu_render_bundle_draw_indexed_indirect( + &mut self.encoder, + indirect_buffer.id, + indirect_offset, + ) } -} -#[derive(Debug)] -pub struct SurfaceOutputDetail { - surface_id: wgc::id::SurfaceId, + fn finish(self, desc: &crate::RenderBundleDescriptor<'_>) -> dispatch::DispatchRenderBundle + where + Self: Sized, + { + let (id, error) = self.context.0.render_bundle_encoder_finish( + self.encoder, + &desc.map_label(|l| l.map(Borrowed)), + None, + ); + if let Some(err) = error { + self.context + .handle_error_fatal(err, "RenderBundleEncoder::finish"); + } + CoreRenderBundle { id }.into() + } } -type ErrorSink = Arc<Mutex<ErrorSinkRaw>>; +impl dispatch::RenderBundleInterface for CoreRenderBundle {} -struct ErrorScope { - error: Option<crate::Error>, - filter: crate::ErrorFilter, -} +impl dispatch::SurfaceInterface for CoreSurface { + fn get_capabilities(&self, adapter: &dispatch::DispatchAdapter) -> wgt::SurfaceCapabilities { + let adapter = adapter.as_core(); -struct ErrorSinkRaw { - scopes: Vec<ErrorScope>, - uncaptured_handler: Option<Box<dyn UncapturedErrorHandler>>, -} + self.context + .0 + .surface_get_capabilities(self.id, adapter.id) + .unwrap_or_default() + } -impl ErrorSinkRaw { - fn new() -> ErrorSinkRaw { - ErrorSinkRaw { - scopes: Vec::new(), - uncaptured_handler: None, + fn configure(&self, device: &dispatch::DispatchDevice, config: &crate::SurfaceConfiguration) { + let device = device.as_core(); + + let error = self.context.0.surface_configure(self.id, device.id, config); + if let Some(e) = error { + self.context.handle_error_fatal(e, "Surface::configure"); + } else { + *self.configured_device.lock() = Some(device.id); } } - #[track_caller] - fn handle_error(&mut self, err: crate::Error) { - let filter = match err { - crate::Error::OutOfMemory { .. } => crate::ErrorFilter::OutOfMemory, - crate::Error::Validation { .. } => crate::ErrorFilter::Validation, - crate::Error::Internal { ..
} => crate::ErrorFilter::Internal, - }; - match self - .scopes - .iter_mut() - .rev() - .find(|scope| scope.filter == filter) - { - Some(scope) => { - if scope.error.is_none() { - scope.error = Some(err); - } - } - None => { - if let Some(custom_handler) = self.uncaptured_handler.as_ref() { - (custom_handler)(err); - } else { - // direct call preserves #[track_caller] where dyn can't - default_error_handler(err); - } + fn get_current_texture( + &self, + ) -> ( + Option<dispatch::DispatchTexture>, + crate::SurfaceStatus, + dispatch::DispatchSurfaceOutputDetail, + ) { + match self.context.0.surface_get_current_texture(self.id, None) { + Ok(wgc::present::SurfaceOutput { status, texture_id }) => { + let data = texture_id + .map(|id| CoreTexture { + context: self.context.clone(), + id, + error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), + }) + .map(Into::into); + + ( + data, + status, + CoreSurfaceOutputDetail { + context: self.context.clone(), + surface_id: self.id, + } + .into(), + ) } + Err(err) => self + .context + .handle_error_fatal(err, "Surface::get_current_texture_view"), } } } -impl fmt::Debug for ErrorSinkRaw { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "ErrorSink") +impl Drop for CoreSurface { + fn drop(&mut self) { + self.context.0.surface_drop(self.id) } } -#[track_caller] -fn default_error_handler(err: crate::Error) { - log::error!("Handling wgpu errors as fatal by default"); - panic!("wgpu error: {err}\n"); -} +impl dispatch::SurfaceOutputDetailInterface for CoreSurfaceOutputDetail { + fn present(&self) { + match self.context.0.surface_present(self.surface_id) { + Ok(_status) => (), + Err(err) => self.context.handle_error_fatal(err, "Surface::present"), + } + } -impl From<CreateShaderModuleError> for CompilationInfo { - fn from(value: CreateShaderModuleError) -> Self { - match value { - #[cfg(feature = "wgsl")] - CreateShaderModuleError::Parsing(v) => v.into(), - #[cfg(feature = "glsl")] - CreateShaderModuleError::ParsingGlsl(v) => v.into(), - #[cfg(feature = "spirv")] - CreateShaderModuleError::ParsingSpirV(v) => v.into(), - CreateShaderModuleError::Validation(v) => v.into(), - // Device errors are reported through the error sink, and are not compilation errors. - // Same goes for native shader module generation errors. - CreateShaderModuleError::Device(_) | CreateShaderModuleError::Generation => { - CompilationInfo { - messages: Vec::new(), - } - } - // Everything else is an error message without location information.
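// ---- [Editor's illustration; not part of the patch] ----------------------
// The removed `handle_error` above is WebGPU error-scope logic (it survives
// elsewhere in the reworked file): walk the scope stack innermost-first,
// capture the error in the first scope whose filter matches, otherwise fall
// through to the uncaptured-error handler. A minimal model with hypothetical
// types:
#[derive(PartialEq)]
enum Filter {
    Validation,
    OutOfMemory,
    Internal,
}

struct Scope {
    filter: Filter,
    error: Option<String>,
}

/// Returns true if a scope captured the error; false means the caller should
/// invoke the uncaptured-error handler instead.
fn capture(scopes: &mut [Scope], filter: Filter, err: String) -> bool {
    match scopes.iter_mut().rev().find(|s| s.filter == filter) {
        Some(scope) => {
            scope.error.get_or_insert(err); // only the first error is kept
            true
        }
        None => false,
    }
}
// ---- [end editor's illustration] -----------------------------------------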
-impl From<CreateShaderModuleError> for CompilationInfo {
-    fn from(value: CreateShaderModuleError) -> Self {
-        match value {
-            #[cfg(feature = "wgsl")]
-            CreateShaderModuleError::Parsing(v) => v.into(),
-            #[cfg(feature = "glsl")]
-            CreateShaderModuleError::ParsingGlsl(v) => v.into(),
-            #[cfg(feature = "spirv")]
-            CreateShaderModuleError::ParsingSpirV(v) => v.into(),
-            CreateShaderModuleError::Validation(v) => v.into(),
-            // Device errors are reported through the error sink, and are not compilation errors.
-            // Same goes for native shader module generation errors.
-            CreateShaderModuleError::Device(_) | CreateShaderModuleError::Generation => {
-                CompilationInfo {
-                    messages: Vec::new(),
-                }
-            }
-            // Everything else is an error message without location information.
-            _ => CompilationInfo {
-                messages: vec![CompilationMessage {
-                    message: value.to_string(),
-                    message_type: CompilationMessageType::Error,
-                    location: None,
-                }],
-            },
+    fn texture_discard(&self) {
+        match self.context.0.surface_texture_discard(self.surface_id) {
+            Ok(_status) => (),
+            Err(err) => self
+                .context
+                .handle_error_fatal(err, "Surface::discard_texture"),
         }
     }
 }
 
+impl Drop for CoreSurfaceOutputDetail {
+    fn drop(&mut self) {
+        // Discard gets called by the api struct
-#[derive(Debug)]
-pub struct QueueWriteBuffer {
-    buffer_id: wgc::id::StagingBufferId,
-    mapping: BufferMappedRange,
+
+        // no-op
+    }
 }
 
-impl crate::context::QueueWriteBuffer for QueueWriteBuffer {
+impl dispatch::QueueWriteBufferInterface for CoreQueueWriteBuffer {
     fn slice(&self) -> &[u8] {
         panic!()
     }
 
     #[inline]
     fn slice_mut(&mut self) -> &mut [u8] {
-        use crate::context::BufferMappedRange;
         self.mapping.slice_mut()
     }
-
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
 }
 
+impl Drop for CoreQueueWriteBuffer {
+    fn drop(&mut self) {
+        // The api struct calls queue.write_staging_buffer
-#[derive(Debug)]
-pub struct BufferMappedRange {
-    ptr: NonNull<u8>,
-    size: usize,
+
+        // no-op
+    }
 }
 
-#[cfg(send_sync)]
-unsafe impl Send for BufferMappedRange {}
-#[cfg(send_sync)]
-unsafe impl Sync for BufferMappedRange {}
-
-impl crate::context::BufferMappedRange for BufferMappedRange {
+impl dispatch::BufferMappedRangeInterface for CoreBufferMappedRange {
     #[inline]
     fn slice(&self) -> &[u8] {
         unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.size) }
@@ -3321,53 +3535,3 @@ impl crate::context::BufferMappedRange for BufferMappedRange {
         unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.size) }
     }
 }
-
-impl Drop for BufferMappedRange {
-    fn drop(&mut self) {
-        // Intentionally left blank so that `BufferMappedRange` still
-        // implements `Drop`, to match the web backend
-    }
-}
-
-fn downcast_buffer(buffer: &crate::Buffer) -> &<ContextWgpuCore as Context>::BufferData {
-    downcast_ref(buffer.data.as_ref())
-}
-fn downcast_texture(texture: &crate::Texture) -> &<ContextWgpuCore as Context>::TextureData {
-    downcast_ref(texture.data.as_ref())
-}
-fn downcast_texture_view(
-    texture_view: &crate::TextureView,
-) -> &<ContextWgpuCore as Context>::TextureViewData {
-    downcast_ref(texture_view.data.as_ref())
-}
-fn downcast_tlas(tlas: &crate::Tlas) -> &<ContextWgpuCore as Context>::TlasData {
-    downcast_ref(tlas.data.as_ref())
-}
-fn downcast_sampler(sampler: &crate::Sampler) -> &<ContextWgpuCore as Context>::SamplerData {
-    downcast_ref(sampler.data.as_ref())
-}
-fn downcast_query_set(
-    query_set: &crate::QuerySet,
-) -> &<ContextWgpuCore as Context>::QuerySetData {
-    downcast_ref(query_set.data.as_ref())
-}
-fn downcast_bind_group_layout(
-    bind_group_layout: &crate::BindGroupLayout,
-) -> &<ContextWgpuCore as Context>::BindGroupLayoutData {
-    downcast_ref(bind_group_layout.data.as_ref())
-}
-fn downcast_pipeline_layout(
-    pipeline_layout: &crate::PipelineLayout,
-) -> &<ContextWgpuCore as Context>::PipelineLayoutData {
-    downcast_ref(pipeline_layout.data.as_ref())
-}
-fn downcast_shader_module(
-    shader_module: &crate::ShaderModule,
-) -> &<ContextWgpuCore as Context>::ShaderModuleData {
-    downcast_ref(shader_module.data.as_ref())
-}
-fn downcast_pipeline_cache(
-    pipeline_cache: &crate::PipelineCache,
-) -> &<ContextWgpuCore as Context>::PipelineCacheData {
-    downcast_ref(pipeline_cache.data.as_ref())
-}
diff --git a/wgpu/src/cmp.rs b/wgpu/src/cmp.rs
new file mode 100644
index 0000000000..95d0e16f4a
--- /dev/null
+++ b/wgpu/src/cmp.rs
@@ -0,0 +1,107 @@
+//! We need to impl PartialEq, Eq, PartialOrd, Ord, and Hash for all handle types in wgpu.
+//!
+//! For types that have some already-unique property, we can use that property to implement these traits.
+//!
+//! For types (like WebGPU) that don't have such a property, we generate an identifier and use that.
+
+use std::{
+    num::NonZeroU64,
+    sync::atomic::{AtomicU64, Ordering},
+};
+
+static NEXT_ID: AtomicU64 = AtomicU64::new(1);
+
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Identifier {
+    inner: NonZeroU64,
+}
+
+impl Identifier {
+    pub fn create() -> Self {
+        let id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
+        // Safety: Will take 7000+ years of constant incrementing to overflow. It's fine.
+        let inner = unsafe { NonZeroU64::new_unchecked(id) };
+        Self { inner }
+    }
+}
+
+/// Implements PartialEq, Eq, PartialOrd, Ord, and Hash for a type by proxying the operations to a single field.
+///
+/// ```ignore
+/// impl_eq_ord_hash_proxy!(MyType => .field);
+/// ```
+macro_rules! impl_eq_ord_hash_proxy {
+    ($type:ty => $($access:tt)*) => {
+        impl PartialEq for $type {
+            fn eq(&self, other: &Self) -> bool {
+                self $($access)* == other $($access)*
+            }
+        }
+
+        impl Eq for $type {}
+
+        impl PartialOrd for $type {
+            fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+                Some(self.cmp(other))
+            }
+        }
+
+        impl Ord for $type {
+            fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+                self $($access)*.cmp(&other $($access)*)
+            }
+        }
+
+        impl std::hash::Hash for $type {
+            fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+                self $($access)*.hash(state)
+            }
+        }
+    };
+}
+
+/// Implements PartialEq, Eq, PartialOrd, Ord, and Hash for a type by comparing the addresses of the Arcs.
+///
+/// ```ignore
+/// impl_eq_ord_hash_arc_address!(MyType => .field);
+/// ```
+#[cfg_attr(not(wgpu_core), allow(unused_macros))]
+macro_rules! impl_eq_ord_hash_arc_address {
+    ($type:ty => $($access:tt)*) => {
+        impl PartialEq for $type {
+            fn eq(&self, other: &Self) -> bool {
+                let address_left = std::sync::Arc::as_ptr(&self $($access)*);
+                let address_right = std::sync::Arc::as_ptr(&other $($access)*);
+
+                address_left == address_right
+            }
+        }
+
+        impl Eq for $type {}
+
+        impl PartialOrd for $type {
+            fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+                Some(self.cmp(other))
+            }
+        }
+
+        impl Ord for $type {
+            fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+                let address_left = std::sync::Arc::as_ptr(&self $($access)*);
+                let address_right = std::sync::Arc::as_ptr(&other $($access)*);
+
+                address_left.cmp(&address_right)
+            }
+        }
+
+        impl std::hash::Hash for $type {
+            fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+                let address = std::sync::Arc::as_ptr(&self $($access)*);
+                address.hash(state)
+            }
+        }
+    };
+}
+
+#[cfg_attr(not(wgpu_core), allow(unused_imports))]
+pub(crate) use {impl_eq_ord_hash_arc_address, impl_eq_ord_hash_proxy};
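The new `cmp.rs` above gives every handle type identity semantics in one of two ways: proxy all comparisons to an already-unique field, or, when no such field exists, mint a process-unique identifier from an atomic counter. A compressed, self-contained sketch of the counter-based variant; the `Handle` type is invented for illustration and is not part of the patch:

```rust
use std::num::NonZeroU64;
use std::sync::atomic::{AtomicU64, Ordering};

static NEXT_ID: AtomicU64 = AtomicU64::new(1);

#[derive(Debug, PartialEq, Eq, Hash)]
struct Identifier(NonZeroU64);

impl Identifier {
    fn create() -> Self {
        // Relaxed is enough here: we only need uniqueness, not ordering
        // between threads.
        Self(NonZeroU64::new(NEXT_ID.fetch_add(1, Ordering::Relaxed)).unwrap())
    }
}

// A hypothetical handle type with no naturally unique field of its own.
struct Handle {
    ident: Identifier,
}

// Equality is proxied to the single unique field, which is what the
// impl_eq_ord_hash_proxy! macro expands to for each handle type.
impl PartialEq for Handle {
    fn eq(&self, other: &Self) -> bool {
        self.ident == other.ident
    }
}
impl Eq for Handle {}

fn main() {
    let a = Handle { ident: Identifier::create() };
    let b = Handle { ident: Identifier::create() };
    assert_ne!(a.ident, b.ident);
    assert!(a == a && !(a == b));
}
```

The `impl_eq_ord_hash_arc_address!` variant in the file applies the same proxying idea but compares `Arc::as_ptr` addresses instead of a stored identifier.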
diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs
deleted file mode 100644
index cd831e1fec..0000000000
--- a/wgpu/src/context.rs
+++ /dev/null
@@ -1,2914 +0,0 @@
-use std::{any::Any, fmt::Debug, future::Future, ops::Range, pin::Pin, sync::Arc};
-
-use wgt::{
-    strict_assert, AdapterInfo, BufferAddress, BufferSize, Color, DeviceLostReason,
-    DownlevelCapabilities, DynamicOffset, Extent3d, Features, ImageSubresourceRange, IndexFormat,
-    Limits, ShaderStages, SurfaceStatus, TexelCopyBufferLayout, TextureFormat,
-    TextureFormatFeatures, WasmNotSend, WasmNotSendSync,
-};
-
-use crate::{
-    AnyWasmNotSendSync, BindGroupDescriptor, BindGroupLayoutDescriptor, BufferAsyncError,
-    BufferDescriptor, CommandEncoderDescriptor, CompilationInfo, ComputePassDescriptor,
-    ComputePipelineDescriptor, DeviceDescriptor, Error, ErrorFilter, Maintain, MaintainResult,
-    MapMode, PipelineCacheDescriptor, PipelineLayoutDescriptor,
QuerySetDescriptor, - RenderBundleDescriptor, RenderBundleEncoderDescriptor, RenderPassDescriptor, - RenderPipelineDescriptor, RequestAdapterOptions, RequestDeviceError, SamplerDescriptor, - ShaderModuleDescriptor, ShaderModuleDescriptorSpirV, SurfaceTargetUnsafe, TexelCopyBufferInfo, - TexelCopyTextureInfo, TextureDescriptor, TextureViewDescriptor, UncapturedErrorHandler, -}; -/// Meta trait for an data associated with an id tracked by a context. -/// -/// There is no need to manually implement this trait since there is a blanket implementation for this trait. -#[cfg_attr(target_os = "emscripten", allow(dead_code))] -pub trait ContextData: Debug + WasmNotSendSync + 'static {} -impl ContextData for T {} - -pub trait Context: Debug + WasmNotSendSync + Sized { - type AdapterData: ContextData; - type DeviceData: ContextData; - type QueueData: ContextData; - type ShaderModuleData: ContextData; - type BindGroupLayoutData: ContextData; - type BindGroupData: ContextData; - type TextureViewData: ContextData; - type SamplerData: ContextData; - type BufferData: ContextData; - type TextureData: ContextData; - type QuerySetData: ContextData; - type PipelineLayoutData: ContextData; - type RenderPipelineData: ContextData; - type ComputePipelineData: ContextData; - type PipelineCacheData: ContextData; - type CommandEncoderData: ContextData; - type ComputePassData: ContextData; - type RenderPassData: ContextData; - type CommandBufferData: ContextData; - type RenderBundleEncoderData: ContextData; - type RenderBundleData: ContextData; - type SurfaceData: ContextData; - - type BlasData: ContextData; - type TlasData: ContextData; - - type SurfaceOutputDetail: WasmNotSendSync + 'static; - type SubmissionIndexData: ContextData + Copy; - - type RequestAdapterFuture: Future> + WasmNotSend + 'static; - type RequestDeviceFuture: Future> - + WasmNotSend - + 'static; - type PopErrorScopeFuture: Future> + WasmNotSend + 'static; - - type CompilationInfoFuture: Future + WasmNotSend + 'static; - - #[cfg(not(target_os = "emscripten"))] - fn init(instance_desc: wgt::InstanceDescriptor) -> Self; - unsafe fn instance_create_surface( - &self, - target: SurfaceTargetUnsafe, - ) -> Result; - fn instance_request_adapter( - &self, - options: &RequestAdapterOptions<'_, '_>, - ) -> Self::RequestAdapterFuture; - fn adapter_request_device( - &self, - adapter_data: &Self::AdapterData, - desc: &DeviceDescriptor<'_>, - trace_dir: Option<&std::path::Path>, - ) -> Self::RequestDeviceFuture; - fn instance_poll_all_devices(&self, force_wait: bool) -> bool; - fn adapter_is_surface_supported( - &self, - adapter_data: &Self::AdapterData, - surface_data: &Self::SurfaceData, - ) -> bool; - fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features; - fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits; - fn adapter_downlevel_capabilities( - &self, - adapter_data: &Self::AdapterData, - ) -> DownlevelCapabilities; - fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo; - fn adapter_get_texture_format_features( - &self, - adapter_data: &Self::AdapterData, - format: TextureFormat, - ) -> TextureFormatFeatures; - fn adapter_get_presentation_timestamp( - &self, - adapter_data: &Self::AdapterData, - ) -> wgt::PresentationTimestamp; - - fn surface_get_capabilities( - &self, - surface_data: &Self::SurfaceData, - adapter_data: &Self::AdapterData, - ) -> wgt::SurfaceCapabilities; - fn surface_configure( - &self, - surface_data: &Self::SurfaceData, - device_data: &Self::DeviceData, - config: 
&crate::SurfaceConfiguration, - ); - #[allow(clippy::type_complexity)] - fn surface_get_current_texture( - &self, - surface_data: &Self::SurfaceData, - ) -> ( - Option, - SurfaceStatus, - Self::SurfaceOutputDetail, - ); - fn surface_present(&self, detail: &Self::SurfaceOutputDetail); - fn surface_texture_discard(&self, detail: &Self::SurfaceOutputDetail); - - fn device_features(&self, device_data: &Self::DeviceData) -> Features; - fn device_limits(&self, device_data: &Self::DeviceData) -> Limits; - fn device_create_shader_module( - &self, - device_data: &Self::DeviceData, - desc: ShaderModuleDescriptor<'_>, - shader_bound_checks: wgt::ShaderBoundChecks, - ) -> Self::ShaderModuleData; - unsafe fn device_create_shader_module_spirv( - &self, - device_data: &Self::DeviceData, - desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> Self::ShaderModuleData; - fn device_create_bind_group_layout( - &self, - device_data: &Self::DeviceData, - desc: &BindGroupLayoutDescriptor<'_>, - ) -> Self::BindGroupLayoutData; - fn device_create_bind_group( - &self, - device_data: &Self::DeviceData, - desc: &BindGroupDescriptor<'_>, - ) -> Self::BindGroupData; - fn device_create_pipeline_layout( - &self, - device_data: &Self::DeviceData, - desc: &PipelineLayoutDescriptor<'_>, - ) -> Self::PipelineLayoutData; - fn device_create_render_pipeline( - &self, - device_data: &Self::DeviceData, - desc: &RenderPipelineDescriptor<'_>, - ) -> Self::RenderPipelineData; - fn device_create_compute_pipeline( - &self, - device_data: &Self::DeviceData, - desc: &ComputePipelineDescriptor<'_>, - ) -> Self::ComputePipelineData; - unsafe fn device_create_pipeline_cache( - &self, - device_data: &Self::DeviceData, - desc: &PipelineCacheDescriptor<'_>, - ) -> Self::PipelineCacheData; - fn device_create_buffer( - &self, - device_data: &Self::DeviceData, - desc: &BufferDescriptor<'_>, - ) -> Self::BufferData; - fn device_create_texture( - &self, - device_data: &Self::DeviceData, - desc: &TextureDescriptor<'_>, - ) -> Self::TextureData; - fn device_create_sampler( - &self, - device_data: &Self::DeviceData, - desc: &SamplerDescriptor<'_>, - ) -> Self::SamplerData; - fn device_create_query_set( - &self, - device_data: &Self::DeviceData, - desc: &QuerySetDescriptor<'_>, - ) -> Self::QuerySetData; - fn device_create_command_encoder( - &self, - device_data: &Self::DeviceData, - desc: &CommandEncoderDescriptor<'_>, - ) -> Self::CommandEncoderData; - fn device_create_render_bundle_encoder( - &self, - device_data: &Self::DeviceData, - desc: &RenderBundleEncoderDescriptor<'_>, - ) -> Self::RenderBundleEncoderData; - fn device_drop(&self, device_data: &Self::DeviceData); - fn device_set_device_lost_callback( - &self, - device_data: &Self::DeviceData, - device_lost_callback: DeviceLostCallback, - ); - fn device_destroy(&self, device_data: &Self::DeviceData); - fn queue_drop(&self, queue_data: &Self::QueueData); - fn device_poll(&self, device_data: &Self::DeviceData, maintain: Maintain) -> MaintainResult; - fn device_on_uncaptured_error( - &self, - device_data: &Self::DeviceData, - handler: Box, - ); - fn device_push_error_scope(&self, device_data: &Self::DeviceData, filter: ErrorFilter); - fn device_pop_error_scope(&self, device_data: &Self::DeviceData) -> Self::PopErrorScopeFuture; - - fn buffer_map_async( - &self, - buffer_data: &Self::BufferData, - mode: MapMode, - range: Range, - callback: BufferMapCallback, - ); - fn buffer_get_mapped_range( - &self, - buffer_data: &Self::BufferData, - sub_range: Range, - ) -> Box; - fn buffer_unmap(&self, buffer_data: 
&Self::BufferData); - fn shader_get_compilation_info( - &self, - shader_data: &Self::ShaderModuleData, - ) -> Self::CompilationInfoFuture; - fn texture_create_view( - &self, - texture_data: &Self::TextureData, - desc: &TextureViewDescriptor<'_>, - ) -> Self::TextureViewData; - - fn surface_drop(&self, surface_data: &Self::SurfaceData); - fn adapter_drop(&self, adapter_data: &Self::AdapterData); - fn buffer_destroy(&self, buffer_data: &Self::BufferData); - fn buffer_drop(&self, buffer_data: &Self::BufferData); - fn texture_destroy(&self, texture_data: &Self::TextureData); - fn texture_drop(&self, texture_data: &Self::TextureData); - fn texture_view_drop(&self, texture_view_data: &Self::TextureViewData); - fn sampler_drop(&self, sampler_data: &Self::SamplerData); - fn query_set_drop(&self, query_set_data: &Self::QuerySetData); - fn bind_group_drop(&self, bind_group_data: &Self::BindGroupData); - fn bind_group_layout_drop(&self, bind_group_layout_data: &Self::BindGroupLayoutData); - fn pipeline_layout_drop(&self, pipeline_layout_data: &Self::PipelineLayoutData); - fn shader_module_drop(&self, shader_module_data: &Self::ShaderModuleData); - fn command_encoder_drop(&self, command_encoder_data: &Self::CommandEncoderData); - fn command_buffer_drop(&self, command_buffer_data: &Self::CommandBufferData); - fn render_bundle_drop(&self, render_bundle_data: &Self::RenderBundleData); - fn compute_pipeline_drop(&self, pipeline_data: &Self::ComputePipelineData); - fn render_pipeline_drop(&self, pipeline_data: &Self::RenderPipelineData); - fn pipeline_cache_drop(&self, cache_data: &Self::PipelineCacheData); - - fn compute_pipeline_get_bind_group_layout( - &self, - pipeline_data: &Self::ComputePipelineData, - index: u32, - ) -> Self::BindGroupLayoutData; - fn render_pipeline_get_bind_group_layout( - &self, - pipeline_data: &Self::RenderPipelineData, - index: u32, - ) -> Self::BindGroupLayoutData; - - #[allow(clippy::too_many_arguments)] - fn command_encoder_copy_buffer_to_buffer( - &self, - encoder_data: &Self::CommandEncoderData, - source_data: &Self::BufferData, - source_offset: BufferAddress, - destination_data: &Self::BufferData, - destination_offset: BufferAddress, - copy_size: BufferAddress, - ); - fn command_encoder_copy_buffer_to_texture( - &self, - encoder_data: &Self::CommandEncoderData, - source: TexelCopyBufferInfo<'_>, - destination: TexelCopyTextureInfo<'_>, - copy_size: Extent3d, - ); - fn command_encoder_copy_texture_to_buffer( - &self, - encoder_data: &Self::CommandEncoderData, - source: TexelCopyTextureInfo<'_>, - destination: TexelCopyBufferInfo<'_>, - copy_size: Extent3d, - ); - fn command_encoder_copy_texture_to_texture( - &self, - encoder_data: &Self::CommandEncoderData, - source: TexelCopyTextureInfo<'_>, - destination: TexelCopyTextureInfo<'_>, - copy_size: Extent3d, - ); - - fn command_encoder_begin_compute_pass( - &self, - encoder_data: &Self::CommandEncoderData, - desc: &ComputePassDescriptor<'_>, - ) -> Self::ComputePassData; - fn command_encoder_begin_render_pass( - &self, - encoder_data: &Self::CommandEncoderData, - desc: &RenderPassDescriptor<'_>, - ) -> Self::RenderPassData; - fn command_encoder_finish( - &self, - encoder_data: &mut Self::CommandEncoderData, - ) -> Self::CommandBufferData; - - fn command_encoder_clear_texture( - &self, - encoder_data: &Self::CommandEncoderData, - texture_data: &Self::TextureData, - subresource_range: &ImageSubresourceRange, - ); - fn command_encoder_clear_buffer( - &self, - encoder_data: &Self::CommandEncoderData, - buffer_data: 
&Self::BufferData, - offset: BufferAddress, - size: Option, - ); - - fn command_encoder_insert_debug_marker( - &self, - encoder_data: &Self::CommandEncoderData, - label: &str, - ); - fn command_encoder_push_debug_group( - &self, - encoder_data: &Self::CommandEncoderData, - label: &str, - ); - fn command_encoder_pop_debug_group(&self, encoder_data: &Self::CommandEncoderData); - - fn command_encoder_write_timestamp( - &self, - encoder_data: &Self::CommandEncoderData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ); - #[allow(clippy::too_many_arguments)] - fn command_encoder_resolve_query_set( - &self, - encoder_data: &Self::CommandEncoderData, - query_set_data: &Self::QuerySetData, - first_query: u32, - query_count: u32, - destination_data: &Self::BufferData, - destination_offset: BufferAddress, - ); - - fn render_bundle_encoder_finish( - &self, - encoder_data: Self::RenderBundleEncoderData, - desc: &RenderBundleDescriptor<'_>, - ) -> Self::RenderBundleData; - fn queue_write_buffer( - &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: BufferAddress, - data: &[u8], - ); - fn queue_validate_write_buffer( - &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: wgt::BufferAddress, - size: wgt::BufferSize, - ) -> Option<()>; - fn queue_create_staging_buffer( - &self, - queue_data: &Self::QueueData, - size: BufferSize, - ) -> Option>; - fn queue_write_staging_buffer( - &self, - queue_data: &Self::QueueData, - buffer_data: &Self::BufferData, - offset: BufferAddress, - staging_buffer: &dyn QueueWriteBuffer, - ); - fn queue_write_texture( - &self, - queue_data: &Self::QueueData, - texture: TexelCopyTextureInfo<'_>, - data: &[u8], - data_layout: TexelCopyBufferLayout, - size: Extent3d, - ); - #[cfg(any(webgl, webgpu))] - fn queue_copy_external_image_to_texture( - &self, - queue_data: &Self::QueueData, - source: &wgt::CopyExternalImageSourceInfo, - dest: crate::CopyExternalImageDestInfo<'_>, - size: wgt::Extent3d, - ); - fn queue_submit>( - &self, - queue_data: &Self::QueueData, - command_buffers: I, - ) -> Self::SubmissionIndexData; - fn queue_get_timestamp_period(&self, queue_data: &Self::QueueData) -> f32; - fn queue_on_submitted_work_done( - &self, - queue_data: &Self::QueueData, - callback: SubmittedWorkDoneCallback, - ); - - fn device_start_capture(&self, device_data: &Self::DeviceData); - fn device_stop_capture(&self, device_data: &Self::DeviceData); - - fn device_get_internal_counters( - &self, - _device_data: &Self::DeviceData, - ) -> wgt::InternalCounters; - - fn device_generate_allocator_report( - &self, - _device_data: &Self::DeviceData, - ) -> Option; - - fn pipeline_cache_get_data(&self, cache_data: &Self::PipelineCacheData) -> Option>; - - fn compute_pass_set_pipeline( - &self, - pass_data: &mut Self::ComputePassData, - pipeline_data: &Self::ComputePipelineData, - ); - fn compute_pass_set_bind_group( - &self, - pass_data: &mut Self::ComputePassData, - index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[DynamicOffset], - ); - fn compute_pass_set_push_constants( - &self, - pass_data: &mut Self::ComputePassData, - offset: u32, - data: &[u8], - ); - fn compute_pass_insert_debug_marker(&self, pass_data: &mut Self::ComputePassData, label: &str); - fn compute_pass_push_debug_group( - &self, - pass_data: &mut Self::ComputePassData, - group_label: &str, - ); - fn compute_pass_pop_debug_group(&self, pass_data: &mut Self::ComputePassData); - fn compute_pass_write_timestamp( - &self, - pass_data: &mut 
Self::ComputePassData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ); - fn compute_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut Self::ComputePassData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ); - fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::ComputePassData); - fn compute_pass_dispatch_workgroups( - &self, - pass_data: &mut Self::ComputePassData, - x: u32, - y: u32, - z: u32, - ); - fn compute_pass_dispatch_workgroups_indirect( - &self, - pass_data: &mut Self::ComputePassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - ); - fn compute_pass_end(&self, pass_data: &mut Self::ComputePassData); - - fn render_bundle_encoder_set_pipeline( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - pipeline_data: &Self::RenderPipelineData, - ); - fn render_bundle_encoder_set_bind_group( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[DynamicOffset], - ); - #[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_set_index_buffer( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - buffer_data: &Self::BufferData, - index_format: IndexFormat, - offset: BufferAddress, - size: Option, - ); - #[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_set_vertex_buffer( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - slot: u32, - buffer_data: &Self::BufferData, - offset: BufferAddress, - size: Option, - ); - fn render_bundle_encoder_set_push_constants( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - stages: ShaderStages, - offset: u32, - data: &[u8], - ); - fn render_bundle_encoder_draw( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - vertices: Range, - instances: Range, - ); - fn render_bundle_encoder_draw_indexed( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indices: Range, - base_vertex: i32, - instances: Range, - ); - fn render_bundle_encoder_draw_indirect( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - ); - fn render_bundle_encoder_draw_indexed_indirect( - &self, - encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - ); - - fn render_pass_set_pipeline( - &self, - pass_data: &mut Self::RenderPassData, - pipeline_data: &Self::RenderPipelineData, - ); - fn render_pass_set_bind_group( - &self, - pass_data: &mut Self::RenderPassData, - index: u32, - bind_group_data: Option<&Self::BindGroupData>, - offsets: &[DynamicOffset], - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_set_index_buffer( - &self, - pass_data: &mut Self::RenderPassData, - buffer_data: &Self::BufferData, - index_format: IndexFormat, - offset: BufferAddress, - size: Option, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_set_vertex_buffer( - &self, - pass_data: &mut Self::RenderPassData, - slot: u32, - buffer_data: &Self::BufferData, - offset: BufferAddress, - size: Option, - ); - fn render_pass_set_push_constants( - &self, - pass_data: &mut Self::RenderPassData, - stages: ShaderStages, - offset: u32, - data: &[u8], - ); - fn render_pass_draw( - &self, - pass_data: &mut Self::RenderPassData, - vertices: Range, - instances: Range, - ); - fn render_pass_draw_indexed( - &self, - pass_data: &mut Self::RenderPassData, - indices: Range, - base_vertex: i32, - 
instances: Range, - ); - fn render_pass_draw_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - ); - fn render_pass_draw_indexed_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - ); - fn render_pass_multi_draw_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - count: u32, - ); - fn render_pass_multi_draw_indexed_indirect( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - count: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_multi_draw_indirect_count( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - count_buffer_data: &Self::BufferData, - count_buffer_offset: BufferAddress, - max_count: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_multi_draw_indexed_indirect_count( - &self, - pass_data: &mut Self::RenderPassData, - indirect_buffer_data: &Self::BufferData, - indirect_offset: BufferAddress, - count_buffer_data: &Self::BufferData, - count_buffer_offset: BufferAddress, - max_count: u32, - ); - fn render_pass_set_blend_constant(&self, pass_data: &mut Self::RenderPassData, color: Color); - fn render_pass_set_scissor_rect( - &self, - pass_data: &mut Self::RenderPassData, - x: u32, - y: u32, - width: u32, - height: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_set_viewport( - &self, - pass_data: &mut Self::RenderPassData, - x: f32, - y: f32, - width: f32, - height: f32, - min_depth: f32, - max_depth: f32, - ); - fn render_pass_set_stencil_reference( - &self, - pass_data: &mut Self::RenderPassData, - reference: u32, - ); - fn render_pass_insert_debug_marker(&self, pass_data: &mut Self::RenderPassData, label: &str); - fn render_pass_push_debug_group(&self, pass_data: &mut Self::RenderPassData, group_label: &str); - fn render_pass_pop_debug_group(&self, pass_data: &mut Self::RenderPassData); - fn render_pass_write_timestamp( - &self, - pass_data: &mut Self::RenderPassData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ); - fn render_pass_begin_occlusion_query( - &self, - pass_data: &mut Self::RenderPassData, - query_index: u32, - ); - fn render_pass_end_occlusion_query(&self, pass_data: &mut Self::RenderPassData); - fn render_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut Self::RenderPassData, - query_set_data: &Self::QuerySetData, - query_index: u32, - ); - fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut Self::RenderPassData); - fn render_pass_execute_bundles( - &self, - pass_data: &mut Self::RenderPassData, - render_bundles: &mut dyn Iterator, - ); - fn render_pass_end(&self, pass_data: &mut Self::RenderPassData); - - fn device_create_blas( - &self, - device_data: &Self::DeviceData, - desc: &crate::CreateBlasDescriptor<'_>, - sizes: wgt::BlasGeometrySizeDescriptors, - ) -> (Option, Self::BlasData); - fn device_create_tlas( - &self, - device_data: &Self::DeviceData, - desc: &crate::CreateTlasDescriptor<'_>, - ) -> Self::TlasData; - fn command_encoder_build_acceleration_structures_unsafe_tlas<'a>( - &'a self, - encoder_data: &Self::CommandEncoderData, - blas: impl Iterator>, - tlas: impl Iterator>, - ); - fn command_encoder_build_acceleration_structures<'a>( - &'a self, - 
encoder_data: &Self::CommandEncoderData, - blas: impl Iterator>, - tlas: impl Iterator>, - ); - fn blas_destroy(&self, blas_data: &Self::BlasData); - fn blas_drop(&self, blas_data: &Self::BlasData); - fn tlas_destroy(&self, tlas_data: &Self::TlasData); - fn tlas_drop(&self, tlas_data: &Self::TlasData); -} - -pub(crate) fn downcast_ref(data: &crate::Data) -> &T { - strict_assert!(data.is::()); - // Copied from std. - unsafe { &*(data as *const dyn Any as *const T) } -} - -fn downcast_mut(data: &mut crate::Data) -> &mut T { - strict_assert!(data.is::()); - // Copied from std. - unsafe { &mut *(data as *mut dyn Any as *mut T) } -} - -pub(crate) struct DeviceRequest { - pub device_data: Box, - pub queue_data: Box, -} - -#[cfg(send_sync)] -pub type BufferMapCallback = Box) + Send + 'static>; -#[cfg(not(send_sync))] -pub type BufferMapCallback = Box) + 'static>; - -#[cfg(send_sync)] -pub(crate) type AdapterRequestDeviceFuture = - Box> + Send>; -#[cfg(not(send_sync))] -pub(crate) type AdapterRequestDeviceFuture = - Box>>; - -#[cfg(send_sync)] -pub type InstanceRequestAdapterFuture = Box>> + Send>; -#[cfg(not(send_sync))] -pub type InstanceRequestAdapterFuture = Box>>>; - -#[cfg(send_sync)] -pub type DevicePopErrorFuture = Box> + Send>; -#[cfg(not(send_sync))] -pub type DevicePopErrorFuture = Box>>; - -#[cfg(send_sync)] -pub type ShaderCompilationInfoFuture = Box + Send>; -#[cfg(not(send_sync))] -pub type ShaderCompilationInfoFuture = Box>; - -#[cfg(send_sync)] -pub type SubmittedWorkDoneCallback = Box; -#[cfg(not(send_sync))] -pub type SubmittedWorkDoneCallback = Box; -#[cfg(send_sync)] -pub type DeviceLostCallback = Box; -#[cfg(not(send_sync))] -pub type DeviceLostCallback = Box; - -/// An object safe variant of [`Context`] implemented by all types that implement [`Context`]. 
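The `DynContext` trait deleted below is the object-safe shim the old design layered over the generic `Context` trait: every associated type was erased to `crate::Data` (a boxed `Any`), and a blanket impl downcast those boxes back to the backend's concrete types before forwarding each call. A minimal, self-contained sketch of that erasure pattern with a toy trait; none of these names are the real wgpu API:

```rust
use std::any::Any;

// Toy analogue of the generic `Context` trait with one associated type.
trait Context {
    type BufferData: 'static;
    fn buffer_len(&self, data: &Self::BufferData) -> usize;
}

// Object-safe variant: the associated type is erased to `dyn Any`.
trait DynContext {
    fn buffer_len(&self, data: &dyn Any) -> usize;
}

// Blanket impl: every `Context` is usable through `dyn DynContext`,
// recovering the concrete data type with a downcast.
impl<T: Context> DynContext for T {
    fn buffer_len(&self, data: &dyn Any) -> usize {
        let data = data
            .downcast_ref::<T::BufferData>()
            .expect("buffer data belongs to a different backend");
        Context::buffer_len(self, data)
    }
}

struct MockContext;

impl Context for MockContext {
    type BufferData = Vec<u8>;
    fn buffer_len(&self, data: &Vec<u8>) -> usize {
        data.len()
    }
}

fn main() {
    let ctx: Box<dyn DynContext> = Box::new(MockContext);
    let data: Box<dyn Any> = Box::new(vec![1u8, 2, 3]);
    assert_eq!(ctx.buffer_len(data.as_ref()), 3);
}
```

The `dispatch` module added elsewhere in this patch replaces this erase-and-downcast layer with types that know both backends statically, which is why the `downcast_*` helpers above could be deleted.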
-pub(crate) trait DynContext: Debug + WasmNotSendSync { - #[cfg(not(target_os = "emscripten"))] - fn as_any(&self) -> &dyn Any; - - unsafe fn instance_create_surface( - &self, - target: SurfaceTargetUnsafe, - ) -> Result, crate::CreateSurfaceError>; - #[allow(clippy::type_complexity)] - fn instance_request_adapter( - &self, - options: &RequestAdapterOptions<'_, '_>, - ) -> Pin; - fn adapter_request_device( - &self, - adapter_data: &crate::Data, - desc: &DeviceDescriptor<'_>, - trace_dir: Option<&std::path::Path>, - ) -> Pin; - - fn instance_poll_all_devices(&self, force_wait: bool) -> bool; - fn adapter_is_surface_supported( - &self, - adapter_data: &crate::Data, - surface_data: &crate::Data, - ) -> bool; - fn adapter_features(&self, adapter_data: &crate::Data) -> Features; - fn adapter_limits(&self, adapter_data: &crate::Data) -> Limits; - fn adapter_downlevel_capabilities(&self, adapter_data: &crate::Data) -> DownlevelCapabilities; - fn adapter_get_info(&self, adapter_data: &crate::Data) -> AdapterInfo; - fn adapter_get_texture_format_features( - &self, - adapter_data: &crate::Data, - format: TextureFormat, - ) -> TextureFormatFeatures; - fn adapter_get_presentation_timestamp( - &self, - adapter_data: &crate::Data, - ) -> wgt::PresentationTimestamp; - - fn surface_get_capabilities( - &self, - surface_data: &crate::Data, - adapter_data: &crate::Data, - ) -> wgt::SurfaceCapabilities; - fn surface_configure( - &self, - surface_data: &crate::Data, - device_data: &crate::Data, - config: &crate::SurfaceConfiguration, - ); - fn surface_get_current_texture( - &self, - surface_data: &crate::Data, - ) -> ( - Option>, - SurfaceStatus, - Box, - ); - fn surface_present(&self, detail: &dyn AnyWasmNotSendSync); - fn surface_texture_discard(&self, detail: &dyn AnyWasmNotSendSync); - - fn device_features(&self, device_data: &crate::Data) -> Features; - fn device_limits(&self, device_data: &crate::Data) -> Limits; - fn device_create_shader_module( - &self, - device_data: &crate::Data, - desc: ShaderModuleDescriptor<'_>, - shader_bound_checks: wgt::ShaderBoundChecks, - ) -> Box; - unsafe fn device_create_shader_module_spirv( - &self, - device_data: &crate::Data, - desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> Box; - fn device_create_bind_group_layout( - &self, - device_data: &crate::Data, - desc: &BindGroupLayoutDescriptor<'_>, - ) -> Box; - fn device_create_bind_group( - &self, - device_data: &crate::Data, - desc: &BindGroupDescriptor<'_>, - ) -> Box; - fn device_create_pipeline_layout( - &self, - device_data: &crate::Data, - desc: &PipelineLayoutDescriptor<'_>, - ) -> Box; - fn device_create_render_pipeline( - &self, - device_data: &crate::Data, - desc: &RenderPipelineDescriptor<'_>, - ) -> Box; - fn device_create_compute_pipeline( - &self, - device_data: &crate::Data, - desc: &ComputePipelineDescriptor<'_>, - ) -> Box; - unsafe fn device_create_pipeline_cache( - &self, - device_data: &crate::Data, - desc: &PipelineCacheDescriptor<'_>, - ) -> Box; - fn device_create_buffer( - &self, - device_data: &crate::Data, - desc: &BufferDescriptor<'_>, - ) -> Box; - fn device_create_texture( - &self, - device_data: &crate::Data, - desc: &TextureDescriptor<'_>, - ) -> Box; - fn device_create_sampler( - &self, - device_data: &crate::Data, - desc: &SamplerDescriptor<'_>, - ) -> Box; - fn device_create_query_set( - &self, - device_data: &crate::Data, - desc: &QuerySetDescriptor<'_>, - ) -> Box; - fn device_create_command_encoder( - &self, - device_data: &crate::Data, - desc: &CommandEncoderDescriptor<'_>, - ) -> Box; 
- fn device_create_render_bundle_encoder( - &self, - device_data: &crate::Data, - desc: &RenderBundleEncoderDescriptor<'_>, - ) -> Box; - fn device_drop(&self, device_data: &crate::Data); - fn device_set_device_lost_callback( - &self, - device_data: &crate::Data, - device_lost_callback: DeviceLostCallback, - ); - fn device_destroy(&self, device_data: &crate::Data); - fn queue_drop(&self, queue_data: &crate::Data); - fn device_poll(&self, device_data: &crate::Data, maintain: Maintain) -> MaintainResult; - fn device_on_uncaptured_error( - &self, - device_data: &crate::Data, - handler: Box, - ); - fn device_push_error_scope(&self, device_data: &crate::Data, filter: ErrorFilter); - fn device_pop_error_scope(&self, device_data: &crate::Data) -> Pin; - fn buffer_map_async( - &self, - buffer_data: &crate::Data, - mode: MapMode, - range: Range, - callback: BufferMapCallback, - ); - fn buffer_get_mapped_range( - &self, - buffer_data: &crate::Data, - sub_range: Range, - ) -> Box; - fn buffer_unmap(&self, buffer_data: &crate::Data); - fn shader_get_compilation_info( - &self, - shader_data: &crate::Data, - ) -> Pin; - fn texture_create_view( - &self, - texture_data: &crate::Data, - desc: &TextureViewDescriptor<'_>, - ) -> Box; - - fn surface_drop(&self, surface_data: &crate::Data); - fn adapter_drop(&self, adapter_data: &crate::Data); - fn buffer_destroy(&self, buffer_data: &crate::Data); - fn buffer_drop(&self, buffer_data: &crate::Data); - fn texture_destroy(&self, buffer_data: &crate::Data); - fn texture_drop(&self, texture_data: &crate::Data); - fn texture_view_drop(&self, texture_view_data: &crate::Data); - fn sampler_drop(&self, sampler_data: &crate::Data); - fn query_set_drop(&self, query_set_data: &crate::Data); - fn bind_group_drop(&self, bind_group_data: &crate::Data); - fn bind_group_layout_drop(&self, bind_group_layout_data: &crate::Data); - fn pipeline_layout_drop(&self, pipeline_layout_data: &crate::Data); - fn shader_module_drop(&self, shader_module_data: &crate::Data); - fn command_encoder_drop(&self, command_encoder_data: &crate::Data); - fn command_buffer_drop(&self, command_buffer_data: &crate::Data); - fn render_bundle_drop(&self, render_bundle_data: &crate::Data); - fn compute_pipeline_drop(&self, pipeline_data: &crate::Data); - fn render_pipeline_drop(&self, pipeline_data: &crate::Data); - fn pipeline_cache_drop(&self, _cache_data: &crate::Data); - - fn compute_pipeline_get_bind_group_layout( - &self, - pipeline_data: &crate::Data, - index: u32, - ) -> Box; - fn render_pipeline_get_bind_group_layout( - &self, - pipeline_data: &crate::Data, - index: u32, - ) -> Box; - - #[allow(clippy::too_many_arguments)] - fn command_encoder_copy_buffer_to_buffer( - &self, - encoder_data: &crate::Data, - source_data: &crate::Data, - source_offset: BufferAddress, - destination_data: &crate::Data, - destination_offset: BufferAddress, - copy_size: BufferAddress, - ); - fn command_encoder_copy_buffer_to_texture( - &self, - encoder_data: &crate::Data, - source: TexelCopyBufferInfo<'_>, - destination: TexelCopyTextureInfo<'_>, - copy_size: Extent3d, - ); - fn command_encoder_copy_texture_to_buffer( - &self, - encoder_data: &crate::Data, - source: TexelCopyTextureInfo<'_>, - destination: TexelCopyBufferInfo<'_>, - copy_size: Extent3d, - ); - fn command_encoder_copy_texture_to_texture( - &self, - encoder_data: &crate::Data, - source: TexelCopyTextureInfo<'_>, - destination: TexelCopyTextureInfo<'_>, - copy_size: Extent3d, - ); - - fn command_encoder_begin_compute_pass( - &self, - encoder_data: 
&crate::Data, - desc: &ComputePassDescriptor<'_>, - ) -> Box; - fn command_encoder_begin_render_pass( - &self, - encoder_data: &crate::Data, - desc: &RenderPassDescriptor<'_>, - ) -> Box; - fn command_encoder_finish(&self, encoder_data: &mut crate::Data) -> Box; - - fn command_encoder_clear_texture( - &self, - encoder_data: &crate::Data, - texture_data: &crate::Data, - subresource_range: &ImageSubresourceRange, - ); - fn command_encoder_clear_buffer( - &self, - encoder_data: &crate::Data, - buffer_data: &crate::Data, - offset: BufferAddress, - size: Option, - ); - - fn command_encoder_insert_debug_marker(&self, encoder_data: &crate::Data, label: &str); - fn command_encoder_push_debug_group(&self, encoder_data: &crate::Data, label: &str); - fn command_encoder_pop_debug_group(&self, encoder_data: &crate::Data); - - fn command_encoder_write_timestamp( - &self, - encoder_data: &crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ); - #[allow(clippy::too_many_arguments)] - fn command_encoder_resolve_query_set( - &self, - encoder_data: &crate::Data, - query_set_data: &crate::Data, - first_query: u32, - query_count: u32, - destination_data: &crate::Data, - destination_offset: BufferAddress, - ); - - fn render_bundle_encoder_finish( - &self, - encoder_data: Box, - desc: &RenderBundleDescriptor<'_>, - ) -> Box; - fn queue_write_buffer( - &self, - queue_data: &crate::Data, - buffer_data: &crate::Data, - offset: BufferAddress, - data: &[u8], - ); - fn queue_validate_write_buffer( - &self, - queue_data: &crate::Data, - buffer_data: &crate::Data, - offset: wgt::BufferAddress, - size: wgt::BufferSize, - ) -> Option<()>; - fn queue_create_staging_buffer( - &self, - queue_data: &crate::Data, - size: BufferSize, - ) -> Option>; - fn queue_write_staging_buffer( - &self, - queue_data: &crate::Data, - buffer_data: &crate::Data, - offset: BufferAddress, - staging_buffer: &dyn QueueWriteBuffer, - ); - fn queue_write_texture( - &self, - queue_data: &crate::Data, - texture: TexelCopyTextureInfo<'_>, - data: &[u8], - data_layout: TexelCopyBufferLayout, - size: Extent3d, - ); - #[cfg(any(webgpu, webgl))] - fn queue_copy_external_image_to_texture( - &self, - queue_data: &crate::Data, - source: &wgt::CopyExternalImageSourceInfo, - dest: crate::CopyExternalImageDestInfo<'_>, - size: wgt::Extent3d, - ); - fn queue_submit( - &self, - queue_data: &crate::Data, - command_buffers: &mut dyn Iterator>, - ) -> Arc; - fn queue_get_timestamp_period(&self, queue_data: &crate::Data) -> f32; - fn queue_on_submitted_work_done( - &self, - queue_data: &crate::Data, - callback: SubmittedWorkDoneCallback, - ); - - fn device_start_capture(&self, data: &crate::Data); - fn device_stop_capture(&self, data: &crate::Data); - - fn device_get_internal_counters(&self, device_data: &crate::Data) -> wgt::InternalCounters; - - fn generate_allocator_report(&self, device_data: &crate::Data) -> Option; - - fn pipeline_cache_get_data(&self, cache_data: &crate::Data) -> Option>; - - fn compute_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data); - fn compute_pass_set_bind_group( - &self, - pass_data: &mut crate::Data, - index: u32, - bind_group_data: Option<&crate::Data>, - offsets: &[DynamicOffset], - ); - fn compute_pass_set_push_constants( - &self, - pass_data: &mut crate::Data, - offset: u32, - data: &[u8], - ); - fn compute_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str); - fn compute_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str); - fn 
compute_pass_pop_debug_group(&self, pass_data: &mut crate::Data); - fn compute_pass_write_timestamp( - &self, - pass_data: &mut crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ); - fn compute_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ); - fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data); - fn compute_pass_dispatch_workgroups(&self, pass_data: &mut crate::Data, x: u32, y: u32, z: u32); - fn compute_pass_dispatch_workgroups_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ); - fn compute_pass_end(&self, pass_data: &mut crate::Data); - - fn render_bundle_encoder_set_pipeline( - &self, - encoder_data: &mut crate::Data, - pipeline_data: &crate::Data, - ); - fn render_bundle_encoder_set_bind_group( - &self, - encoder_data: &mut crate::Data, - index: u32, - bind_group_data: Option<&crate::Data>, - offsets: &[DynamicOffset], - ); - #[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_set_index_buffer( - &self, - encoder_data: &mut crate::Data, - buffer_data: &crate::Data, - index_format: IndexFormat, - offset: BufferAddress, - size: Option, - ); - #[allow(clippy::too_many_arguments)] - fn render_bundle_encoder_set_vertex_buffer( - &self, - encoder_data: &mut crate::Data, - slot: u32, - buffer_data: &crate::Data, - offset: BufferAddress, - size: Option, - ); - fn render_bundle_encoder_set_push_constants( - &self, - encoder_data: &mut crate::Data, - stages: ShaderStages, - offset: u32, - data: &[u8], - ); - fn render_bundle_encoder_draw( - &self, - encoder_data: &mut crate::Data, - vertices: Range, - instances: Range, - ); - fn render_bundle_encoder_draw_indexed( - &self, - encoder_data: &mut crate::Data, - indices: Range, - base_vertex: i32, - instances: Range, - ); - fn render_bundle_encoder_draw_indirect( - &self, - encoder_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ); - fn render_bundle_encoder_draw_indexed_indirect( - &self, - encoder_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ); - - fn render_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data); - fn render_pass_set_bind_group( - &self, - pass_data: &mut crate::Data, - index: u32, - bind_group_data: Option<&crate::Data>, - offsets: &[DynamicOffset], - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_set_index_buffer( - &self, - pass_data: &mut crate::Data, - buffer_data: &crate::Data, - index_format: IndexFormat, - offset: BufferAddress, - size: Option, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_set_vertex_buffer( - &self, - pass_data: &mut crate::Data, - slot: u32, - buffer_data: &crate::Data, - offset: BufferAddress, - size: Option, - ); - fn render_pass_set_push_constants( - &self, - pass_data: &mut crate::Data, - stages: ShaderStages, - offset: u32, - data: &[u8], - ); - fn render_pass_draw( - &self, - pass_data: &mut crate::Data, - vertices: Range, - instances: Range, - ); - fn render_pass_draw_indexed( - &self, - pass_data: &mut crate::Data, - indices: Range, - base_vertex: i32, - instances: Range, - ); - fn render_pass_draw_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ); - fn render_pass_draw_indexed_indirect( - &self, - pass_data: &mut crate::Data, - 
indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ); - fn render_pass_multi_draw_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ); - fn render_pass_multi_draw_indexed_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_multi_draw_indirect_count( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_multi_draw_indexed_indirect_count( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - command_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ); - fn render_pass_set_blend_constant(&self, pass_data: &mut crate::Data, color: Color); - fn render_pass_set_scissor_rect( - &self, - pass_data: &mut crate::Data, - x: u32, - y: u32, - width: u32, - height: u32, - ); - #[allow(clippy::too_many_arguments)] - fn render_pass_set_viewport( - &self, - pass_data: &mut crate::Data, - x: f32, - y: f32, - width: f32, - height: f32, - min_depth: f32, - max_depth: f32, - ); - fn render_pass_set_stencil_reference(&self, pass_data: &mut crate::Data, reference: u32); - fn render_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str); - fn render_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str); - fn render_pass_pop_debug_group(&self, pass_data: &mut crate::Data); - fn render_pass_write_timestamp( - &self, - pass_data: &mut crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ); - fn render_pass_begin_occlusion_query(&self, pass_data: &mut crate::Data, query_index: u32); - fn render_pass_end_occlusion_query(&self, pass_data: &mut crate::Data); - fn render_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ); - fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data); - fn render_pass_execute_bundles( - &self, - pass_data: &mut crate::Data, - render_bundles: &mut dyn Iterator, - ); - fn device_create_blas( - &self, - device_data: &crate::Data, - desc: &crate::CreateBlasDescriptor<'_>, - sizes: wgt::BlasGeometrySizeDescriptors, - ) -> (Option, Box); - fn device_create_tlas( - &self, - device_data: &crate::Data, - desc: &crate::CreateTlasDescriptor<'_>, - ) -> Box; - fn command_encoder_build_acceleration_structures_unsafe_tlas( - &self, - encoder_data: &crate::Data, - blas: &mut dyn Iterator>, - tlas: &mut dyn Iterator>, - ); - fn command_encoder_build_acceleration_structures( - &self, - encoder_data: &crate::Data, - blas: &mut dyn Iterator>, - tlas: &mut dyn Iterator>, - ); - fn blas_destroy(&self, blas_data: &crate::Data); - fn blas_drop(&self, blas_data: &crate::Data); - fn tlas_destroy(&self, tlas_data: &crate::Data); - fn tlas_drop(&self, tlas_data: &crate::Data); - fn render_pass_end(&self, pass_data: &mut crate::Data); -} - -// Blanket impl of DynContext for all types which implement Context. 
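One detail worth seeing in isolation before the blanket impl below: methods that return backend-specific futures (`instance_request_adapter`, `device_pop_error_scope`, and so on) are erased by boxing the concrete future and mapping its output into a boxed form, as in `Box::pin(async move { future.await.map(|data| Box::new(data) as _) })`. A toy, self-contained version of that trick; the names are illustrative, and it assumes the small `pollster` crate only to drive the future in `main`:

```rust
use std::any::Any;
use std::future::Future;
use std::pin::Pin;

// A pinned, type-erased future whose output is also type-erased.
type DynFuture = Pin<Box<dyn Future<Output = Box<dyn Any>>>>;

fn erase<F, T>(future: F) -> DynFuture
where
    F: Future<Output = T> + 'static,
    T: 'static,
{
    // Box::pin turns any concrete future into a pinned trait object; the
    // `async move` block maps its output into `Box<dyn Any>`.
    Box::pin(async move { Box::new(future.await) as Box<dyn Any> })
}

fn main() {
    let fut = erase(async { 42u32 });
    // Any executor works here; pollster is just a convenient one.
    let value = pollster::block_on(fut);
    assert_eq!(*value.downcast::<u32>().unwrap(), 42);
}
```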
-impl DynContext for T -where - T: Context + 'static, -{ - #[cfg(not(target_os = "emscripten"))] - fn as_any(&self) -> &dyn Any { - self - } - - unsafe fn instance_create_surface( - &self, - target: SurfaceTargetUnsafe, - ) -> Result, crate::CreateSurfaceError> { - let data = unsafe { Context::instance_create_surface(self, target) }?; - Ok(Box::new(data) as _) - } - - fn instance_request_adapter( - &self, - options: &RequestAdapterOptions<'_, '_>, - ) -> Pin { - let future: T::RequestAdapterFuture = Context::instance_request_adapter(self, options); - Box::pin(async move { future.await.map(|data| Box::new(data) as _) }) - } - - fn adapter_request_device( - &self, - adapter_data: &crate::Data, - desc: &DeviceDescriptor<'_>, - trace_dir: Option<&std::path::Path>, - ) -> Pin { - let adapter_data = downcast_ref(adapter_data); - let future = Context::adapter_request_device(self, adapter_data, desc, trace_dir); - - Box::pin(async move { - let (device_data, queue_data) = future.await?; - Ok(DeviceRequest { - device_data: Box::new(device_data) as _, - queue_data: Box::new(queue_data) as _, - }) - }) - } - - fn instance_poll_all_devices(&self, force_wait: bool) -> bool { - Context::instance_poll_all_devices(self, force_wait) - } - - fn adapter_is_surface_supported( - &self, - adapter_data: &crate::Data, - surface_data: &crate::Data, - ) -> bool { - let adapter_data = downcast_ref(adapter_data); - let surface_data = downcast_ref(surface_data); - Context::adapter_is_surface_supported(self, adapter_data, surface_data) - } - - fn adapter_features(&self, adapter_data: &crate::Data) -> Features { - let adapter_data = downcast_ref(adapter_data); - Context::adapter_features(self, adapter_data) - } - - fn adapter_limits(&self, adapter_data: &crate::Data) -> Limits { - let adapter_data = downcast_ref(adapter_data); - Context::adapter_limits(self, adapter_data) - } - - fn adapter_downlevel_capabilities(&self, adapter_data: &crate::Data) -> DownlevelCapabilities { - let adapter_data = downcast_ref(adapter_data); - Context::adapter_downlevel_capabilities(self, adapter_data) - } - - fn adapter_get_info(&self, adapter_data: &crate::Data) -> AdapterInfo { - let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_info(self, adapter_data) - } - - fn adapter_get_texture_format_features( - &self, - adapter_data: &crate::Data, - format: TextureFormat, - ) -> TextureFormatFeatures { - let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_texture_format_features(self, adapter_data, format) - } - fn adapter_get_presentation_timestamp( - &self, - adapter_data: &crate::Data, - ) -> wgt::PresentationTimestamp { - let adapter_data = downcast_ref(adapter_data); - Context::adapter_get_presentation_timestamp(self, adapter_data) - } - - fn surface_get_capabilities( - &self, - surface_data: &crate::Data, - adapter_data: &crate::Data, - ) -> wgt::SurfaceCapabilities { - let surface_data = downcast_ref(surface_data); - let adapter_data = downcast_ref(adapter_data); - Context::surface_get_capabilities(self, surface_data, adapter_data) - } - - fn surface_configure( - &self, - surface_data: &crate::Data, - device_data: &crate::Data, - config: &crate::SurfaceConfiguration, - ) { - let surface_data = downcast_ref(surface_data); - let device_data = downcast_ref(device_data); - Context::surface_configure(self, surface_data, device_data, config) - } - - fn surface_get_current_texture( - &self, - surface_data: &crate::Data, - ) -> ( - Option>, - SurfaceStatus, - Box, - ) { - let surface_data = 
downcast_ref(surface_data); - let (texture_data, status, detail) = - Context::surface_get_current_texture(self, surface_data); - let detail = Box::new(detail) as Box; - (texture_data.map(|b| Box::new(b) as _), status, detail) - } - - fn surface_present(&self, detail: &dyn AnyWasmNotSendSync) { - Context::surface_present(self, detail.downcast_ref().unwrap()) - } - - fn surface_texture_discard(&self, detail: &dyn AnyWasmNotSendSync) { - Context::surface_texture_discard(self, detail.downcast_ref().unwrap()) - } - - fn device_features(&self, device_data: &crate::Data) -> Features { - let device_data = downcast_ref(device_data); - Context::device_features(self, device_data) - } - - fn device_limits(&self, device_data: &crate::Data) -> Limits { - let device_data = downcast_ref(device_data); - Context::device_limits(self, device_data) - } - - fn device_create_shader_module( - &self, - device_data: &crate::Data, - desc: ShaderModuleDescriptor<'_>, - shader_bound_checks: wgt::ShaderBoundChecks, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = - Context::device_create_shader_module(self, device_data, desc, shader_bound_checks); - Box::new(data) as _ - } - - unsafe fn device_create_shader_module_spirv( - &self, - device_data: &crate::Data, - desc: &ShaderModuleDescriptorSpirV<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = unsafe { Context::device_create_shader_module_spirv(self, device_data, desc) }; - Box::new(data) as _ - } - - fn device_create_bind_group_layout( - &self, - device_data: &crate::Data, - desc: &BindGroupLayoutDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_bind_group_layout(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_bind_group( - &self, - device_data: &crate::Data, - desc: &BindGroupDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_bind_group(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_pipeline_layout( - &self, - device_data: &crate::Data, - desc: &PipelineLayoutDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_pipeline_layout(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_render_pipeline( - &self, - device_data: &crate::Data, - desc: &RenderPipelineDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_render_pipeline(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_compute_pipeline( - &self, - device_data: &crate::Data, - desc: &ComputePipelineDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_compute_pipeline(self, device_data, desc); - Box::new(data) as _ - } - - unsafe fn device_create_pipeline_cache( - &self, - device_data: &crate::Data, - desc: &PipelineCacheDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = unsafe { Context::device_create_pipeline_cache(self, device_data, desc) }; - Box::new(data) as _ - } - - fn device_create_buffer( - &self, - device_data: &crate::Data, - desc: &BufferDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_buffer(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_texture( - &self, - device_data: &crate::Data, - desc: &TextureDescriptor<'_>, - ) -> Box { - let 
device_data = downcast_ref(device_data); - let data = Context::device_create_texture(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_sampler( - &self, - device_data: &crate::Data, - desc: &SamplerDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_sampler(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_query_set( - &self, - device_data: &crate::Data, - desc: &QuerySetDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_query_set(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_command_encoder( - &self, - device_data: &crate::Data, - desc: &CommandEncoderDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_command_encoder(self, device_data, desc); - Box::new(data) as _ - } - - fn device_create_render_bundle_encoder( - &self, - device_data: &crate::Data, - desc: &RenderBundleEncoderDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_render_bundle_encoder(self, device_data, desc); - Box::new(data) as _ - } - - fn device_drop(&self, device_data: &crate::Data) { - let device_data = downcast_ref(device_data); - Context::device_drop(self, device_data) - } - - fn device_set_device_lost_callback( - &self, - device_data: &crate::Data, - device_lost_callback: DeviceLostCallback, - ) { - let device_data = downcast_ref(device_data); - Context::device_set_device_lost_callback(self, device_data, device_lost_callback) - } - - fn device_destroy(&self, device_data: &crate::Data) { - let device_data = downcast_ref(device_data); - Context::device_destroy(self, device_data) - } - - fn queue_drop(&self, queue_data: &crate::Data) { - let queue_data = downcast_ref(queue_data); - Context::queue_drop(self, queue_data) - } - - fn device_poll(&self, device_data: &crate::Data, maintain: Maintain) -> MaintainResult { - let device_data = downcast_ref(device_data); - Context::device_poll(self, device_data, maintain) - } - - fn device_on_uncaptured_error( - &self, - device_data: &crate::Data, - handler: Box, - ) { - let device_data = downcast_ref(device_data); - Context::device_on_uncaptured_error(self, device_data, handler) - } - - fn device_push_error_scope(&self, device_data: &crate::Data, filter: ErrorFilter) { - let device_data = downcast_ref(device_data); - Context::device_push_error_scope(self, device_data, filter) - } - - fn device_pop_error_scope(&self, device_data: &crate::Data) -> Pin { - let device_data = downcast_ref(device_data); - Box::pin(Context::device_pop_error_scope(self, device_data)) - } - - fn buffer_map_async( - &self, - buffer_data: &crate::Data, - mode: MapMode, - range: Range, - callback: BufferMapCallback, - ) { - let buffer_data = downcast_ref(buffer_data); - Context::buffer_map_async(self, buffer_data, mode, range, callback) - } - - fn buffer_get_mapped_range( - &self, - buffer_data: &crate::Data, - sub_range: Range, - ) -> Box { - let buffer_data = downcast_ref(buffer_data); - Context::buffer_get_mapped_range(self, buffer_data, sub_range) - } - - fn buffer_unmap(&self, buffer_data: &crate::Data) { - let buffer_data = downcast_ref(buffer_data); - Context::buffer_unmap(self, buffer_data) - } - - fn shader_get_compilation_info( - &self, - shader_data: &crate::Data, - ) -> Pin { - let shader_data = downcast_ref(shader_data); - let future = Context::shader_get_compilation_info(self, 
shader_data); - Box::pin(future) - } - - fn texture_create_view( - &self, - texture_data: &crate::Data, - desc: &TextureViewDescriptor<'_>, - ) -> Box { - let texture_data = downcast_ref(texture_data); - let data = Context::texture_create_view(self, texture_data, desc); - Box::new(data) as _ - } - - fn surface_drop(&self, surface_data: &crate::Data) { - let surface_data = downcast_ref(surface_data); - Context::surface_drop(self, surface_data) - } - - fn adapter_drop(&self, adapter_data: &crate::Data) { - let adapter_data = downcast_ref(adapter_data); - Context::adapter_drop(self, adapter_data) - } - - fn buffer_destroy(&self, buffer_data: &crate::Data) { - let buffer_data = downcast_ref(buffer_data); - Context::buffer_destroy(self, buffer_data) - } - - fn buffer_drop(&self, buffer_data: &crate::Data) { - let buffer_data = downcast_ref(buffer_data); - Context::buffer_drop(self, buffer_data) - } - - fn texture_destroy(&self, texture_data: &crate::Data) { - let texture_data = downcast_ref(texture_data); - Context::texture_destroy(self, texture_data) - } - - fn texture_drop(&self, texture_data: &crate::Data) { - let texture_data = downcast_ref(texture_data); - Context::texture_drop(self, texture_data) - } - - fn texture_view_drop(&self, texture_view_data: &crate::Data) { - let texture_view_data = downcast_ref(texture_view_data); - Context::texture_view_drop(self, texture_view_data) - } - - fn sampler_drop(&self, sampler_data: &crate::Data) { - let sampler_data = downcast_ref(sampler_data); - Context::sampler_drop(self, sampler_data) - } - - fn query_set_drop(&self, query_set_data: &crate::Data) { - let query_set_data = downcast_ref(query_set_data); - Context::query_set_drop(self, query_set_data) - } - - fn bind_group_drop(&self, bind_group_data: &crate::Data) { - let bind_group_data = downcast_ref(bind_group_data); - Context::bind_group_drop(self, bind_group_data) - } - - fn bind_group_layout_drop(&self, bind_group_layout_data: &crate::Data) { - let bind_group_layout_data = downcast_ref(bind_group_layout_data); - Context::bind_group_layout_drop(self, bind_group_layout_data) - } - - fn pipeline_layout_drop(&self, pipeline_layout_data: &crate::Data) { - let pipeline_layout_data = downcast_ref(pipeline_layout_data); - Context::pipeline_layout_drop(self, pipeline_layout_data) - } - - fn shader_module_drop(&self, shader_module_data: &crate::Data) { - let shader_module_data = downcast_ref(shader_module_data); - Context::shader_module_drop(self, shader_module_data) - } - - fn command_encoder_drop(&self, command_encoder_data: &crate::Data) { - let command_encoder_data = downcast_ref(command_encoder_data); - Context::command_encoder_drop(self, command_encoder_data) - } - - fn command_buffer_drop(&self, command_buffer_data: &crate::Data) { - let command_buffer_data = downcast_ref(command_buffer_data); - Context::command_buffer_drop(self, command_buffer_data) - } - - fn render_bundle_drop(&self, render_bundle_data: &crate::Data) { - let render_bundle_data = downcast_ref(render_bundle_data); - Context::render_bundle_drop(self, render_bundle_data) - } - - fn compute_pipeline_drop(&self, pipeline_data: &crate::Data) { - let pipeline_data = downcast_ref(pipeline_data); - Context::compute_pipeline_drop(self, pipeline_data) - } - - fn render_pipeline_drop(&self, pipeline_data: &crate::Data) { - let pipeline_data = downcast_ref(pipeline_data); - Context::render_pipeline_drop(self, pipeline_data) - } - - fn pipeline_cache_drop(&self, cache_data: &crate::Data) { - let cache_data = downcast_ref(cache_data); - 
Context::pipeline_cache_drop(self, cache_data) - } - - fn compute_pipeline_get_bind_group_layout( - &self, - pipeline_data: &crate::Data, - index: u32, - ) -> Box { - let pipeline_data = downcast_ref(pipeline_data); - let data = Context::compute_pipeline_get_bind_group_layout(self, pipeline_data, index); - Box::new(data) as _ - } - - fn render_pipeline_get_bind_group_layout( - &self, - pipeline_data: &crate::Data, - index: u32, - ) -> Box { - let pipeline_data = downcast_ref(pipeline_data); - let data = Context::render_pipeline_get_bind_group_layout(self, pipeline_data, index); - Box::new(data) as _ - } - - fn command_encoder_copy_buffer_to_buffer( - &self, - encoder_data: &crate::Data, - source_data: &crate::Data, - source_offset: BufferAddress, - destination_data: &crate::Data, - destination_offset: BufferAddress, - copy_size: BufferAddress, - ) { - let encoder_data = downcast_ref(encoder_data); - let source_data = downcast_ref(source_data); - let destination_data = downcast_ref(destination_data); - Context::command_encoder_copy_buffer_to_buffer( - self, - encoder_data, - source_data, - source_offset, - destination_data, - destination_offset, - copy_size, - ) - } - - fn command_encoder_copy_buffer_to_texture( - &self, - encoder_data: &crate::Data, - source: TexelCopyBufferInfo<'_>, - destination: TexelCopyTextureInfo<'_>, - copy_size: Extent3d, - ) { - let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_copy_buffer_to_texture( - self, - encoder_data, - source, - destination, - copy_size, - ) - } - - fn command_encoder_copy_texture_to_buffer( - &self, - encoder_data: &crate::Data, - source: TexelCopyTextureInfo<'_>, - destination: TexelCopyBufferInfo<'_>, - copy_size: Extent3d, - ) { - let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_copy_texture_to_buffer( - self, - encoder_data, - source, - destination, - copy_size, - ) - } - - fn command_encoder_copy_texture_to_texture( - &self, - encoder_data: &crate::Data, - source: TexelCopyTextureInfo<'_>, - destination: TexelCopyTextureInfo<'_>, - copy_size: Extent3d, - ) { - let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_copy_texture_to_texture( - self, - encoder_data, - source, - destination, - copy_size, - ) - } - - fn command_encoder_begin_compute_pass( - &self, - encoder_data: &crate::Data, - desc: &ComputePassDescriptor<'_>, - ) -> Box { - let encoder_data = downcast_ref(encoder_data); - let data = Context::command_encoder_begin_compute_pass(self, encoder_data, desc); - Box::new(data) as _ - } - - fn command_encoder_begin_render_pass( - &self, - encoder_data: &crate::Data, - desc: &RenderPassDescriptor<'_>, - ) -> Box { - let encoder_data = downcast_ref(encoder_data); - let data = Context::command_encoder_begin_render_pass(self, encoder_data, desc); - Box::new(data) as _ - } - - fn command_encoder_finish(&self, encoder_data: &mut crate::Data) -> Box { - let data = Context::command_encoder_finish(self, downcast_mut(encoder_data)); - Box::new(data) as _ - } - - fn command_encoder_clear_texture( - &self, - encoder_data: &crate::Data, - texture_data: &crate::Data, - subresource_range: &ImageSubresourceRange, - ) { - let encoder_data = downcast_ref(encoder_data); - let texture_data = downcast_ref(texture_data); - Context::command_encoder_clear_texture(self, encoder_data, texture_data, subresource_range) - } - - fn command_encoder_clear_buffer( - &self, - encoder_data: &crate::Data, - buffer_data: &crate::Data, - offset: BufferAddress, - size: Option, - ) { - let 
encoder_data = downcast_ref(encoder_data); - let buffer_data = downcast_ref(buffer_data); - Context::command_encoder_clear_buffer(self, encoder_data, buffer_data, offset, size) - } - - fn command_encoder_insert_debug_marker(&self, encoder_data: &crate::Data, label: &str) { - let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_insert_debug_marker(self, encoder_data, label) - } - - fn command_encoder_push_debug_group(&self, encoder_data: &crate::Data, label: &str) { - let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_push_debug_group(self, encoder_data, label) - } - - fn command_encoder_pop_debug_group(&self, encoder_data: &crate::Data) { - let encoder_data = downcast_ref(encoder_data); - Context::command_encoder_pop_debug_group(self, encoder_data) - } - - fn command_encoder_write_timestamp( - &self, - encoder_data: &crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ) { - let encoder_data = downcast_ref(encoder_data); - let query_set_data = downcast_ref(query_set_data); - Context::command_encoder_write_timestamp(self, encoder_data, query_set_data, query_index) - } - - fn command_encoder_resolve_query_set( - &self, - encoder_data: &crate::Data, - query_set_data: &crate::Data, - first_query: u32, - query_count: u32, - destination_data: &crate::Data, - destination_offset: BufferAddress, - ) { - let encoder_data = downcast_ref(encoder_data); - let query_set_data = downcast_ref(query_set_data); - let destination_data = downcast_ref(destination_data); - Context::command_encoder_resolve_query_set( - self, - encoder_data, - query_set_data, - first_query, - query_count, - destination_data, - destination_offset, - ) - } - - fn render_bundle_encoder_finish( - &self, - encoder_data: Box, - desc: &RenderBundleDescriptor<'_>, - ) -> Box { - let encoder_data = *encoder_data.downcast().unwrap(); - let data = Context::render_bundle_encoder_finish(self, encoder_data, desc); - Box::new(data) as _ - } - - fn queue_write_buffer( - &self, - queue_data: &crate::Data, - buffer_data: &crate::Data, - offset: BufferAddress, - data: &[u8], - ) { - let queue_data = downcast_ref(queue_data); - let buffer_data = downcast_ref(buffer_data); - Context::queue_write_buffer(self, queue_data, buffer_data, offset, data) - } - - fn queue_validate_write_buffer( - &self, - queue_data: &crate::Data, - buffer_data: &crate::Data, - offset: wgt::BufferAddress, - size: wgt::BufferSize, - ) -> Option<()> { - let queue_data = downcast_ref(queue_data); - let buffer_data = downcast_ref(buffer_data); - Context::queue_validate_write_buffer(self, queue_data, buffer_data, offset, size) - } - - fn queue_create_staging_buffer( - &self, - queue_data: &crate::Data, - size: BufferSize, - ) -> Option> { - let queue_data = downcast_ref(queue_data); - Context::queue_create_staging_buffer(self, queue_data, size) - } - - fn queue_write_staging_buffer( - &self, - queue_data: &crate::Data, - buffer_data: &crate::Data, - offset: BufferAddress, - staging_buffer: &dyn QueueWriteBuffer, - ) { - let queue_data = downcast_ref(queue_data); - let buffer_data = downcast_ref(buffer_data); - Context::queue_write_staging_buffer(self, queue_data, buffer_data, offset, staging_buffer) - } - - fn queue_write_texture( - &self, - queue_data: &crate::Data, - texture: TexelCopyTextureInfo<'_>, - data: &[u8], - data_layout: TexelCopyBufferLayout, - size: Extent3d, - ) { - let queue_data = downcast_ref(queue_data); - Context::queue_write_texture(self, queue_data, texture, data, data_layout, size) - } - - 
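All of the deleted wrappers in this block share one shape: the type-erased `&crate::Data` (a `dyn Any` under the hood) is downcast back to the backend's concrete type, and the call is then forwarded to the typed `Context` method. A minimal, self-contained sketch of that pattern — `CoreQueue` and `queue_timestamp_period` are illustrative stand-ins, not wgpu types:

use std::any::Any;

// Stand-in for a backend's concrete resource type.
struct CoreQueue {
    timestamp_period: f32,
}

// Equivalent of the `downcast_ref` helper used throughout the deleted code.
fn downcast_ref<T: 'static>(data: &dyn Any) -> &T {
    data.downcast_ref::<T>().expect("resource from the wrong backend")
}

// Every erased method had to recover the concrete type before doing any work.
fn queue_timestamp_period(queue_data: &dyn Any) -> f32 {
    let queue: &CoreQueue = downcast_ref(queue_data);
    queue.timestamp_period
}

fn main() {
    let erased: Box<dyn Any> = Box::new(CoreQueue { timestamp_period: 1.0 });
    assert_eq!(queue_timestamp_period(erased.as_ref()), 1.0);
}

The `dispatch` module introduced later in this patch replaces this per-call downcasting with enums whose variants hold the concrete backend types directly.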
#[cfg(any(webgpu, webgl))] - fn queue_copy_external_image_to_texture( - &self, - queue_data: &crate::Data, - source: &wgt::CopyExternalImageSourceInfo, - dest: crate::CopyExternalImageDestInfo<'_>, - size: wgt::Extent3d, - ) { - let queue_data = downcast_ref(queue_data); - Context::queue_copy_external_image_to_texture(self, queue_data, source, dest, size) - } - - fn queue_submit( - &self, - queue_data: &crate::Data, - command_buffers: &mut dyn Iterator>, - ) -> Arc { - let queue_data = downcast_ref(queue_data); - let command_buffers = command_buffers.map(|data| *data.downcast().unwrap()); - let data = Context::queue_submit(self, queue_data, command_buffers); - Arc::new(data) as _ - } - - fn queue_get_timestamp_period(&self, queue_data: &crate::Data) -> f32 { - let queue_data = downcast_ref(queue_data); - Context::queue_get_timestamp_period(self, queue_data) - } - - fn queue_on_submitted_work_done( - &self, - queue_data: &crate::Data, - callback: SubmittedWorkDoneCallback, - ) { - let queue_data = downcast_ref(queue_data); - Context::queue_on_submitted_work_done(self, queue_data, callback) - } - - fn device_start_capture(&self, device_data: &crate::Data) { - let device_data = downcast_ref(device_data); - Context::device_start_capture(self, device_data) - } - - fn device_stop_capture(&self, device_data: &crate::Data) { - let device_data = downcast_ref(device_data); - Context::device_stop_capture(self, device_data) - } - - fn device_get_internal_counters(&self, device_data: &crate::Data) -> wgt::InternalCounters { - let device_data = downcast_ref(device_data); - Context::device_get_internal_counters(self, device_data) - } - - fn generate_allocator_report(&self, device_data: &crate::Data) -> Option { - let device_data = downcast_ref(device_data); - Context::device_generate_allocator_report(self, device_data) - } - - fn pipeline_cache_get_data(&self, cache_data: &crate::Data) -> Option> { - let cache_data = downcast_ref::(cache_data); - Context::pipeline_cache_get_data(self, cache_data) - } - - fn compute_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data) { - let pass_data = downcast_mut::(pass_data); - let pipeline_data = downcast_ref(pipeline_data); - Context::compute_pass_set_pipeline(self, pass_data, pipeline_data) - } - - fn compute_pass_set_bind_group( - &self, - pass_data: &mut crate::Data, - index: u32, - bind_group_data: Option<&crate::Data>, - offsets: &[DynamicOffset], - ) { - let pass_data = downcast_mut::(pass_data); - let bg = bind_group_data.map(downcast_ref); - Context::compute_pass_set_bind_group(self, pass_data, index, bg, offsets) - } - - fn compute_pass_set_push_constants( - &self, - pass_data: &mut crate::Data, - offset: u32, - data: &[u8], - ) { - let pass_data = downcast_mut::(pass_data); - Context::compute_pass_set_push_constants(self, pass_data, offset, data) - } - - fn compute_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str) { - let pass_data = downcast_mut::(pass_data); - Context::compute_pass_insert_debug_marker(self, pass_data, label) - } - - fn compute_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str) { - let pass_data = downcast_mut::(pass_data); - Context::compute_pass_push_debug_group(self, pass_data, group_label) - } - - fn compute_pass_pop_debug_group(&self, pass_data: &mut crate::Data) { - let pass_data = downcast_mut::(pass_data); - Context::compute_pass_pop_debug_group(self, pass_data) - } - - fn compute_pass_write_timestamp( - &self, - pass_data: &mut crate::Data, - 
query_set_data: &crate::Data, - query_index: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let query_set_data = downcast_ref(query_set_data); - Context::compute_pass_write_timestamp(self, pass_data, query_set_data, query_index) - } - - fn compute_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let query_set_data = downcast_ref(query_set_data); - Context::compute_pass_begin_pipeline_statistics_query( - self, - pass_data, - query_set_data, - query_index, - ) - } - - fn compute_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data) { - let pass_data = downcast_mut::(pass_data); - Context::compute_pass_end_pipeline_statistics_query(self, pass_data) - } - - fn compute_pass_dispatch_workgroups( - &self, - pass_data: &mut crate::Data, - x: u32, - y: u32, - z: u32, - ) { - let pass_data = downcast_mut::(pass_data); - Context::compute_pass_dispatch_workgroups(self, pass_data, x, y, z) - } - - fn compute_pass_dispatch_workgroups_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ) { - let pass_data = downcast_mut::(pass_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::compute_pass_dispatch_workgroups_indirect( - self, - pass_data, - indirect_buffer_data, - indirect_offset, - ) - } - - fn compute_pass_end(&self, pass_data: &mut crate::Data) { - let pass_data = downcast_mut(pass_data); - Context::compute_pass_end(self, pass_data) - } - - fn render_bundle_encoder_set_pipeline( - &self, - encoder_data: &mut crate::Data, - pipeline_data: &crate::Data, - ) { - let encoder_data = downcast_mut::(encoder_data); - let pipeline_data = downcast_ref(pipeline_data); - Context::render_bundle_encoder_set_pipeline(self, encoder_data, pipeline_data) - } - - fn render_bundle_encoder_set_bind_group( - &self, - encoder_data: &mut crate::Data, - index: u32, - bind_group_data: Option<&crate::Data>, - offsets: &[DynamicOffset], - ) { - let encoder_data = downcast_mut::(encoder_data); - let bg = bind_group_data.map(downcast_ref); - Context::render_bundle_encoder_set_bind_group(self, encoder_data, index, bg, offsets) - } - - fn render_bundle_encoder_set_index_buffer( - &self, - encoder_data: &mut crate::Data, - buffer_data: &crate::Data, - index_format: IndexFormat, - offset: BufferAddress, - size: Option, - ) { - let encoder_data = downcast_mut::(encoder_data); - let buffer_data = downcast_ref(buffer_data); - Context::render_bundle_encoder_set_index_buffer( - self, - encoder_data, - buffer_data, - index_format, - offset, - size, - ) - } - - fn render_bundle_encoder_set_vertex_buffer( - &self, - encoder_data: &mut crate::Data, - slot: u32, - buffer_data: &crate::Data, - offset: BufferAddress, - size: Option, - ) { - let encoder_data = downcast_mut::(encoder_data); - let buffer_data = downcast_ref(buffer_data); - Context::render_bundle_encoder_set_vertex_buffer( - self, - encoder_data, - slot, - buffer_data, - offset, - size, - ) - } - - fn render_bundle_encoder_set_push_constants( - &self, - encoder_data: &mut crate::Data, - stages: ShaderStages, - offset: u32, - data: &[u8], - ) { - let encoder_data = downcast_mut::(encoder_data); - Context::render_bundle_encoder_set_push_constants(self, encoder_data, stages, offset, data) - } - - fn render_bundle_encoder_draw( - &self, - encoder_data: &mut crate::Data, - vertices: Range, - instances: Range, - ) { - let 
encoder_data = downcast_mut::(encoder_data); - Context::render_bundle_encoder_draw(self, encoder_data, vertices, instances) - } - - fn render_bundle_encoder_draw_indexed( - &self, - encoder_data: &mut crate::Data, - indices: Range, - base_vertex: i32, - instances: Range, - ) { - let encoder_data = downcast_mut::(encoder_data); - Context::render_bundle_encoder_draw_indexed( - self, - encoder_data, - indices, - base_vertex, - instances, - ) - } - - fn render_bundle_encoder_draw_indirect( - &self, - encoder_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ) { - let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_bundle_encoder_draw_indirect( - self, - encoder_data, - indirect_buffer_data, - indirect_offset, - ) - } - - fn render_bundle_encoder_draw_indexed_indirect( - &self, - encoder_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ) { - let encoder_data = downcast_mut::(encoder_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_bundle_encoder_draw_indexed_indirect( - self, - encoder_data, - indirect_buffer_data, - indirect_offset, - ) - } - - fn render_pass_set_pipeline(&self, pass_data: &mut crate::Data, pipeline_data: &crate::Data) { - let pass_data = downcast_mut::(pass_data); - let pipeline_data = downcast_ref(pipeline_data); - Context::render_pass_set_pipeline(self, pass_data, pipeline_data) - } - - fn render_pass_set_bind_group( - &self, - pass_data: &mut crate::Data, - index: u32, - bind_group_data: Option<&crate::Data>, - offsets: &[DynamicOffset], - ) { - let pass_data = downcast_mut::(pass_data); - let bg = bind_group_data.map(downcast_ref); - Context::render_pass_set_bind_group(self, pass_data, index, bg, offsets) - } - - fn render_pass_set_index_buffer( - &self, - pass_data: &mut crate::Data, - buffer_data: &crate::Data, - index_format: IndexFormat, - offset: BufferAddress, - size: Option, - ) { - let pass_data = downcast_mut::(pass_data); - let buffer_data = downcast_ref(buffer_data); - Context::render_pass_set_index_buffer( - self, - pass_data, - buffer_data, - index_format, - offset, - size, - ) - } - - fn render_pass_set_vertex_buffer( - &self, - pass_data: &mut crate::Data, - slot: u32, - buffer_data: &crate::Data, - offset: BufferAddress, - size: Option, - ) { - let pass_data = downcast_mut::(pass_data); - let buffer_data = downcast_ref(buffer_data); - Context::render_pass_set_vertex_buffer(self, pass_data, slot, buffer_data, offset, size) - } - - fn render_pass_set_push_constants( - &self, - pass_data: &mut crate::Data, - stages: ShaderStages, - offset: u32, - data: &[u8], - ) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_push_constants(self, pass_data, stages, offset, data) - } - - fn render_pass_draw( - &self, - pass_data: &mut crate::Data, - vertices: Range, - instances: Range, - ) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_draw(self, pass_data, vertices, instances) - } - - fn render_pass_draw_indexed( - &self, - pass_data: &mut crate::Data, - indices: Range, - base_vertex: i32, - instances: Range, - ) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_draw_indexed(self, pass_data, indices, base_vertex, instances) - } - - fn render_pass_draw_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ) { - let pass_data 
= downcast_mut::(pass_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_pass_draw_indirect(self, pass_data, indirect_buffer_data, indirect_offset) - } - - fn render_pass_draw_indexed_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - ) { - let pass_data = downcast_mut::(pass_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_pass_draw_indexed_indirect( - self, - pass_data, - indirect_buffer_data, - indirect_offset, - ) - } - - fn render_pass_multi_draw_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_pass_multi_draw_indirect( - self, - pass_data, - indirect_buffer_data, - indirect_offset, - count, - ) - } - - fn render_pass_multi_draw_indexed_indirect( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - Context::render_pass_multi_draw_indexed_indirect( - self, - pass_data, - indirect_buffer_data, - indirect_offset, - count, - ) - } - - fn render_pass_multi_draw_indirect_count( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer_data = downcast_ref(count_buffer_data); - Context::render_pass_multi_draw_indirect_count( - self, - pass_data, - indirect_buffer_data, - indirect_offset, - count_buffer_data, - count_buffer_offset, - max_count, - ) - } - - fn render_pass_multi_draw_indexed_indirect_count( - &self, - pass_data: &mut crate::Data, - indirect_buffer_data: &crate::Data, - indirect_offset: BufferAddress, - count_buffer_data: &crate::Data, - count_buffer_offset: BufferAddress, - max_count: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let indirect_buffer_data = downcast_ref(indirect_buffer_data); - let count_buffer_data = downcast_ref(count_buffer_data); - Context::render_pass_multi_draw_indexed_indirect_count( - self, - pass_data, - indirect_buffer_data, - indirect_offset, - count_buffer_data, - count_buffer_offset, - max_count, - ) - } - - fn render_pass_set_blend_constant(&self, pass_data: &mut crate::Data, color: Color) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_blend_constant(self, pass_data, color) - } - - fn render_pass_set_scissor_rect( - &self, - pass_data: &mut crate::Data, - x: u32, - y: u32, - width: u32, - height: u32, - ) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_scissor_rect(self, pass_data, x, y, width, height) - } - - fn render_pass_set_viewport( - &self, - pass_data: &mut crate::Data, - x: f32, - y: f32, - width: f32, - height: f32, - min_depth: f32, - max_depth: f32, - ) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_set_viewport( - self, pass_data, x, y, width, height, min_depth, max_depth, - ) - } - - fn render_pass_set_stencil_reference(&self, pass_data: &mut crate::Data, reference: u32) { - let pass_data = downcast_mut::(pass_data); - 
Context::render_pass_set_stencil_reference(self, pass_data, reference) - } - - fn render_pass_insert_debug_marker(&self, pass_data: &mut crate::Data, label: &str) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_insert_debug_marker(self, pass_data, label) - } - - fn render_pass_push_debug_group(&self, pass_data: &mut crate::Data, group_label: &str) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_push_debug_group(self, pass_data, group_label) - } - - fn render_pass_pop_debug_group(&self, pass_data: &mut crate::Data) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_pop_debug_group(self, pass_data) - } - - fn render_pass_write_timestamp( - &self, - pass_data: &mut crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let query_set_data = downcast_ref(query_set_data); - Context::render_pass_write_timestamp(self, pass_data, query_set_data, query_index) - } - - fn render_pass_begin_occlusion_query(&self, pass_data: &mut crate::Data, query_index: u32) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_begin_occlusion_query(self, pass_data, query_index) - } - - fn render_pass_end_occlusion_query(&self, pass_data: &mut crate::Data) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_end_occlusion_query(self, pass_data) - } - - fn render_pass_begin_pipeline_statistics_query( - &self, - pass_data: &mut crate::Data, - query_set_data: &crate::Data, - query_index: u32, - ) { - let pass_data = downcast_mut::(pass_data); - let query_set_data = downcast_ref(query_set_data); - Context::render_pass_begin_pipeline_statistics_query( - self, - pass_data, - query_set_data, - query_index, - ) - } - - fn render_pass_end_pipeline_statistics_query(&self, pass_data: &mut crate::Data) { - let pass_data = downcast_mut::(pass_data); - Context::render_pass_end_pipeline_statistics_query(self, pass_data) - } - - fn render_pass_execute_bundles( - &self, - pass_data: &mut crate::Data, - render_bundles: &mut dyn Iterator, - ) { - let pass_data = downcast_mut::(pass_data); - let mut render_bundles = render_bundles.map(downcast_ref); - Context::render_pass_execute_bundles(self, pass_data, &mut render_bundles) - } - - fn render_pass_end(&self, pass_data: &mut crate::Data) { - let pass_data = downcast_mut(pass_data); - Context::render_pass_end(self, pass_data) - } - - fn device_create_blas( - &self, - device_data: &crate::Data, - desc: &crate::CreateBlasDescriptor<'_>, - sizes: wgt::BlasGeometrySizeDescriptors, - ) -> (Option, Box) { - let device_data = downcast_ref(device_data); - let (handle, data) = Context::device_create_blas(self, device_data, desc, sizes); - (handle, Box::new(data) as _) - } - - fn device_create_tlas( - &self, - device_data: &crate::Data, - desc: &crate::CreateTlasDescriptor<'_>, - ) -> Box { - let device_data = downcast_ref(device_data); - let data = Context::device_create_tlas(self, device_data, desc); - Box::new(data) as _ - } - - fn command_encoder_build_acceleration_structures_unsafe_tlas( - &self, - encoder_data: &crate::Data, - blas: &mut dyn Iterator>, - tlas: &mut dyn Iterator>, - ) { - let encoder_data = downcast_ref(encoder_data); - - let blas = blas.into_iter().map(|e| { - let geometries = match e.geometries { - crate::DynContextBlasGeometries::TriangleGeometries(triangle_geometries) => { - let iter = triangle_geometries.into_iter().map(|tg| { - crate::ContextBlasTriangleGeometry { - vertex_buffer: downcast_ref(tg.vertex_buffer), - 
index_buffer: tg.index_buffer.map(downcast_ref), - transform_buffer: tg.transform_buffer.map(downcast_ref), - size: tg.size, - transform_buffer_offset: tg.transform_buffer_offset, - first_vertex: tg.first_vertex, - vertex_stride: tg.vertex_stride, - index_buffer_offset: tg.index_buffer_offset, - } - }); - crate::ContextBlasGeometries::TriangleGeometries(Box::new(iter)) - } - }; - crate::ContextBlasBuildEntry { - blas_data: downcast_ref(e.blas_data), - // blas_data: downcast_ref(e.blas_data), - geometries, - } - }); - - let tlas = tlas - .into_iter() - .map( - |e: crate::DynContextTlasBuildEntry<'_>| crate::ContextTlasBuildEntry { - tlas_data: downcast_ref(e.tlas_data), - instance_buffer_data: downcast_ref(e.instance_buffer_data), - instance_count: e.instance_count, - }, - ); - - Context::command_encoder_build_acceleration_structures_unsafe_tlas( - self, - encoder_data, - blas, - tlas, - ) - } - - fn command_encoder_build_acceleration_structures( - &self, - encoder_data: &crate::Data, - blas: &mut dyn Iterator>, - tlas: &mut dyn Iterator>, - ) { - let encoder_data = downcast_ref(encoder_data); - - let blas = blas.into_iter().map(|e| { - let geometries = match e.geometries { - crate::DynContextBlasGeometries::TriangleGeometries(triangle_geometries) => { - let iter = triangle_geometries.into_iter().map(|tg| { - crate::ContextBlasTriangleGeometry { - vertex_buffer: downcast_ref(tg.vertex_buffer), - index_buffer: tg.index_buffer.map(downcast_ref), - transform_buffer: tg.transform_buffer.map(downcast_ref), - size: tg.size, - transform_buffer_offset: tg.transform_buffer_offset, - first_vertex: tg.first_vertex, - vertex_stride: tg.vertex_stride, - index_buffer_offset: tg.index_buffer_offset, - } - }); - crate::ContextBlasGeometries::TriangleGeometries(Box::new(iter)) - } - }; - crate::ContextBlasBuildEntry { - blas_data: downcast_ref(e.blas_data), - // blas_data: downcast_ref(e.blas_data), - geometries, - } - }); - - let tlas = tlas.into_iter().map(|e: crate::DynContextTlasPackage<'_>| { - let instances = - e.instances - .map(|instance: Option>| { - instance.map(|instance| crate::ContextTlasInstance { - blas_data: downcast_ref(instance.blas), - transform: instance.transform, - custom_index: instance.custom_index, - mask: instance.mask, - }) - }); - crate::ContextTlasPackage { - tlas_data: downcast_ref(e.tlas_data), - instances: Box::new(instances), - lowest_unmodified: e.lowest_unmodified, - } - }); - - Context::command_encoder_build_acceleration_structures(self, encoder_data, blas, tlas) - } - - fn blas_destroy(&self, blas_data: &crate::Data) { - let blas_data = downcast_ref(blas_data); - Context::blas_destroy(self, blas_data) - } - - fn blas_drop(&self, blas_data: &crate::Data) { - let blas_data = downcast_ref(blas_data); - Context::blas_drop(self, blas_data) - } - - fn tlas_destroy(&self, tlas_data: &crate::Data) { - let tlas_data = downcast_ref(tlas_data); - Context::tlas_destroy(self, tlas_data) - } - - fn tlas_drop(&self, tlas_data: &crate::Data) { - let tlas_data = downcast_ref(tlas_data); - Context::tlas_drop(self, tlas_data) - } -} - -pub trait QueueWriteBuffer: WasmNotSendSync + Debug { - fn slice(&self) -> &[u8]; - - fn slice_mut(&mut self) -> &mut [u8]; - - #[cfg(not(target_os = "emscripten"))] - fn as_any(&self) -> &dyn Any; -} - -pub trait BufferMappedRange: WasmNotSendSync + Debug { - fn slice(&self) -> &[u8]; - fn slice_mut(&mut self) -> &mut [u8]; -} - -#[cfg(test)] -mod tests { - use super::DynContext; - - fn compiles() {} - - /// Assert that DynContext is object safe. 
-    #[test]
-    fn object_safe() {
-        compiles::<Box<dyn DynContext>>();
-    }
-}
diff --git a/wgpu/src/dispatch.rs b/wgpu/src/dispatch.rs
new file mode 100644
index 0000000000..71826eb429
--- /dev/null
+++ b/wgpu/src/dispatch.rs
@@ -0,0 +1,735 @@
+//! Infrastructure for dispatching calls to the appropriate "backend". The "backends" are:
+//!
+//! - `wgpu_core`: An implementation of the wgpu api on top of various native graphics APIs.
+//! - `webgpu`: An implementation of the wgpu api which calls WebGPU directly.
+//!
+//! The interface traits are all object safe and listed in the `InterfaceTypes` trait.
+//!
+//! The method for dispatching should optimize well if only one backend is compiled in,
+//! as if there were no dispatching at all.
+
+#![allow(drop_bounds)] // This exists to remind implementors to impl drop.
+#![allow(clippy::too_many_arguments)] // It's fine.
+
+use crate::{WasmNotSend, WasmNotSendSync};
+
+use std::{any::Any, fmt::Debug, future::Future, hash::Hash, ops::Range, pin::Pin};
+
+use crate::backend;
+
+/// Create a single trait with the given supertraits and a blanket impl for all types that implement them.
+///
+/// This is useful for creating a trait alias as a shorthand.
+macro_rules! trait_alias {
+    ($name:ident: $($bound:tt)+) => {
+        pub trait $name: $($bound)+ {}
+        impl<T: $($bound)+> $name for T {}
+    };
+}
+
+// Various return futures in the API.
+trait_alias!(RequestAdapterFuture: Future<Output = Option<DispatchAdapter>> + WasmNotSend + 'static);
+trait_alias!(RequestDeviceFuture: Future<Output = Result<(DispatchDevice, DispatchQueue), crate::RequestDeviceError>> + WasmNotSend + 'static);
+trait_alias!(PopErrorScopeFuture: Future<Output = Option<crate::Error>> + WasmNotSend + 'static);
+trait_alias!(ShaderCompilationInfoFuture: Future<Output = crate::CompilationInfo> + WasmNotSend + 'static);
+
+// We can't use trait aliases here, as you can't convert from a dyn Trait to dyn Supertrait _yet_.
+#[cfg(send_sync)]
+pub type BoxDeviceLostCallback = Box<dyn Fn(crate::DeviceLostReason, String) + Send + 'static>;
+#[cfg(not(send_sync))]
+pub type BoxDeviceLostCallback = Box<dyn Fn(crate::DeviceLostReason, String) + 'static>;
+#[cfg(send_sync)]
+pub type BoxSubmittedWorkDoneCallback = Box<dyn FnOnce() + Send + 'static>;
+#[cfg(not(send_sync))]
+pub type BoxSubmittedWorkDoneCallback = Box<dyn FnOnce() + 'static>;
+#[cfg(send_sync)]
+pub type BufferMapCallback = Box<dyn FnOnce(Result<(), crate::BufferAsyncError>) + Send + 'static>;
+#[cfg(not(send_sync))]
+pub type BufferMapCallback = Box<dyn FnOnce(Result<(), crate::BufferAsyncError>) + 'static>;
+
+// Common traits on all the interface traits
+trait_alias!(CommonTraits: Any + Debug + WasmNotSendSync);
+// Non-object-safe traits that are added as a bound on InterfaceTypes.
+trait_alias!(ComparisonTraits: PartialEq + Eq + PartialOrd + Ord + Hash);
+
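A note on the `trait_alias!` macro above: each invocation expands to an empty trait plus a blanket impl, which is what lets the alias stand in for its bound list everywhere below. Roughly, for `CommonTraits` (assuming the `Any`, `Debug`, and `WasmNotSendSync` imports already in scope in this module):

pub trait CommonTraits: Any + Debug + WasmNotSendSync {}
impl<T: Any + Debug + WasmNotSendSync> CommonTraits for T {}

Any type satisfying the three bounds implements the alias automatically.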
+/// Types that represent a "Backend" for the wgpu API.
+pub trait InterfaceTypes {
+    type Instance: InstanceInterface + ComparisonTraits;
+    type Adapter: AdapterInterface + ComparisonTraits;
+    type Device: DeviceInterface + ComparisonTraits;
+    type Queue: QueueInterface + ComparisonTraits;
+    type ShaderModule: ShaderModuleInterface + ComparisonTraits;
+    type BindGroupLayout: BindGroupLayoutInterface + ComparisonTraits;
+    type BindGroup: BindGroupInterface + ComparisonTraits;
+    type TextureView: TextureViewInterface + ComparisonTraits;
+    type Sampler: SamplerInterface + ComparisonTraits;
+    type Buffer: BufferInterface + ComparisonTraits;
+    type Texture: TextureInterface + ComparisonTraits;
+    type Blas: BlasInterface + ComparisonTraits;
+    type Tlas: TlasInterface + ComparisonTraits;
+    type QuerySet: QuerySetInterface + ComparisonTraits;
+    type PipelineLayout: PipelineLayoutInterface + ComparisonTraits;
+    type RenderPipeline: RenderPipelineInterface + ComparisonTraits;
+    type ComputePipeline: ComputePipelineInterface + ComparisonTraits;
+    type PipelineCache: PipelineCacheInterface + ComparisonTraits;
+    type CommandEncoder: CommandEncoderInterface + ComparisonTraits;
+    type ComputePass: ComputePassInterface + ComparisonTraits;
+    type RenderPass: RenderPassInterface + ComparisonTraits;
+    type CommandBuffer: CommandBufferInterface + ComparisonTraits;
+    type RenderBundleEncoder: RenderBundleEncoderInterface + ComparisonTraits;
+    type RenderBundle: RenderBundleInterface + ComparisonTraits;
+    type Surface: SurfaceInterface + ComparisonTraits;
+    type SurfaceOutputDetail: SurfaceOutputDetailInterface + ComparisonTraits;
+    type QueueWriteBuffer: QueueWriteBufferInterface + ComparisonTraits;
+    type BufferMappedRange: BufferMappedRangeInterface + ComparisonTraits;
+}
+
+pub trait InstanceInterface: CommonTraits {
+    fn new(desc: crate::InstanceDescriptor) -> Self
+    where
+        Self: Sized;
+
+    unsafe fn create_surface(
+        &self,
+        target: crate::SurfaceTargetUnsafe,
+    ) -> Result<DispatchSurface, crate::CreateSurfaceError>;
+
+    fn request_adapter(
+        &self,
+        options: &crate::RequestAdapterOptions<'_, '_>,
+    ) -> Pin<Box<dyn RequestAdapterFuture>>;
+
+    fn poll_all_devices(&self, force_wait: bool) -> bool;
+}
+
+pub trait AdapterInterface: CommonTraits {
+    fn request_device(
+        &self,
+        desc: &crate::DeviceDescriptor<'_>,
+        trace_dir: Option<&std::path::Path>,
+    ) -> Pin<Box<dyn RequestDeviceFuture>>;
+
+    fn is_surface_supported(&self, surface: &DispatchSurface) -> bool;
+
+    fn features(&self) -> crate::Features;
+
+    fn limits(&self) -> crate::Limits;
+
+    fn downlevel_capabilities(&self) -> crate::DownlevelCapabilities;
+
+    fn get_info(&self) -> crate::AdapterInfo;
+
+    fn get_texture_format_features(
+        &self,
+        format: crate::TextureFormat,
+    ) -> crate::TextureFormatFeatures;
+
+    fn get_presentation_timestamp(&self) -> crate::PresentationTimestamp;
+}
+
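Because these traits must stay object safe, the async operations return pinned, boxed futures (`Pin<Box<dyn RequestAdapterFuture>>` and friends) instead of using `async fn`. A hedged sketch of how a backend would typically satisfy such a signature — `NullInstance` and the local `Adapter` are hypothetical stand-ins:

use std::{future::Future, pin::Pin};

struct Adapter;
struct NullInstance;

impl NullInstance {
    // Mirrors the shape of `InstanceInterface::request_adapter`: box and pin
    // an async block so the method stays callable through a trait object.
    fn request_adapter(&self) -> Pin<Box<dyn Future<Output = Option<Adapter>>>> {
        Box::pin(async move { Some(Adapter) })
    }
}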
+pub trait DeviceInterface: CommonTraits {
+    fn features(&self) -> crate::Features;
+    fn limits(&self) -> crate::Limits;
+
+    fn create_shader_module(
+        &self,
+        desc: crate::ShaderModuleDescriptor<'_>,
+        shader_bound_checks: wgt::ShaderBoundChecks,
+    ) -> DispatchShaderModule;
+    unsafe fn create_shader_module_spirv(
+        &self,
+        desc: &crate::ShaderModuleDescriptorSpirV<'_>,
+    ) -> DispatchShaderModule;
+    fn create_bind_group_layout(
+        &self,
+        desc: &crate::BindGroupLayoutDescriptor<'_>,
+    ) -> DispatchBindGroupLayout;
+    fn create_bind_group(&self, desc: &crate::BindGroupDescriptor<'_>) -> DispatchBindGroup;
+    fn create_pipeline_layout(
+        &self,
+        desc: &crate::PipelineLayoutDescriptor<'_>,
+    ) -> DispatchPipelineLayout;
+    fn create_render_pipeline(
+        &self,
+        desc: &crate::RenderPipelineDescriptor<'_>,
+    ) -> DispatchRenderPipeline;
+    fn create_compute_pipeline(
+        &self,
+        desc: &crate::ComputePipelineDescriptor<'_>,
+    ) -> DispatchComputePipeline;
+    unsafe fn create_pipeline_cache(
+        &self,
+        desc: &crate::PipelineCacheDescriptor<'_>,
+    ) -> DispatchPipelineCache;
+    fn create_buffer(&self, desc: &crate::BufferDescriptor<'_>) -> DispatchBuffer;
+    fn create_texture(&self, desc: &crate::TextureDescriptor<'_>) -> DispatchTexture;
+    fn create_blas(
+        &self,
+        desc: &crate::CreateBlasDescriptor<'_>,
+        sizes: crate::BlasGeometrySizeDescriptors,
+    ) -> (Option<u64>, DispatchBlas);
+    fn create_tlas(&self, desc: &crate::CreateTlasDescriptor<'_>) -> DispatchTlas;
+    fn create_sampler(&self, desc: &crate::SamplerDescriptor<'_>) -> DispatchSampler;
+    fn create_query_set(&self, desc: &crate::QuerySetDescriptor<'_>) -> DispatchQuerySet;
+    fn create_command_encoder(
+        &self,
+        desc: &crate::CommandEncoderDescriptor<'_>,
+    ) -> DispatchCommandEncoder;
+    fn create_render_bundle_encoder(
+        &self,
+        desc: &crate::RenderBundleEncoderDescriptor<'_>,
+    ) -> DispatchRenderBundleEncoder;
+
+    fn set_device_lost_callback(&self, device_lost_callback: BoxDeviceLostCallback);
+
+    fn on_uncaptured_error(&self, handler: Box<dyn crate::UncapturedErrorHandler>);
+    fn push_error_scope(&self, filter: crate::ErrorFilter);
+    fn pop_error_scope(&self) -> Pin<Box<dyn PopErrorScopeFuture>>;
+
+    fn start_capture(&self);
+    fn stop_capture(&self);
+
+    fn poll(&self, maintain: crate::Maintain) -> crate::MaintainResult;
+
+    fn get_internal_counters(&self) -> crate::InternalCounters;
+    fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport>;
+
+    fn destroy(&self);
+}
+
+pub trait QueueInterface: CommonTraits {
+    fn write_buffer(&self, buffer: &DispatchBuffer, offset: crate::BufferAddress, data: &[u8]);
+
+    fn create_staging_buffer(&self, size: crate::BufferSize) -> Option<DispatchQueueWriteBuffer>;
+    fn validate_write_buffer(
+        &self,
+        buffer: &DispatchBuffer,
+        offset: wgt::BufferAddress,
+        size: wgt::BufferSize,
+    ) -> Option<()>;
+    fn write_staging_buffer(
+        &self,
+        buffer: &DispatchBuffer,
+        offset: crate::BufferAddress,
+        staging_buffer: &DispatchQueueWriteBuffer,
+    );
+
+    fn write_texture(
+        &self,
+        texture: crate::TexelCopyTextureInfo<'_>,
+        data: &[u8],
+        data_layout: crate::TexelCopyBufferLayout,
+        size: crate::Extent3d,
+    );
+    #[cfg(any(webgpu, webgl))]
+    fn copy_external_image_to_texture(
+        &self,
+        source: &wgt::CopyExternalImageSourceInfo,
+        dest: wgt::CopyExternalImageDestInfo<&crate::api::Texture>,
+        size: crate::Extent3d,
+    );
+
+    fn submit(&self, command_buffers: &mut dyn Iterator<Item = DispatchCommandBuffer>) -> u64;
+
+    fn get_timestamp_period(&self) -> f32;
+    fn on_submitted_work_done(&self, callback: BoxSubmittedWorkDoneCallback);
+}
+
+pub trait ShaderModuleInterface: CommonTraits {
+    fn get_compilation_info(&self) -> Pin<Box<dyn ShaderCompilationInfoFuture>>;
+}
+pub trait BindGroupLayoutInterface: CommonTraits {}
+pub trait BindGroupInterface: CommonTraits {}
+pub trait TextureViewInterface: CommonTraits {}
+pub trait SamplerInterface: CommonTraits {}
+pub trait BufferInterface: CommonTraits {
+    fn map_async(
+        &self,
+        mode: crate::MapMode,
+        range: Range<crate::BufferAddress>,
+        callback: BufferMapCallback,
+    );
+    fn get_mapped_range(&self, sub_range: Range<crate::BufferAddress>)
+        -> DispatchBufferMappedRange;
+    #[cfg(webgpu)]
+    fn get_mapped_range_as_array_buffer(
+        &self,
+        sub_range: Range<wgt::BufferAddress>,
+    ) -> Option<js_sys::ArrayBuffer>;
+
+    fn unmap(&self);
+
+    fn destroy(&self);
+}
+pub trait TextureInterface: CommonTraits {
+    fn create_view(&self, desc: &crate::TextureViewDescriptor<'_>) -> DispatchTextureView;
+
+    fn destroy(&self);
+}
+pub trait BlasInterface: CommonTraits {
+    fn destroy(&self);
+}
+pub trait TlasInterface: CommonTraits {
+    fn destroy(&self);
+}
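`BufferInterface::map_async` above keeps the callback-based style rather than returning a future. A sketch of driving such a callback to completion with a channel; `FakeBuffer` and the simplified callback type are illustrative, not wgpu's actual types:

use std::sync::mpsc;

type MapCallback = Box<dyn FnOnce(Result<(), String>) + Send + 'static>;

struct FakeBuffer;

impl FakeBuffer {
    fn map_async(&self, callback: MapCallback) {
        // A real backend would enqueue the callback and fire it once the
        // GPU has finished making the range visible to the CPU.
        callback(Ok(()));
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    FakeBuffer.map_async(Box::new(move |result| {
        tx.send(result).unwrap();
    }));
    assert!(rx.recv().unwrap().is_ok());
}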
+pub trait QuerySetInterface: CommonTraits {}
+pub trait PipelineLayoutInterface: CommonTraits {}
+pub trait RenderPipelineInterface: CommonTraits {
+    fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout;
+}
+pub trait ComputePipelineInterface: CommonTraits {
+    fn get_bind_group_layout(&self, index: u32) -> DispatchBindGroupLayout;
+}
+pub trait PipelineCacheInterface: CommonTraits {
+    fn get_data(&self) -> Option<Vec<u8>>;
+}
+pub trait CommandEncoderInterface: CommonTraits {
+    fn copy_buffer_to_buffer(
+        &self,
+        source: &DispatchBuffer,
+        source_offset: crate::BufferAddress,
+        destination: &DispatchBuffer,
+        destination_offset: crate::BufferAddress,
+        copy_size: crate::BufferAddress,
+    );
+    fn copy_buffer_to_texture(
+        &self,
+        source: crate::TexelCopyBufferInfo<'_>,
+        destination: crate::TexelCopyTextureInfo<'_>,
+        copy_size: crate::Extent3d,
+    );
+    fn copy_texture_to_buffer(
+        &self,
+        source: crate::TexelCopyTextureInfo<'_>,
+        destination: crate::TexelCopyBufferInfo<'_>,
+        copy_size: crate::Extent3d,
+    );
+    fn copy_texture_to_texture(
+        &self,
+        source: crate::TexelCopyTextureInfo<'_>,
+        destination: crate::TexelCopyTextureInfo<'_>,
+        copy_size: crate::Extent3d,
+    );
+
+    fn begin_compute_pass(&self, desc: &crate::ComputePassDescriptor<'_>) -> DispatchComputePass;
+    fn begin_render_pass(&self, desc: &crate::RenderPassDescriptor<'_>) -> DispatchRenderPass;
+    fn finish(&mut self) -> DispatchCommandBuffer;
+
+    fn clear_texture(
+        &self,
+        texture: &DispatchTexture,
+        subresource_range: &crate::ImageSubresourceRange,
+    );
+    fn clear_buffer(
+        &self,
+        buffer: &DispatchBuffer,
+        offset: crate::BufferAddress,
+        size: Option<crate::BufferSize>,
+    );
+
+    fn insert_debug_marker(&self, label: &str);
+    fn push_debug_group(&self, label: &str);
+    fn pop_debug_group(&self);
+
+    fn write_timestamp(&self, query_set: &DispatchQuerySet, query_index: u32);
+    fn resolve_query_set(
+        &self,
+        query_set: &DispatchQuerySet,
+        first_query: u32,
+        query_count: u32,
+        destination: &DispatchBuffer,
+        destination_offset: crate::BufferAddress,
+    );
+
+    fn build_acceleration_structures_unsafe_tlas<'a>(
+        &self,
+        blas: &mut dyn Iterator<Item = &'a crate::BlasBuildEntry<'a>>,
+        tlas: &mut dyn Iterator<Item = &'a crate::TlasBuildEntry<'a>>,
+    );
+    fn build_acceleration_structures<'a>(
+        &self,
+        blas: &mut dyn Iterator<Item = &'a crate::BlasBuildEntry<'a>>,
+        tlas: &mut dyn Iterator<Item = &'a crate::TlasPackage>,
+    );
+}
+pub trait ComputePassInterface: CommonTraits {
+    fn set_pipeline(&mut self, pipeline: &DispatchComputePipeline);
+    fn set_bind_group(
+        &mut self,
+        index: u32,
+        bind_group: Option<&DispatchBindGroup>,
+        offsets: &[crate::DynamicOffset],
+    );
+    fn set_push_constants(&mut self, offset: u32, data: &[u8]);
+
+    fn insert_debug_marker(&mut self, label: &str);
+    fn push_debug_group(&mut self, group_label: &str);
+    fn pop_debug_group(&mut self);
+
+    fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32);
+    fn begin_pipeline_statistics_query(&mut self, query_set: &DispatchQuerySet, query_index: u32);
+    fn end_pipeline_statistics_query(&mut self);
+
+    fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32);
+    fn dispatch_workgroups_indirect(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+    );
+    fn end(&mut self);
+}
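Unlike the deleted `DynContext` methods, which threaded a `pass_data: &mut crate::Data` blob through every call, the pass traits take `&mut self` directly. A sketch of what recording then looks like from the wrapper's side, assuming a `DispatchComputePass` and `DispatchComputePipeline` are already in hand (the `Deref`/`DerefMut` impls that make this compile are generated at the bottom of this file; this fragment is not self-contained):

fn record(pass: &mut DispatchComputePass, pipeline: &DispatchComputePipeline) {
    // Both calls auto-deref through `DerefMut` to `dyn ComputePassInterface`.
    pass.set_pipeline(pipeline);
    pass.dispatch_workgroups(64, 1, 1);
    pass.end();
}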
+pub trait RenderPassInterface: CommonTraits {
+    fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline);
+    fn set_bind_group(
+        &mut self,
+        index: u32,
+        bind_group: Option<&DispatchBindGroup>,
+        offsets: &[crate::DynamicOffset],
+    );
+    fn set_index_buffer(
+        &mut self,
+        buffer: &DispatchBuffer,
+        index_format: crate::IndexFormat,
+        offset: crate::BufferAddress,
+        size: Option<crate::BufferSize>,
+    );
+    fn set_vertex_buffer(
+        &mut self,
+        slot: u32,
+        buffer: &DispatchBuffer,
+        offset: crate::BufferAddress,
+        size: Option<crate::BufferSize>,
+    );
+    fn set_push_constants(&mut self, stages: crate::ShaderStages, offset: u32, data: &[u8]);
+    fn set_blend_constant(&mut self, color: crate::Color);
+    fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32);
+    fn set_viewport(
+        &mut self,
+        x: f32,
+        y: f32,
+        width: f32,
+        height: f32,
+        min_depth: f32,
+        max_depth: f32,
+    );
+    fn set_stencil_reference(&mut self, reference: u32);
+
+    fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>);
+    fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>);
+    fn draw_indirect(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+    );
+    fn draw_indexed_indirect(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+    );
+
+    fn multi_draw_indirect(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+        count: u32,
+    );
+    fn multi_draw_indexed_indirect(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+        count: u32,
+    );
+    fn multi_draw_indirect_count(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+        count_buffer: &DispatchBuffer,
+        count_buffer_offset: crate::BufferAddress,
+        max_count: u32,
+    );
+    fn multi_draw_indexed_indirect_count(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+        count_buffer: &DispatchBuffer,
+        count_buffer_offset: crate::BufferAddress,
+        max_count: u32,
+    );
+
+    fn insert_debug_marker(&mut self, label: &str);
+    fn push_debug_group(&mut self, group_label: &str);
+    fn pop_debug_group(&mut self);
+
+    fn write_timestamp(&mut self, query_set: &DispatchQuerySet, query_index: u32);
+    fn begin_occlusion_query(&mut self, query_index: u32);
+    fn end_occlusion_query(&mut self);
+    fn begin_pipeline_statistics_query(&mut self, query_set: &DispatchQuerySet, query_index: u32);
+    fn end_pipeline_statistics_query(&mut self);
+
+    fn execute_bundles(&mut self, render_bundles: &mut dyn Iterator<Item = &DispatchRenderBundle>);
+
+    fn end(&mut self);
+}
+
+pub trait RenderBundleEncoderInterface: CommonTraits {
+    fn set_pipeline(&mut self, pipeline: &DispatchRenderPipeline);
+    fn set_bind_group(
+        &mut self,
+        index: u32,
+        bind_group: Option<&DispatchBindGroup>,
+        offsets: &[crate::DynamicOffset],
+    );
+    fn set_index_buffer(
+        &mut self,
+        buffer: &DispatchBuffer,
+        index_format: crate::IndexFormat,
+        offset: crate::BufferAddress,
+        size: Option<crate::BufferSize>,
+    );
+    fn set_vertex_buffer(
+        &mut self,
+        slot: u32,
+        buffer: &DispatchBuffer,
+        offset: crate::BufferAddress,
+        size: Option<crate::BufferSize>,
+    );
+    fn set_push_constants(&mut self, stages: crate::ShaderStages, offset: u32, data: &[u8]);
+
+    fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>);
+    fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>);
+    fn draw_indirect(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+    );
+    fn draw_indexed_indirect(
+        &mut self,
+        indirect_buffer: &DispatchBuffer,
+        indirect_offset: crate::BufferAddress,
+    );
+
+    fn finish(self, desc: &crate::RenderBundleDescriptor<'_>) -> DispatchRenderBundle
+    where
+        Self: Sized;
+}
+
+pub trait CommandBufferInterface: CommonTraits {}
+pub trait RenderBundleInterface: CommonTraits {}
+
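One detail worth calling out: `RenderBundleEncoderInterface::finish` takes `self` by value, and the `where Self: Sized` bound is what keeps the trait object safe despite that; the method is simply excluded from the vtable. The rule in isolation:

trait Encoder {
    // Callable through `dyn Encoder`.
    fn draw(&mut self);

    // By-value `self` is not object safe, so the `Sized` bound removes this
    // method from the vtable; it stays callable on any concrete encoder type.
    fn finish(self) -> Vec<u8>
    where
        Self: Sized;
}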
+pub trait SurfaceInterface: CommonTraits {
+    fn get_capabilities(&self, adapter: &DispatchAdapter) -> wgt::SurfaceCapabilities;
+
+    fn configure(&self, device: &DispatchDevice, config: &crate::SurfaceConfiguration);
+    fn get_current_texture(
+        &self,
+    ) -> (
+        Option<DispatchTexture>,
+        crate::SurfaceStatus,
+        DispatchSurfaceOutputDetail,
+    );
+}
+
+pub trait SurfaceOutputDetailInterface: CommonTraits {
+    fn present(&self);
+    fn texture_discard(&self);
+}
+
+pub trait QueueWriteBufferInterface: CommonTraits {
+    fn slice(&self) -> &[u8];
+
+    fn slice_mut(&mut self) -> &mut [u8];
+}
+
+pub trait BufferMappedRangeInterface: CommonTraits {
+    fn slice(&self) -> &[u8];
+    fn slice_mut(&mut self) -> &mut [u8];
+}
+
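The macro below generates one enum per resource. For a single type the output looks roughly like this hand expansion (simplified: the real variants are `cfg`-gated per backend, and `CoreBuffer`/`WebBuffer` are placeholder names for the backends' associated types):

pub enum DispatchBuffer {
    Core(CoreBuffer),
    WebGPU(WebBuffer),
}

impl std::ops::Deref for DispatchBuffer {
    type Target = dyn BufferInterface;

    fn deref(&self) -> &Self::Target {
        match self {
            Self::Core(value) => value,
            Self::WebGPU(value) => value,
        }
    }
}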
+/// Generates Dispatch types for each of the interfaces. Each type is a wrapper around the
+/// wgpu_core and webgpu types, and derefs to the appropriate interface trait-object.
+///
+/// When there is only one backend, devirtualization fires and all dispatches should turn into
+/// direct calls. If there are multiple, some dispatching will occur.
+///
+/// This also provides `as_*` methods so that the backend implementations can dereference other
+/// arguments. These are similarly free when there is only one backend.
+///
+/// In the future, we may want a truly generic backend, which could be extended from this enum.
+macro_rules! dispatch_types {
+    (
+        wgpu_core = $wgpu_core_context:ty;
+        webgpu = $webgpu_context:ty;
+        {$(
+            type $name:ident = InterfaceTypes::$subtype:ident: $trait:ident;
+        )*}
+    ) => {
+        $(
+            #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+            pub enum $name {
+                #[cfg(wgpu_core)]
+                Core(<$wgpu_core_context as InterfaceTypes>::$subtype),
+                #[cfg(webgpu)]
+                WebGPU(<$webgpu_context as InterfaceTypes>::$subtype),
+            }
+
+            impl $name {
+                #[cfg(wgpu_core)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_core(&self) -> &<$wgpu_core_context as InterfaceTypes>::$subtype {
+                    match self {
+                        Self::Core(value) => value,
+                        _ => panic!(concat!(stringify!($name), " is not core")),
+                    }
+                }
+
+                #[cfg(wgpu_core)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_core_mut(&mut self) -> &mut <$wgpu_core_context as InterfaceTypes>::$subtype {
+                    match self {
+                        Self::Core(value) => value,
+                        _ => panic!(concat!(stringify!($name), " is not core")),
+                    }
+                }
+
+                #[cfg(wgpu_core)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_core_opt(&self) -> Option<&<$wgpu_core_context as InterfaceTypes>::$subtype> {
+                    match self {
+                        Self::Core(value) => Some(value),
+                        _ => None,
+                    }
+                }
+
+                #[cfg(wgpu_core)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_core_mut_opt(&mut self) -> Option<&mut <$wgpu_core_context as InterfaceTypes>::$subtype> {
+                    match self {
+                        Self::Core(value) => Some(value),
+                        _ => None,
+                    }
+                }
+
+                #[cfg(webgpu)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_webgpu(&self) -> &<$webgpu_context as InterfaceTypes>::$subtype {
+                    match self {
+                        Self::WebGPU(value) => value,
+                        _ => panic!(concat!(stringify!($name), " is not webgpu")),
+                    }
+                }
+
+                #[cfg(webgpu)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_webgpu_mut(&mut self) -> &mut <$webgpu_context as InterfaceTypes>::$subtype {
+                    match self {
+                        Self::WebGPU(value) => value,
+                        _ => panic!(concat!(stringify!($name), " is not webgpu")),
+                    }
+                }
+
+                #[cfg(webgpu)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_webgpu_opt(&self) -> Option<&<$webgpu_context as InterfaceTypes>::$subtype> {
+                    match self {
+                        Self::WebGPU(value) => Some(value),
+                        _ => None,
+                    }
+                }
+
+                #[cfg(webgpu)]
+                #[inline]
+                #[allow(unused)]
+                pub fn as_webgpu_mut_opt(&mut self) -> Option<&mut <$webgpu_context as InterfaceTypes>::$subtype> {
+                    match self {
+                        Self::WebGPU(value) => Some(value),
+                        _ => None,
+                    }
+                }
+            }
+
+            #[cfg(wgpu_core)]
+            impl From<<$wgpu_core_context as InterfaceTypes>::$subtype> for $name {
+                #[inline]
+                fn from(value: <$wgpu_core_context as InterfaceTypes>::$subtype) -> Self {
+                    Self::Core(value)
+                }
+            }
+
+            #[cfg(webgpu)]
+            impl From<<$webgpu_context as InterfaceTypes>::$subtype> for $name {
+                #[inline]
+                fn from(value: <$webgpu_context as InterfaceTypes>::$subtype) -> Self {
+                    Self::WebGPU(value)
+                }
+            }
+
+            impl std::ops::Deref for $name {
+                type Target = dyn $trait;
+
+                #[inline]
+                fn deref(&self) -> &Self::Target {
+                    match self {
+                        #[cfg(wgpu_core)]
+                        Self::Core(value) => value,
+                        #[cfg(webgpu)]
+                        Self::WebGPU(value) => value,
+                    }
+                }
+            }
+
+            impl std::ops::DerefMut for $name {
+                #[inline]
+                fn deref_mut(&mut self) -> &mut Self::Target {
+                    match self {
+                        #[cfg(wgpu_core)]
+                        Self::Core(value) => value,
+                        #[cfg(webgpu)]
+                        Self::WebGPU(value) => value,
+                    }
+                }
+            }
+        )*
+    };
+}
+
+dispatch_types! {
+    wgpu_core = backend::ContextWgpuCore;
+    webgpu = backend::ContextWebGpu;
+    {
+        type DispatchInstance = InterfaceTypes::Instance: InstanceInterface;
+        type DispatchAdapter = InterfaceTypes::Adapter: AdapterInterface;
+        type DispatchDevice = InterfaceTypes::Device: DeviceInterface;
+        type DispatchQueue = InterfaceTypes::Queue: QueueInterface;
+        type DispatchShaderModule = InterfaceTypes::ShaderModule: ShaderModuleInterface;
+        type DispatchBindGroupLayout = InterfaceTypes::BindGroupLayout: BindGroupLayoutInterface;
+        type DispatchBindGroup = InterfaceTypes::BindGroup: BindGroupInterface;
+        type DispatchTextureView = InterfaceTypes::TextureView: TextureViewInterface;
+        type DispatchSampler = InterfaceTypes::Sampler: SamplerInterface;
+        type DispatchBuffer = InterfaceTypes::Buffer: BufferInterface;
+        type DispatchTexture = InterfaceTypes::Texture: TextureInterface;
+        type DispatchBlas = InterfaceTypes::Blas: BlasInterface;
+        type DispatchTlas = InterfaceTypes::Tlas: TlasInterface;
+        type DispatchQuerySet = InterfaceTypes::QuerySet: QuerySetInterface;
+        type DispatchPipelineLayout = InterfaceTypes::PipelineLayout: PipelineLayoutInterface;
+        type DispatchRenderPipeline = InterfaceTypes::RenderPipeline: RenderPipelineInterface;
+        type DispatchComputePipeline = InterfaceTypes::ComputePipeline: ComputePipelineInterface;
+        type DispatchPipelineCache = InterfaceTypes::PipelineCache: PipelineCacheInterface;
+        type DispatchCommandEncoder = InterfaceTypes::CommandEncoder: CommandEncoderInterface;
+        type DispatchComputePass = InterfaceTypes::ComputePass: ComputePassInterface;
+        type DispatchRenderPass = InterfaceTypes::RenderPass: RenderPassInterface;
+        type DispatchCommandBuffer = InterfaceTypes::CommandBuffer: CommandBufferInterface;
+        type DispatchRenderBundleEncoder = InterfaceTypes::RenderBundleEncoder: RenderBundleEncoderInterface;
+        type DispatchRenderBundle = InterfaceTypes::RenderBundle: RenderBundleInterface;
+        type DispatchSurface = InterfaceTypes::Surface: SurfaceInterface;
+        type DispatchSurfaceOutputDetail = InterfaceTypes::SurfaceOutputDetail: SurfaceOutputDetailInterface;
+        type DispatchQueueWriteBuffer = InterfaceTypes::QueueWriteBuffer: QueueWriteBufferInterface;
+        type DispatchBufferMappedRange = InterfaceTypes::BufferMappedRange: BufferMappedRangeInterface;
+    }
+}
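The devirtualization promise in the macro's docs follows directly from the `cfg` gates: with only one backend compiled in, each generated enum has a single variant, every `match` in `deref`/`deref_mut` has a single arm, and the optimizer can replace calls through the returned trait object with direct calls into the backend type. In spirit:

// With only `wgpu_core` enabled, the generated type collapses to:
pub enum DispatchBuffer {
    Core(CoreBuffer), // single variant, so no runtime branching is needed
}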
diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs
index 0e1f4357f4..d716fb1c5e 100644
--- a/wgpu/src/lib.rs
+++ b/wgpu/src/lib.rs
@@ -17,6 +17,7 @@
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 #![doc(html_logo_url = "https://raw.githubusercontent.com/gfx-rs/wgpu/trunk/logo.png")]
 #![warn(missing_docs, rust_2018_idioms, unsafe_op_in_unsafe_fn)]
+#![allow(clippy::arc_with_non_send_sync)]
 
 //
 //
@@ -26,9 +27,9 @@
 
 mod api;
 mod backend;
-mod context;
+mod cmp;
+mod dispatch;
 mod macros;
-mod send_sync;
 pub mod util;
 
 //
@@ -37,16 +38,6 @@ pub mod util;
 //
 //
 
-#[allow(unused_imports)] // WebGPU needs this
-use context::Context;
-use send_sync::*;
-
-type C = dyn context::DynContext;
-#[cfg(send_sync)]
-type Data = dyn std::any::Any + Send + Sync;
-#[cfg(not(send_sync))]
-type Data = dyn std::any::Any;
-
 //
 //
 // Public re-exports
diff --git a/wgpu/src/send_sync.rs b/wgpu/src/send_sync.rs
deleted file mode 100644
index 3842931716..0000000000
--- a/wgpu/src/send_sync.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-use std::any::Any;
-use std::fmt;
-
-use wgt::WasmNotSendSync;
-
-pub trait AnyWasmNotSendSync: Any + WasmNotSendSync {
-    fn upcast_any_ref(&self) -> &dyn Any;
-}
-impl<T: Any + WasmNotSendSync> AnyWasmNotSendSync for T {
-    #[inline]
-    fn upcast_any_ref(&self) -> &dyn Any {
-        self
-    }
-}
-
-impl dyn AnyWasmNotSendSync + 'static {
-    #[inline]
-    pub fn downcast_ref<T: 'static>(&self) -> Option<&T> {
-        self.upcast_any_ref().downcast_ref::<T>()
-    }
-}
-
-impl fmt::Debug for dyn AnyWasmNotSendSync {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Any").finish_non_exhaustive()
-    }
-}
diff --git a/wgpu/src/util/belt.rs b/wgpu/src/util/belt.rs
index 9d8c3c4e21..d6ef7a0c46 100644
--- a/wgpu/src/util/belt.rs
+++ b/wgpu/src/util/belt.rs
@@ -117,7 +117,6 @@ impl StagingBelt {
         } else {
             let size = self.chunk_size.max(size.get());
             Chunk {
-                #[allow(clippy::arc_with_non_send_sync)] // False positive on emscripten
                 buffer: Arc::new(device.create_buffer(&BufferDescriptor {
                     label: Some("(wgpu internal) StagingBelt staging buffer"),
                     size,
diff --git a/wgpu/src/util/mod.rs b/wgpu/src/util/mod.rs
index 9a1e643761..9d181eba93 100644
--- a/wgpu/src/util/mod.rs
+++ b/wgpu/src/util/mod.rs
@@ -23,6 +23,8 @@ pub use wgt::{
     math::*, DispatchIndirectArgs, DrawIndexedIndirectArgs, DrawIndirectArgs, TextureDataOrder,
 };
 
+use crate::dispatch;
+
 /// Treat the given byte slice as a SPIR-V module.
 ///
 /// # Panic
@@ -86,7 +88,7 @@ pub fn make_spirv_raw(data: &[u8]) -> Cow<'_, [u32]> {
 /// CPU accessible buffer used to download data back from the GPU.
 pub struct DownloadBuffer {
     _gpu_buffer: Arc<super::Buffer>,
-    mapped_range: Box<dyn crate::context::BufferMappedRange>,
+    mapped_range: dispatch::DispatchBufferMappedRange,
 }
 
 impl DownloadBuffer {
@@ -102,7 +104,6 @@ impl DownloadBuffer {
             None => buffer.buffer.map_context.lock().total_size - buffer.offset,
         };
 
-        #[allow(clippy::arc_with_non_send_sync)] // False positive on emscripten
         let download = Arc::new(device.create_buffer(&super::BufferDescriptor {
             size,
             usage: super::BufferUsages::COPY_DST | super::BufferUsages::MAP_READ,
@@ -125,11 +126,7 @@ impl DownloadBuffer {
                 return;
             }
 
-            let mapped_range = crate::context::DynContext::buffer_get_mapped_range(
-                &*download.context,
-                download.data.as_ref(),
-                0..size,
-            );
+            let mapped_range = download.inner.get_mapped_range(0..size);
             callback(Ok(Self {
                 _gpu_buffer: download,
                 mapped_range,