diff --git a/deno_webgpu/lib.rs b/deno_webgpu/lib.rs index c2dfb240fa..6f708014f2 100644 --- a/deno_webgpu/lib.rs +++ b/deno_webgpu/lib.rs @@ -414,9 +414,9 @@ pub fn op_webgpu_request_adapter( }) } }; - let adapter_features = instance.adapter_features(adapter)?; + let adapter_features = instance.adapter_features(adapter); let features = deserialize_features(&adapter_features); - let adapter_limits = instance.adapter_limits(adapter)?; + let adapter_limits = instance.adapter_limits(adapter); let instance = instance.clone(); @@ -649,7 +649,7 @@ pub fn op_webgpu_request_device( memory_hints: wgpu_types::MemoryHints::default(), }; - let (device, queue, maybe_err) = instance.adapter_request_device( + let res = instance.adapter_request_device( adapter, &descriptor, std::env::var("DENO_WEBGPU_TRACE") @@ -660,13 +660,12 @@ pub fn op_webgpu_request_device( None, ); adapter_resource.close(); - if let Some(err) = maybe_err { - return Err(DomExceptionOperationError::new(&err.to_string()).into()); - } - let device_features = instance.device_features(device)?; + let (device, queue) = res.map_err(|err| DomExceptionOperationError::new(&err.to_string()))?; + + let device_features = instance.device_features(device); let features = deserialize_features(&device_features); - let limits = instance.device_limits(device)?; + let limits = instance.device_limits(device); let instance = instance.clone(); let instance2 = instance.clone(); @@ -705,7 +704,7 @@ pub fn op_webgpu_request_adapter_info( let adapter = adapter_resource.1; let instance = state.borrow::(); - let info = instance.adapter_get_info(adapter)?; + let info = instance.adapter_get_info(adapter); adapter_resource.close(); Ok(GPUAdapterInfo { diff --git a/player/src/bin/play.rs b/player/src/bin/play.rs index 4726fe63a7..eb26ce6ba5 100644 --- a/player/src/bin/play.rs +++ b/player/src/bin/play.rs @@ -78,18 +78,18 @@ fn main() { ) .expect("Unable to find an adapter for selected backend"); - let info = global.adapter_get_info(adapter).unwrap(); + let info = global.adapter_get_info(adapter); log::info!("Picked '{}'", info.name); let device_id = wgc::id::Id::zip(1, 0, backend); let queue_id = wgc::id::Id::zip(1, 0, backend); - let (_, _, error) = global.adapter_request_device( + let res = global.adapter_request_device( adapter, &desc, None, Some(device_id), Some(queue_id), ); - if let Some(e) = error { + if let Err(e) = res { panic!("{:?}", e); } (device_id, queue_id) diff --git a/player/tests/test.rs b/player/tests/test.rs index 367a7494a7..3b26a202da 100644 --- a/player/tests/test.rs +++ b/player/tests/test.rs @@ -108,7 +108,7 @@ impl Test<'_> { let backend = adapter.backend(); let device_id = wgc::id::Id::zip(test_num, 0, backend); let queue_id = wgc::id::Id::zip(test_num, 0, backend); - let (_, _, error) = global.adapter_request_device( + let res = global.adapter_request_device( adapter, &wgt::DeviceDescriptor { label: None, @@ -120,7 +120,7 @@ impl Test<'_> { Some(device_id), Some(queue_id), ); - if let Some(e) = error { + if let Err(e) = res { panic!("{:?}", e); } @@ -244,8 +244,8 @@ impl Corpus { }; println!("\tBackend {:?}", backend); - let supported_features = global.adapter_features(adapter).unwrap(); - let downlevel_caps = global.adapter_downlevel_capabilities(adapter).unwrap(); + let supported_features = global.adapter_features(adapter); + let downlevel_caps = global.adapter_downlevel_capabilities(adapter); let test = Test::load(dir.join(test_path), adapter.backend()); if !supported_features.contains(test.features) { diff --git 
a/tests/tests/device.rs b/tests/tests/device.rs index 6edc73868c..c832af06b4 100644 --- a/tests/tests/device.rs +++ b/tests/tests/device.rs @@ -651,33 +651,6 @@ static DEVICE_DROP_THEN_LOST: GpuTestConfiguration = GpuTestConfiguration::new() ); }); -#[gpu_test] -static DEVICE_INVALID_THEN_SET_LOST_CALLBACK: GpuTestConfiguration = GpuTestConfiguration::new() - .parameters(TestParameters::default().expect_fail(FailureCase::webgl2())) - .run_sync(|ctx| { - // This test checks that when the device is invalid, a subsequent call - // to set the device lost callback will immediately call the callback. - // Invalidating the device is done via a testing-only method. Fails on - // webgl because webgl doesn't implement make_invalid. - - // Make the device invalid. - ctx.device.make_invalid(); - - static WAS_CALLED: AtomicBool = AtomicBool::new(false); - - // Set a LoseDeviceCallback on the device. - let callback = Box::new(|reason, _m| { - WAS_CALLED.store(true, std::sync::atomic::Ordering::SeqCst); - assert_eq!(reason, wgt::DeviceLostReason::DeviceInvalid); - }); - ctx.device.set_device_lost_callback(callback); - - assert!( - WAS_CALLED.load(std::sync::atomic::Ordering::SeqCst), - "Device lost callback should have been called." - ); - }); - #[gpu_test] static DEVICE_LOST_REPLACED_CALLBACK: GpuTestConfiguration = GpuTestConfiguration::new() .parameters(TestParameters::default()) diff --git a/tests/tests/pipeline.rs b/tests/tests/pipeline.rs index 4c3888a210..f7d8d1ec7b 100644 --- a/tests/tests/pipeline.rs +++ b/tests/tests/pipeline.rs @@ -8,7 +8,7 @@ static PIPELINE_DEFAULT_LAYOUT_BAD_MODULE: GpuTestConfiguration = GpuTestConfigu .parameters( TestParameters::default() // https://github.com/gfx-rs/wgpu/issues/4167 - .expect_fail(FailureCase::always().panic("Pipeline is invalid")), + .expect_fail(FailureCase::always().panic("Error reflecting bind group")), ) .run_sync(|ctx| { ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); diff --git a/tests/tests/resource_error.rs b/tests/tests/resource_error.rs index fc7e062f4c..d071053ebd 100644 --- a/tests/tests/resource_error.rs +++ b/tests/tests/resource_error.rs @@ -17,21 +17,16 @@ static BAD_BUFFER: GpuTestConfiguration = GpuTestConfiguration::new().run_sync(| Some("`map` usage can only be combined with the opposite `copy`"), ); - let error = match ctx.adapter_info.backend.to_str() { - "vulkan" | "vk" => "bufferid id(0,1,vk) is invalid", - "dx12" | "d3d12" => "bufferid id(0,1,d3d12) is invalid", - "metal" | "mtl" => "bufferid id(0,1,mtl) is invalid", - "opengl" | "gles" | "gl" => "bufferid id(0,1,gl) is invalid", - "webgpu" => "bufferid id(0,1,webgpu) is invalid", - b => b, - }; - fail( &ctx.device, || buffer.slice(..).map_async(wgpu::MapMode::Write, |_| {}), - Some(error), + Some("Buffer with '' label is invalid"), + ); + fail( + &ctx.device, + || buffer.unmap(), + Some("Buffer with '' label is invalid"), ); - fail(&ctx.device, || buffer.unmap(), Some(error)); valid(&ctx.device, || buffer.destroy()); valid(&ctx.device, || buffer.destroy()); }); @@ -59,21 +54,12 @@ static BAD_TEXTURE: GpuTestConfiguration = GpuTestConfiguration::new().run_sync( Some("dimension x is zero"), ); - let error = match ctx.adapter_info.backend.to_str() { - "vulkan" | "vk" => "textureid id(0,1,vk) is invalid", - "dx12" | "d3d12" => "textureid id(0,1,d3d12) is invalid", - "metal" | "mtl" => "textureid id(0,1,mtl) is invalid", - "opengl" | "gles" | "gl" => "textureid id(0,1,gl) is invalid", - "webgpu" => "textureid id(0,1,webgpu) is invalid", - b => b, - }; - fail( 
&ctx.device, || { let _ = texture.create_view(&wgpu::TextureViewDescriptor::default()); }, - Some(error), + Some("Texture with '' label is invalid"), ); valid(&ctx.device, || texture.destroy()); valid(&ctx.device, || texture.destroy()); diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index bd89fbb2b0..2357a8c776 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -6,8 +6,8 @@ use crate::{ init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, pipeline::{ComputePipeline, RenderPipeline}, resource::{ - Buffer, DestroyedResourceError, Labeled, MissingBufferUsageError, MissingTextureUsageError, - ResourceErrorIdent, Sampler, TextureView, TrackingData, + Buffer, DestroyedResourceError, InvalidResourceError, Labeled, MissingBufferUsageError, + MissingTextureUsageError, ResourceErrorIdent, Sampler, TextureView, TrackingData, }, resource_log, snatch::{SnatchGuard, Snatchable}, @@ -79,14 +79,6 @@ pub enum CreateBindGroupLayoutError { pub enum CreateBindGroupError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Bind group layout is invalid")] - InvalidLayout, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureViewId {0:?} is invalid")] - InvalidTextureViewId(TextureViewId), - #[error("SamplerId {0:?} is invalid")] - InvalidSamplerId(SamplerId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error( @@ -188,6 +180,8 @@ pub enum CreateBindGroupError { StorageReadNotSupported(wgt::TextureFormat), #[error(transparent)] ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -545,8 +539,6 @@ impl BindGroupLayout { pub enum CreatePipelineLayoutError { #[error(transparent)] Device(#[from] DeviceError), - #[error("BindGroupLayoutId {0:?} is invalid")] - InvalidBindGroupLayoutId(BindGroupLayoutId), #[error( "Push constant at index {index} has range bound {bound} not aligned to {}", wgt::PUSH_CONSTANT_ALIGNMENT @@ -570,6 +562,8 @@ pub enum CreatePipelineLayoutError { TooManyBindings(BindingTypeMaxCountError), #[error("Bind group layout count {actual} exceeds device bind group limit {max}")] TooManyGroups { actual: usize, max: usize }, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -1003,10 +997,10 @@ crate::impl_trackable!(BindGroup); #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum GetBindGroupLayoutError { - #[error("Pipeline is invalid")] - InvalidPipeline, #[error("Invalid group index {0}")] InvalidGroupIndex(u32), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error, Eq, PartialEq)] diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 775bd5f825..394858f9fd 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -92,7 +92,10 @@ use crate::{ id, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, pipeline::{PipelineFlags, RenderPipeline, VertexStep}, - resource::{Buffer, DestroyedResourceError, Labeled, ParentDevice, TrackingData}, + resource::{ + Buffer, DestroyedResourceError, Fallible, InvalidResourceError, Labeled, ParentDevice, + TrackingData, + }, resource_log, snatch::SnatchGuard, track::RenderBundleScope, @@ -578,7 +581,7 @@ impl RenderBundleEncoder { fn set_bind_group( state: &mut State, - bind_group_guard: 
&crate::lock::RwLockReadGuard>, + bind_group_guard: &crate::storage::Storage>, dynamic_offsets: &[u32], index: u32, num_dynamic_offsets: usize, @@ -591,9 +594,7 @@ fn set_bind_group( let bind_group_id = bind_group_id.unwrap(); - let bind_group = bind_group_guard - .get_owned(bind_group_id) - .map_err(|_| RenderCommandError::InvalidBindGroupId(bind_group_id))?; + let bind_group = bind_group_guard.get(bind_group_id).get()?; bind_group.same_device(&state.device)?; @@ -630,15 +631,13 @@ fn set_bind_group( fn set_pipeline( state: &mut State, - pipeline_guard: &crate::lock::RwLockReadGuard>, + pipeline_guard: &crate::storage::Storage>, context: &RenderPassContext, is_depth_read_only: bool, is_stencil_read_only: bool, pipeline_id: id::Id, ) -> Result<(), RenderBundleErrorInner> { - let pipeline = pipeline_guard - .get_owned(pipeline_id) - .map_err(|_| RenderCommandError::InvalidPipelineId(pipeline_id))?; + let pipeline = pipeline_guard.get(pipeline_id).get()?; pipeline.same_device(&state.device)?; @@ -673,15 +672,13 @@ fn set_pipeline( fn set_index_buffer( state: &mut State, - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, buffer_id: id::Id, index_format: wgt::IndexFormat, offset: u64, size: Option, ) -> Result<(), RenderBundleErrorInner> { - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -708,7 +705,7 @@ fn set_index_buffer( fn set_vertex_buffer( state: &mut State, - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, slot: u32, buffer_id: id::Id, offset: u64, @@ -723,9 +720,7 @@ fn set_vertex_buffer( .into()); } - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -852,7 +847,7 @@ fn draw_indexed( fn multi_draw_indirect( state: &mut State, dynamic_offsets: &[u32], - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, buffer_id: id::Id, offset: u64, indexed: bool, @@ -864,9 +859,7 @@ fn multi_draw_indirect( let pipeline = state.pipeline()?; let used_bind_groups = pipeline.used_bind_groups; - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -1538,6 +1531,8 @@ pub(super) enum RenderBundleErrorInner { MissingDownlevelFlags(#[from] MissingDownlevelFlags), #[error(transparent)] Bind(#[from] BindError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for RenderBundleErrorInner diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 7a16f6db38..3a12a54117 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -11,8 +11,8 @@ use crate::{ id::{BufferId, CommandEncoderId, TextureId}, init_tracker::{MemoryInitKind, TextureInitRange}, resource::{ - DestroyedResourceError, Labeled, MissingBufferUsageError, ParentDevice, ResourceErrorIdent, - Texture, TextureClearMode, + DestroyedResourceError, InvalidResourceError, Labeled, MissingBufferUsageError, + ParentDevice, ResourceErrorIdent, Texture, TextureClearMode, }, snatch::SnatchGuard, track::{TextureSelector, TextureTrackerSetSingle}, @@ -27,10 +27,6 @@ use wgt::{math::align_to, BufferAddress, BufferUsages, ImageSubresourceRange, Te pub enum ClearError { 
#[error("To use clear_texture the CLEAR_TEXTURE feature needs to be enabled")] MissingClearTextureFeature, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("{0} can not be cleared")] @@ -75,6 +71,8 @@ whereas subesource range specified start {subresource_base_array_layer} and coun Device(#[from] DeviceError), #[error(transparent)] CommandEncoderError(#[from] CommandEncoderError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl Global { @@ -90,27 +88,18 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearBuffer { dst, offset, size }); } - let dst_buffer = hub - .buffers - .get(dst) - .map_err(|_| ClearError::InvalidBufferId(dst))?; + let dst_buffer = hub.buffers.get(dst).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; @@ -182,17 +171,11 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -206,10 +189,7 @@ impl Global { return Err(ClearError::MissingClearTextureFeature); } - let dst_texture = hub - .textures - .get(dst) - .map_err(|_| ClearError::InvalidTextureId(dst))?; + let dst_texture = hub.textures.get(dst).get()?; dst_texture.same_device_as(cmd_buf.as_ref())?; diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 8dbd6efa11..5de03917d2 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -17,8 +17,8 @@ use crate::{ init_tracker::{BufferInitTrackerAction, MemoryInitKind}, pipeline::ComputePipeline, resource::{ - self, Buffer, DestroyedResourceError, Labeled, MissingBufferUsageError, ParentDevice, - Trackable, + self, Buffer, DestroyedResourceError, InvalidResourceError, Labeled, + MissingBufferUsageError, ParentDevice, Trackable, }, snatch::SnatchGuard, track::{ResourceUsageCompatibilityError, Tracker, TrackerIndex, UsageScope}, @@ -132,14 +132,8 @@ pub enum ComputePassErrorInner { Encoder(#[from] CommandEncoderError), #[error("Parent encoder is invalid")] InvalidParentEncoder, - #[error("BindGroupId {0:?} is invalid")] - InvalidBindGroupId(id::BindGroupId), #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] BindGroupIndexOutOfRange { index: u32, max: u32 }, - #[error("ComputePipelineId {0:?} is invalid")] - InvalidPipelineId(id::ComputePipelineId), - #[error("QuerySet 
{0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("Indirect buffer uses bytes {offset}..{end_offset} which overruns indirect buffer of size {buffer_size}")] @@ -148,8 +142,6 @@ pub enum ComputePassErrorInner { end_offset: u64, buffer_size: u64, }, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), #[error(transparent)] ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError), #[error(transparent)] @@ -176,6 +168,8 @@ pub enum ComputePassErrorInner { MissingDownlevelFlags(#[from] MissingDownlevelFlags), #[error("The compute pass has already been ended and no further commands can be recorded")] PassEnded, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } /// Error encountered when performing a compute pass. @@ -298,22 +292,21 @@ impl Global { let make_err = |e, arc_desc| (ComputePass::new(None, arc_desc), Some(e)); - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc), - }; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); - match cmd_buf.lock_encoder() { + match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder()) + { Ok(_) => {} Err(e) => return make_err(e, arc_desc), }; arc_desc.timestamp_writes = if let Some(tw) = desc.timestamp_writes { - let Ok(query_set) = hub.query_sets.get(tw.query_set) else { - return make_err( - CommandEncoderError::InvalidTimestampWritesQuerySetId(tw.query_set), - arc_desc, - ); + let query_set = match hub.query_sets.get(tw.query_set).get() { + Ok(query_set) => query_set, + Err(e) => return make_err(e.into(), arc_desc), }; Some(ArcPassTimestampWrites { @@ -328,25 +321,8 @@ impl Global { (ComputePass::new(Some(cmd_buf), arc_desc), None) } - pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> { - let scope = PassErrorScope::Pass; - - let cmd_buf = pass - .parent - .as_ref() - .ok_or(ComputePassErrorInner::InvalidParentEncoder) - .map_pass_err(scope)?; - - cmd_buf.unlock_encoder().map_pass_err(scope)?; - - let base = pass - .base - .take() - .ok_or(ComputePassErrorInner::PassEnded) - .map_pass_err(scope)?; - self.compute_pass_end_impl(cmd_buf, base, pass.timestamp_writes.take()) - } - + /// Note that this differs from [`Self::compute_pass_end`], it will + /// create a new pass, replay the commands and end the pass. 
#[doc(hidden)] #[cfg(any(feature = "serde", feature = "replay"))] pub fn compute_pass_end_with_unresolved_commands( @@ -355,19 +331,16 @@ impl Global { base: BasePass, timestamp_writes: Option<&PassTimestampWrites>, ) -> Result<(), ComputePassError> { - let hub = &self.hub; - let scope = PassErrorScope::Pass; - - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(scope), - }; - cmd_buf.check_recording().map_pass_err(scope)?; + let pass_scope = PassErrorScope::Pass; #[cfg(feature = "trace")] { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = self + .hub + .command_buffers + .get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; + if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunComputePass { base: BasePass { @@ -382,50 +355,61 @@ impl Global { } } - let commands = - super::ComputeCommand::resolve_compute_command_ids(&self.hub, &base.commands)?; - - let timestamp_writes = if let Some(tw) = timestamp_writes { - Some(ArcPassTimestampWrites { - query_set: hub - .query_sets - .get(tw.query_set) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(tw.query_set)) - .map_pass_err(scope)?, - beginning_of_pass_write_index: tw.beginning_of_pass_write_index, - end_of_pass_write_index: tw.end_of_pass_write_index, - }) - } else { - None + let BasePass { + label, + commands, + dynamic_offsets, + string_data, + push_constant_data, + } = base; + + let (mut compute_pass, encoder_error) = self.command_encoder_create_compute_pass( + encoder_id, + &ComputePassDescriptor { + label: label.as_deref().map(std::borrow::Cow::Borrowed), + timestamp_writes, + }, + ); + if let Some(err) = encoder_error { + return Err(ComputePassError { + scope: pass_scope, + inner: err.into(), + }); }; - self.compute_pass_end_impl( - &cmd_buf, - BasePass { - label: base.label, - commands, - dynamic_offsets: base.dynamic_offsets, - string_data: base.string_data, - push_constant_data: base.push_constant_data, - }, - timestamp_writes, - ) + compute_pass.base = Some(BasePass { + label, + commands: super::ComputeCommand::resolve_compute_command_ids(&self.hub, &commands)?, + dynamic_offsets, + string_data, + push_constant_data, + }); + + self.compute_pass_end(&mut compute_pass) } - fn compute_pass_end_impl( - &self, - cmd_buf: &CommandBuffer, - base: BasePass, - mut timestamp_writes: Option, - ) -> Result<(), ComputePassError> { + pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> { profiling::scope!("CommandEncoder::run_compute_pass"); let pass_scope = PassErrorScope::Pass; + let cmd_buf = pass + .parent + .as_ref() + .ok_or(ComputePassErrorInner::InvalidParentEncoder) + .map_pass_err(pass_scope)?; + + let base = pass + .base + .take() + .ok_or(ComputePassErrorInner::PassEnded) + .map_pass_err(pass_scope)?; + let device = &cmd_buf.device; device.check_is_valid().map_pass_err(pass_scope)?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; + cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?; + let cmd_buf_data = &mut *cmd_buf_data; let encoder = &mut cmd_buf_data.encoder; let status = &mut cmd_buf_data.status; @@ -467,9 +451,9 @@ impl Global { 
state.tracker.textures.set_size(indices.textures.size()); let timestamp_writes: Option> = - if let Some(tw) = timestamp_writes.take() { + if let Some(tw) = pass.timestamp_writes.take() { tw.query_set - .same_device_as(cmd_buf) + .same_device_as(cmd_buf.as_ref()) .map_pass_err(pass_scope)?; let query_set = state.tracker.query_sets.insert_single(tw.query_set); @@ -503,7 +487,7 @@ impl Global { }; let hal_desc = hal::ComputePassDescriptor { - label: hal_label(base.label.as_deref(), self.instance.flags), + label: hal_label(base.label.as_deref(), device.instance_flags), timestamp_writes, }; @@ -990,7 +974,7 @@ impl Global { let bg = hub .bind_groups .get(bind_group_id) - .map_err(|_| ComputePassErrorInner::InvalidBindGroupId(bind_group_id)) + .get() .map_pass_err(scope)?; bind_group = Some(bg); } @@ -1023,7 +1007,7 @@ impl Global { let pipeline = hub .compute_pipelines .get(pipeline_id) - .map_err(|_| ComputePassErrorInner::InvalidPipelineId(pipeline_id)) + .get() .map_pass_err(scope)?; base.commands.push(ArcComputeCommand::SetPipeline(pipeline)); @@ -1094,11 +1078,7 @@ impl Global { let scope = PassErrorScope::Dispatch { indirect: true }; let base = pass.base_mut(scope)?; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| ComputePassErrorInner::InvalidBufferId(buffer_id)) - .map_pass_err(scope)?; + let buffer = hub.buffers.get(buffer_id).get().map_pass_err(scope)?; base.commands .push(ArcComputeCommand::DispatchIndirect { buffer, offset }); @@ -1165,11 +1145,7 @@ impl Global { let base = pass.base_mut(scope)?; let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; base.commands.push(ArcComputeCommand::WriteTimestamp { query_set, @@ -1189,11 +1165,7 @@ impl Global { let base = pass.base_mut(scope)?; let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; base.commands .push(ArcComputeCommand::BeginPipelineStatisticsQuery { diff --git a/wgpu-core/src/command/compute_command.rs b/wgpu-core/src/command/compute_command.rs index 6596b8e6ac..67c23d9452 100644 --- a/wgpu-core/src/command/compute_command.rs +++ b/wgpu-core/src/command/compute_command.rs @@ -74,7 +74,7 @@ impl ComputeCommand { hub: &crate::hub::Hub, commands: &[ComputeCommand], ) -> Result, super::ComputePassError> { - use super::{ComputePassError, ComputePassErrorInner, PassErrorScope}; + use super::{ComputePassError, PassErrorScope}; let buffers_guard = hub.buffers.read(); let bind_group_guard = hub.bind_groups.read(); @@ -99,10 +99,10 @@ impl ComputeCommand { } let bind_group_id = bind_group_id.unwrap(); - let bg = bind_group_guard.get_owned(bind_group_id).map_err(|_| { + let bg = bind_group_guard.get(bind_group_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::SetBindGroup, - inner: ComputePassErrorInner::InvalidBindGroupId(bind_group_id), + inner: e.into(), } })?; @@ -112,13 +112,13 @@ impl ComputeCommand { bind_group: Some(bg), } } - ComputeCommand::SetPipeline(pipeline_id) => ArcComputeCommand::SetPipeline( pipelines_guard - .get_owned(pipeline_id) - .map_err(|_| ComputePassError { + .get(pipeline_id) + .get() + .map_err(|e| ComputePassError { scope: PassErrorScope::SetPipelineCompute, - inner: 
ComputePassErrorInner::InvalidPipelineId(pipeline_id), + inner: e.into(), })?, ), @@ -136,10 +136,10 @@ impl ComputeCommand { ComputeCommand::DispatchIndirect { buffer_id, offset } => { ArcComputeCommand::DispatchIndirect { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::Dispatch { indirect: true }, - inner: ComputePassErrorInner::InvalidBufferId(buffer_id), + inner: e.into(), } })?, offset, @@ -160,10 +160,10 @@ impl ComputeCommand { query_set_id, query_index, } => ArcComputeCommand::WriteTimestamp { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::WriteTimestamp, - inner: ComputePassErrorInner::InvalidQuerySet(query_set_id), + inner: e.into(), } })?, query_index, @@ -173,10 +173,10 @@ impl ComputeCommand { query_set_id, query_index, } => ArcComputeCommand::BeginPipelineStatisticsQuery { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::BeginPipelineStatisticsQuery, - inner: ComputePassErrorInner::InvalidQuerySet(query_set_id), + inner: e.into(), } })?, query_index, diff --git a/wgpu-core/src/command/draw.rs b/wgpu-core/src/command/draw.rs index e8578bba05..dd54b60f26 100644 --- a/wgpu-core/src/command/draw.rs +++ b/wgpu-core/src/command/draw.rs @@ -1,6 +1,5 @@ use crate::{ binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError}, - id, resource::{ DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError, ResourceErrorIdent, @@ -68,22 +67,12 @@ pub enum DrawError { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum RenderCommandError { - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), - #[error("BindGroupId {0:?} is invalid")] - InvalidBindGroupId(id::BindGroupId), - #[error("Render bundle {0:?} is invalid")] - InvalidRenderBundle(id::RenderBundleId), #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] BindGroupIndexOutOfRange { index: u32, max: u32 }, #[error("Vertex buffer index {index} is greater than the device's requested `max_vertex_buffers` limit {max}")] VertexBufferIndexOutOfRange { index: u32, max: u32 }, #[error("Dynamic buffer offset {0} does not respect device's requested `{1}` limit {2}")] UnalignedBufferOffset(u64, &'static str, u32), - #[error("RenderPipelineId {0:?} is invalid")] - InvalidPipelineId(id::RenderPipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), #[error("Render pipeline targets are incompatible with render pass")] IncompatiblePipelineTargets(#[from] crate::device::RenderPassCompatibilityError), #[error("{0} writes to depth, while the pass has read-only depth access")] diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 91af31662b..42c80d3ca5 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -31,7 +31,7 @@ use crate::lock::{rank, Mutex}; use crate::snatch::SnatchGuard; use crate::init_tracker::BufferInitTrackerAction; -use crate::resource::Labeled; +use crate::resource::{InvalidResourceError, Labeled}; use crate::track::{DeviceTracker, Tracker, UsageScope}; use crate::LabelHelpers; use crate::{api_log, global::Global, id, resource_log, Label}; @@ -83,7 +83,7 @@ pub(crate) enum 
CommandEncoderStatus { /// When a `CommandEncoder` is left in this state, we have also /// returned an error result from the function that encountered /// the problem. Future attempts to use the encoder (for example, - /// calls to [`CommandBuffer::check_recording`]) will also return + /// calls to [`CommandBufferMutable::check_recording`]) will also return /// errors. /// /// Calling [`Global::command_encoder_finish`] in this state @@ -288,6 +288,106 @@ impl CommandBufferMutable { Ok((encoder, tracker)) } + + fn lock_encoder_impl(&mut self, lock: bool) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => { + if lock { + self.status = CommandEncoderStatus::Locked; + } + Ok(()) + } + CommandEncoderStatus::Locked => { + // Any operation on a locked encoder is required to put it into the invalid/error state. + // See https://www.w3.org/TR/webgpu/#encoder-state-locked + self.encoder.discard(); + self.status = CommandEncoderStatus::Error; + Err(CommandEncoderError::Locked) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), + CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), + } + } + + /// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state. + fn check_recording(&mut self) -> Result<(), CommandEncoderError> { + self.lock_encoder_impl(false) + } + + /// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state. + /// + /// Call [`CommandBufferMutable::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state. + fn lock_encoder(&mut self) -> Result<(), CommandEncoderError> { + self.lock_encoder_impl(true) + } + + /// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state. + /// + /// This function is the counterpart to [`CommandBufferMutable::lock_encoder`]. + /// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state. + fn unlock_encoder(&mut self) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid), + CommandEncoderStatus::Locked => { + self.status = CommandEncoderStatus::Recording; + Ok(()) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid), + CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), + } + } + + pub fn check_finished(&self) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Finished => Ok(()), + _ => Err(CommandEncoderError::Invalid), + } + } + + pub(crate) fn finish(&mut self, device: &Device) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => { + if let Err(e) = self.encoder.close(device) { + Err(e.into()) + } else { + self.status = CommandEncoderStatus::Finished; + // Note: if we want to stop tracking the swapchain texture view, + // this is the place to do it. 
+ Ok(()) + } + } + CommandEncoderStatus::Locked => { + self.encoder.discard(); + self.status = CommandEncoderStatus::Error; + Err(CommandEncoderError::Locked) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), + CommandEncoderStatus::Error => { + self.encoder.discard(); + Err(CommandEncoderError::Invalid) + } + } + } + + pub(crate) fn into_baked_commands(self) -> BakedCommands { + BakedCommands { + encoder: self.encoder.raw, + list: self.encoder.list, + trackers: self.trackers, + buffer_memory_init_actions: self.buffer_memory_init_actions, + texture_memory_actions: self.texture_memory_actions, + } + } + + pub(crate) fn destroy(mut self, device: &Device) { + self.encoder.discard(); + unsafe { + self.encoder.raw.reset_all(self.encoder.list); + } + unsafe { + device.raw().destroy_command_encoder(self.encoder.raw); + } + } } /// A buffer of commands to be submitted to the GPU for execution. @@ -319,22 +419,15 @@ pub struct CommandBuffer { /// This `Option` is populated when the command buffer is first created. /// When this is submitted, dropped, or destroyed, its contents are /// extracted into a [`BakedCommands`] by - /// [`CommandBuffer::extract_baked_commands`]. + /// [`CommandBufferMutable::into_baked_commands`]. pub(crate) data: Mutex>, } impl Drop for CommandBuffer { fn drop(&mut self) { resource_log!("Drop {}", self.error_ident()); - if self.data.lock().is_none() { - return; - } - let mut baked = self.extract_baked_commands(); - unsafe { - baked.encoder.reset_all(baked.list); - } - unsafe { - self.device.raw().destroy_command_encoder(baked.encoder); + if let Some(data) = self.data.lock().take() { + data.destroy(&self.device); } } } @@ -374,6 +467,15 @@ impl CommandBuffer { } } + pub(crate) fn new_invalid(device: &Arc, label: &Label) -> Self { + CommandBuffer { + device: device.clone(), + support_clear_texture: device.features.contains(wgt::Features::CLEAR_TEXTURE), + label: label.to_string(), + data: Mutex::new(rank::COMMAND_BUFFER_DATA, None), + } + } + pub(crate) fn insert_barriers_from_tracker( raw: &mut dyn hal::DynCommandEncoder, base: &mut Tracker, @@ -452,80 +554,19 @@ impl CommandBuffer { } impl CommandBuffer { - fn lock_encoder_impl(&self, lock: bool) -> Result<(), CommandEncoderError> { - let mut cmd_buf_data_guard = self.data.lock(); - let cmd_buf_data = cmd_buf_data_guard.as_mut().unwrap(); - match cmd_buf_data.status { - CommandEncoderStatus::Recording => { - if lock { - cmd_buf_data.status = CommandEncoderStatus::Locked; - } - Ok(()) - } - CommandEncoderStatus::Locked => { - // Any operation on a locked encoder is required to put it into the invalid/error state. - // See https://www.w3.org/TR/webgpu/#encoder-state-locked - cmd_buf_data.encoder.discard(); - cmd_buf_data.status = CommandEncoderStatus::Error; - Err(CommandEncoderError::Locked) - } - CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), - CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), - } + pub fn try_get<'a>( + &'a self, + ) -> Result, InvalidResourceError> { + let g = self.data.lock(); + crate::lock::MutexGuard::try_map(g, |data| data.as_mut()) + .map_err(|_| InvalidResourceError(self.error_ident())) } - /// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state. - fn check_recording(&self) -> Result<(), CommandEncoderError> { - self.lock_encoder_impl(false) - } - - /// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state. 
- /// - /// Call [`CommandBuffer::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state. - fn lock_encoder(&self) -> Result<(), CommandEncoderError> { - self.lock_encoder_impl(true) - } - - /// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state. - /// - /// This function is the counterpart to [`CommandBuffer::lock_encoder`]. - /// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state. - fn unlock_encoder(&self) -> Result<(), CommandEncoderError> { - let mut data_lock = self.data.lock(); - let status = &mut data_lock.as_mut().unwrap().status; - match *status { - CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid), - CommandEncoderStatus::Locked => { - *status = CommandEncoderStatus::Recording; - Ok(()) - } - CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid), - CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), - } - } - - pub fn is_finished(&self) -> bool { - match self.data.lock().as_ref().unwrap().status { - CommandEncoderStatus::Finished => true, - _ => false, - } - } - - pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands { - let data = self.data.lock().take().unwrap(); - BakedCommands { - encoder: data.encoder.raw, - list: data.encoder.list, - trackers: data.trackers, - buffer_memory_init_actions: data.buffer_memory_init_actions, - texture_memory_actions: data.texture_memory_actions, - } - } - - pub(crate) fn from_arc_into_baked(self: Arc) -> BakedCommands { - let mut command_buffer = Arc::into_inner(self) - .expect("CommandBuffer cannot be destroyed because is still in use"); - command_buffer.extract_baked_commands() + pub fn try_take<'a>(&'a self) -> Result { + self.data + .lock() + .take() + .ok_or_else(|| InvalidResourceError(self.error_ident())) } } @@ -597,18 +638,10 @@ pub enum CommandEncoderError { #[error("Command encoder is locked by a previously created render/compute pass. Before recording any new commands, the pass must be ended.")] Locked, - #[error("QuerySet {0:?} for pass timestamp writes is invalid.")] - InvalidTimestampWritesQuerySetId(id::QuerySetId), - #[error("Attachment TextureViewId {0:?} is invalid")] - InvalidAttachmentId(id::TextureViewId), #[error(transparent)] InvalidColorAttachment(#[from] ColorAttachmentError), - #[error("Resolve attachment TextureViewId {0:?} is invalid")] - InvalidResolveTargetId(id::TextureViewId), - #[error("Depth stencil attachment TextureViewId {0:?} is invalid")] - InvalidDepthStencilAttachmentId(id::TextureViewId), - #[error("Occlusion QuerySetId {0:?} is invalid")] - InvalidOcclusionQuerySetId(id::QuerySetId), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl Global { @@ -621,34 +654,15 @@ impl Global { let hub = &self.hub; - let error = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - match cmd_buf_data.status { - CommandEncoderStatus::Recording => { - if let Err(e) = cmd_buf_data.encoder.close(&cmd_buf.device) { - Some(e.into()) - } else { - cmd_buf_data.status = CommandEncoderStatus::Finished; - //Note: if we want to stop tracking the swapchain texture view, - // this is the place to do it. 
- None - } - } - CommandEncoderStatus::Locked => { - cmd_buf_data.encoder.discard(); - cmd_buf_data.status = CommandEncoderStatus::Error; - Some(CommandEncoderError::Locked) - } - CommandEncoderStatus::Finished => Some(CommandEncoderError::NotRecording), - CommandEncoderStatus::Error => { - cmd_buf_data.encoder.discard(); - Some(CommandEncoderError::Invalid) - } - } - } - Err(_) => Some(CommandEncoderError::Invalid), + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + + let error = match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.finish(&cmd_buf.device)) + { + Ok(_) => None, + Err(e) => Some(e), }; (encoder_id.into_command_buffer_id(), error) @@ -664,23 +678,19 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::PushDebugGroup(label.to_string())); } let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; - if !self - .instance - .flags + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { unsafe { @@ -700,23 +710,18 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::InsertDebugMarker(label.to_string())); } - if !self - .instance - .flags + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; @@ -736,14 +741,9 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -751,9 +751,9 @@ impl Global { } let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; - if !self - .instance - .flags + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { unsafe { diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index 6d9fa80217..d783721fb4 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -7,7 +7,8 @@ use crate::{ id, init_tracker::MemoryInitKind, resource::{ - DestroyedResourceError, 
MissingBufferUsageError, ParentDevice, QuerySet, Trackable, + DestroyedResourceError, InvalidResourceError, MissingBufferUsageError, ParentDevice, + QuerySet, Trackable, }, track::{StatelessTracker, TrackerIndex}, FastHashMap, @@ -100,12 +101,10 @@ pub enum QueryError { Use(#[from] QueryUseError), #[error("Error encountered while trying to resolve a query")] Resolve(#[from] ResolveError), - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), - #[error("QuerySetId {0:?} is invalid or destroyed")] - InvalidQuerySetId(id::QuerySetId), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } /// Error encountered while trying to use queries @@ -319,22 +318,16 @@ impl Global { ) -> Result<(), QueryError> { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; cmd_buf .device .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::WriteTimestamp { @@ -343,20 +336,14 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - - let raw_encoder = encoder.open(&cmd_buf.device)?; + let raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| QueryError::InvalidQuerySetId(query_set_id))?; - - let query_set = tracker.query_sets.insert_single(query_set); + let query_set = hub.query_sets.get(query_set_id).get()?; query_set.validate_and_write_timestamp(raw_encoder, query_index, None)?; + cmd_buf_data.trackers.query_sets.insert_single(query_set); + Ok(()) } @@ -371,17 +358,11 @@ impl Global { ) -> Result<(), QueryError> { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -394,32 +375,20 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let raw_encoder = encoder.open(&cmd_buf.device)?; - if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 { return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment)); } - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| QueryError::InvalidQuerySetId(query_set_id))?; - - let query_set = tracker.query_sets.insert_single(query_set); + let query_set = hub.query_sets.get(query_set_id).get()?; query_set.same_device_as(cmd_buf.as_ref())?; - let dst_buffer = hub - .buffers - .get(destination) - 
.map_err(|_| QueryError::InvalidBufferId(destination))?; + let dst_buffer = hub.buffers.get(destination).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; - let dst_pending = tracker + let dst_pending = cmd_buf_data + .trackers .buffers .set_single(&dst_buffer, hal::BufferUses::COPY_DST); @@ -465,14 +434,16 @@ impl Global { } // TODO(https://github.com/gfx-rs/wgpu/issues/3993): Need to track initialization state. - buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - &dst_buffer, - buffer_start_offset..buffer_end_offset, - MemoryInitKind::ImplicitlyInitialized, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, + buffer_start_offset..buffer_end_offset, + MemoryInitKind::ImplicitlyInitialized, + ), + ); let raw_dst_buffer = dst_buffer.try_raw(&snatch_guard)?; - + let raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { raw_encoder.transition_buffers(dst_barrier.as_slice()); raw_encoder.copy_query_results( @@ -484,6 +455,8 @@ impl Global { ); } + cmd_buf_data.trackers.query_sets.insert_single(query_set); + Ok(()) } } diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 17e04d14c7..b6680333c2 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -4,6 +4,7 @@ use crate::command::{ }; use crate::init_tracker::BufferInitTrackerAction; use crate::pipeline::RenderPipeline; +use crate::resource::InvalidResourceError; use crate::snatch::SnatchGuard; use crate::{ api_log, @@ -582,14 +583,6 @@ pub enum RenderPassErrorInner { InvalidParentEncoder, #[error("The format of the depth-stencil attachment ({0:?}) is not a depth-stencil format")] InvalidDepthStencilAttachmentFormat(wgt::TextureFormat), - #[error("Buffer {0:?} is invalid or destroyed")] - InvalidBuffer(id::BufferId), - #[error("Render pipeline {0:?} is invalid")] - InvalidPipeline(id::RenderPipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), - #[error("Render bundle {0:?} is invalid")] - InvalidRenderBundle(id::RenderBundleId), #[error("The format of the {location} ({format:?}) is not resolvable")] UnsupportedResolveTargetFormat { location: AttachmentErrorLocation, @@ -635,8 +628,6 @@ pub enum RenderPassErrorInner { SurfaceTextureDropped, #[error("Not enough memory left for render pass")] OutOfMemory, - #[error("BindGroupId {0:?} is invalid")] - InvalidBindGroupId(id::BindGroupId), #[error("Unable to clear non-present/read-only depth")] InvalidDepthOps, #[error("Unable to clear non-present/read-only stencil")] @@ -705,6 +696,8 @@ pub enum RenderPassErrorInner { DestroyedResource(#[from] DestroyedResourceError), #[error("The compute pass has already been ended and no further commands can be recorded")] PassEnded, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for RenderPassErrorInner { @@ -1362,14 +1355,10 @@ impl Global { channel, }) = color_attachment { - let view = texture_views - .get_owned(*view_id) - .map_err(|_| CommandEncoderError::InvalidAttachmentId(*view_id))?; + let view = texture_views.get(*view_id).get()?; let resolve_target = if let Some(resolve_target_id) = resolve_target { - let rt_arc = texture_views.get_owned(*resolve_target_id).map_err(|_| { - CommandEncoderError::InvalidResolveTargetId(*resolve_target_id) - })?; + let rt_arc = texture_views.get(*resolve_target_id).get()?; Some(rt_arc) } else { @@ -1390,13 +1379,7 @@ impl Global { 
arc_desc.depth_stencil_attachment = if let Some(depth_stencil_attachment) = desc.depth_stencil_attachment { - let view = texture_views - .get_owned(depth_stencil_attachment.view) - .map_err(|_| { - CommandEncoderError::InvalidDepthStencilAttachmentId( - depth_stencil_attachment.view, - ) - })?; + let view = texture_views.get(depth_stencil_attachment.view).get()?; Some(ArcRenderPassDepthStencilAttachment { view, @@ -1408,9 +1391,7 @@ impl Global { }; arc_desc.timestamp_writes = if let Some(tw) = desc.timestamp_writes { - let query_set = query_sets.get_owned(tw.query_set).map_err(|_| { - CommandEncoderError::InvalidTimestampWritesQuerySetId(tw.query_set) - })?; + let query_set = query_sets.get(tw.query_set).get()?; Some(ArcPassTimestampWrites { query_set, @@ -1423,9 +1404,7 @@ impl Global { arc_desc.occlusion_query_set = if let Some(occlusion_query_set) = desc.occlusion_query_set { - let query_set = query_sets.get_owned(occlusion_query_set).map_err(|_| { - CommandEncoderError::InvalidOcclusionQuerySetId(occlusion_query_set) - })?; + let query_set = query_sets.get(occlusion_query_set).get()?; Some(query_set) } else { @@ -1446,12 +1425,13 @@ impl Global { let make_err = |e, arc_desc| (RenderPass::new(None, arc_desc), Some(e)); - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc), - }; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); - match cmd_buf.lock_encoder() { + match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder()) + { Ok(_) => {} Err(e) => return make_err(e, arc_desc), }; @@ -1461,6 +1441,8 @@ impl Global { (RenderPass::new(Some(cmd_buf), arc_desc), err) } + /// Note that this differs from [`Self::render_pass_end`], it will + /// create a new pass, replay the commands and end the pass. 
#[doc(hidden)] #[cfg(any(feature = "serde", feature = "replay"))] pub fn render_pass_end_with_unresolved_commands( @@ -1476,15 +1458,11 @@ impl Global { #[cfg(feature = "trace")] { - let hub = &self.hub; - - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(pass_scope)?, - }; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = self + .hub + .command_buffers + .get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunRenderPass { @@ -1528,29 +1506,26 @@ impl Global { }); }; - let hub = &self.hub; render_pass.base = Some(BasePass { label, - commands: super::RenderCommand::resolve_render_command_ids(hub, &commands)?, + commands: super::RenderCommand::resolve_render_command_ids(&self.hub, &commands)?, dynamic_offsets, string_data, push_constant_data, }); - if let Some(err) = encoder_error { - Err(RenderPassError { - scope: pass_scope, - inner: err.into(), - }) - } else { - self.render_pass_end(&mut render_pass) - } + self.render_pass_end(&mut render_pass) } - #[doc(hidden)] pub fn render_pass_end(&self, pass: &mut RenderPass) -> Result<(), RenderPassError> { let pass_scope = PassErrorScope::Pass; + let cmd_buf = pass + .parent + .as_ref() + .ok_or(RenderPassErrorInner::InvalidParentEncoder) + .map_pass_err(pass_scope)?; + let base = pass .base .take() @@ -1562,20 +1537,16 @@ impl Global { base.label.as_deref().unwrap_or("") ); - let Some(cmd_buf) = pass.parent.as_ref() else { - return Err(RenderPassErrorInner::InvalidParentEncoder).map_pass_err(pass_scope); - }; - cmd_buf.unlock_encoder().map_pass_err(pass_scope)?; - - let hal_label = hal_label(base.label.as_deref(), self.instance.flags); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; + cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?; + let cmd_buf_data = &mut *cmd_buf_data; let device = &cmd_buf.device; let snatch_guard = &device.snatchable_lock.read(); - let (scope, pending_discard_init_fixups) = { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let hal_label = hal_label(base.label.as_deref(), device.instance_flags); + let (scope, pending_discard_init_fixups) = { device.check_is_valid().map_pass_err(pass_scope)?; let encoder = &mut cmd_buf_data.encoder; @@ -1900,9 +1871,6 @@ impl Global { (trackers, pending_discard_init_fixups) }; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let encoder = &mut cmd_buf_data.encoder; let status = &mut cmd_buf_data.status; let tracker = &mut cmd_buf_data.trackers; @@ -2774,11 +2742,7 @@ impl Global { buffer_id: id::Id, ) -> Result, RenderPassError> { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; + let buffer = hub.buffers.get(buffer_id).get().map_pass_err(scope)?; Ok(buffer) } @@ -2789,11 +2753,7 @@ impl Global { query_set_id: id::Id, ) -> Result, RenderPassError> { let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| RenderPassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; Ok(query_set) } @@ 
-2830,7 +2790,7 @@ impl Global { let bg = hub .bind_groups .get(bind_group_id) - .map_err(|_| RenderPassErrorInner::InvalidBindGroupId(bind_group_id)) + .get() .map_pass_err(scope)?; bind_group = Some(bg); } @@ -2863,7 +2823,7 @@ impl Global { let pipeline = hub .render_pipelines .get(pipeline_id) - .map_err(|_| RenderPassErrorInner::InvalidPipeline(pipeline_id)) + .get() .map_pass_err(scope)?; base.commands.push(ArcRenderCommand::SetPipeline(pipeline)); @@ -3174,23 +3134,11 @@ impl Global { }; let base = pass.base_mut(scope)?; - // Don't use resolve_render_pass_buffer_id here, because we don't want to take the read-lock twice. - let hub = &self.hub; - let buffers = hub.buffers.read(); - let buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; - let count_buffer = buffers - .get_owned(count_buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(count_buffer_id)) - .map_pass_err(scope)?; - base.commands .push(ArcRenderCommand::MultiDrawIndirectCount { - buffer, + buffer: self.resolve_render_pass_buffer_id(scope, buffer_id)?, offset, - count_buffer, + count_buffer: self.resolve_render_pass_buffer_id(scope, count_buffer_id)?, count_buffer_offset, max_count, indexed: false, @@ -3214,24 +3162,11 @@ impl Global { }; let base = pass.base_mut(scope)?; - // Don't use resolve_render_pass_buffer_id here, because we don't want to take the read-lock twice. - let hub = &self.hub; - let buffers = hub.buffers.read(); - let buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; - - let count_buffer = buffers - .get_owned(count_buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(count_buffer_id)) - .map_pass_err(scope)?; - base.commands .push(ArcRenderCommand::MultiDrawIndirectCount { - buffer, + buffer: self.resolve_render_pass_buffer_id(scope, buffer_id)?, offset, - count_buffer, + count_buffer: self.resolve_render_pass_buffer_id(scope, count_buffer_id)?, count_buffer_offset, max_count, indexed: true, @@ -3375,10 +3310,7 @@ impl Global { let bundles = hub.render_bundles.read(); for &bundle_id in render_bundle_ids { - let bundle = bundles - .get_owned(bundle_id) - .map_err(|_| RenderPassErrorInner::InvalidRenderBundle(bundle_id)) - .map_pass_err(scope)?; + let bundle = bundles.get(bundle_id).get().map_pass_err(scope)?; base.commands.push(ArcRenderCommand::ExecuteBundle(bundle)); } diff --git a/wgpu-core/src/command/render_command.rs b/wgpu-core/src/command/render_command.rs index 7b8a2f2837..d4e2689d27 100644 --- a/wgpu-core/src/command/render_command.rs +++ b/wgpu-core/src/command/render_command.rs @@ -129,9 +129,7 @@ impl RenderCommand { hub: &crate::hub::Hub, commands: &[RenderCommand], ) -> Result, super::RenderPassError> { - use super::{ - DrawKind, PassErrorScope, RenderCommandError, RenderPassError, RenderPassErrorInner, - }; + use super::{DrawKind, PassErrorScope, RenderPassError}; let buffers_guard = hub.buffers.read(); let bind_group_guard = hub.bind_groups.read(); @@ -139,253 +137,253 @@ impl RenderCommand { let pipelines_guard = hub.render_pipelines.read(); let render_bundles_guard = hub.render_bundles.read(); - let resolved_commands: Vec = commands - .iter() - .map(|c| -> Result { - Ok(match *c { - RenderCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group_id, - } => { - if bind_group_id.is_none() { - return Ok(ArcRenderCommand::SetBindGroup { + let resolved_commands: Vec = + commands + .iter() + .map(|c| -> 
Result { + Ok(match *c { + RenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group_id, + } => { + if bind_group_id.is_none() { + return Ok(ArcRenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: None, + }); + } + + let bind_group_id = bind_group_id.unwrap(); + let bg = bind_group_guard.get(bind_group_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetBindGroup, + inner: e.into(), + } + })?; + + ArcRenderCommand::SetBindGroup { index, num_dynamic_offsets, - bind_group: None, - }); + bind_group: Some(bg), + } } - let bind_group_id = bind_group_id.unwrap(); - let bg = bind_group_guard.get_owned(bind_group_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetBindGroup, - inner: RenderPassErrorInner::InvalidBindGroupId(bind_group_id), - } - })?; + RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline( + pipelines_guard.get(pipeline_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetPipelineRender, + inner: e.into(), + } + })?, + ), - ArcRenderCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group: Some(bg), + RenderCommand::SetPushConstant { + offset, + size_bytes, + values_offset, + stages, + } => ArcRenderCommand::SetPushConstant { + offset, + size_bytes, + values_offset, + stages, + }, + + RenderCommand::PushDebugGroup { color, len } => { + ArcRenderCommand::PushDebugGroup { color, len } + } + + RenderCommand::PopDebugGroup => ArcRenderCommand::PopDebugGroup, + + RenderCommand::InsertDebugMarker { color, len } => { + ArcRenderCommand::InsertDebugMarker { color, len } } - } - - RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline( - pipelines_guard - .get_owned(pipeline_id) - .map_err(|_| RenderPassError { - scope: PassErrorScope::SetPipelineRender, - inner: RenderCommandError::InvalidPipelineId(pipeline_id).into(), + + RenderCommand::WriteTimestamp { + query_set_id, + query_index, + } => ArcRenderCommand::WriteTimestamp { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::WriteTimestamp, + inner: e.into(), + } })?, - ), - - RenderCommand::SetPushConstant { - offset, - size_bytes, - values_offset, - stages, - } => ArcRenderCommand::SetPushConstant { - offset, - size_bytes, - values_offset, - stages, - }, - - RenderCommand::PushDebugGroup { color, len } => { - ArcRenderCommand::PushDebugGroup { color, len } - } - - RenderCommand::PopDebugGroup => ArcRenderCommand::PopDebugGroup, - - RenderCommand::InsertDebugMarker { color, len } => { - ArcRenderCommand::InsertDebugMarker { color, len } - } - - RenderCommand::WriteTimestamp { - query_set_id, - query_index, - } => ArcRenderCommand::WriteTimestamp { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::WriteTimestamp, - inner: RenderPassErrorInner::InvalidQuerySet(query_set_id), - } - })?, - query_index, - }, - - RenderCommand::BeginPipelineStatisticsQuery { - query_set_id, - query_index, - } => ArcRenderCommand::BeginPipelineStatisticsQuery { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::BeginPipelineStatisticsQuery, - inner: RenderPassErrorInner::InvalidQuerySet(query_set_id), - } - })?, - query_index, - }, - - RenderCommand::EndPipelineStatisticsQuery => { - ArcRenderCommand::EndPipelineStatisticsQuery - } - - RenderCommand::SetIndexBuffer { - buffer_id, - index_format, - offset, - size, - } => 
ArcRenderCommand::SetIndexBuffer { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetIndexBuffer, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), - } - })?, - index_format, - offset, - size, - }, - - RenderCommand::SetVertexBuffer { - slot, - buffer_id, - offset, - size, - } => ArcRenderCommand::SetVertexBuffer { - slot, - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetVertexBuffer, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), - } - })?, - offset, - size, - }, - - RenderCommand::SetBlendConstant(color) => { - ArcRenderCommand::SetBlendConstant(color) - } - - RenderCommand::SetStencilReference(reference) => { - ArcRenderCommand::SetStencilReference(reference) - } - - RenderCommand::SetViewport { - rect, - depth_min, - depth_max, - } => ArcRenderCommand::SetViewport { - rect, - depth_min, - depth_max, - }, - - RenderCommand::SetScissor(scissor) => ArcRenderCommand::SetScissor(scissor), - - RenderCommand::Draw { - vertex_count, - instance_count, - first_vertex, - first_instance, - } => ArcRenderCommand::Draw { - vertex_count, - instance_count, - first_vertex, - first_instance, - }, - - RenderCommand::DrawIndexed { - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - } => ArcRenderCommand::DrawIndexed { - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - }, - - RenderCommand::MultiDrawIndirect { - buffer_id, - offset, - count, - indexed, - } => ArcRenderCommand::MultiDrawIndirect { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::Draw { - kind: if count.is_some() { - DrawKind::MultiDrawIndirect - } else { - DrawKind::DrawIndirect - }, - indexed, - }, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), - } - })?, - offset, - count, - indexed, - }, - - RenderCommand::MultiDrawIndirectCount { - buffer_id, - offset, - count_buffer_id, - count_buffer_offset, - max_count, - indexed, - } => { - let scope = PassErrorScope::Draw { - kind: DrawKind::MultiDrawIndirectCount, + query_index, + }, + + RenderCommand::BeginPipelineStatisticsQuery { + query_set_id, + query_index, + } => ArcRenderCommand::BeginPipelineStatisticsQuery { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::BeginPipelineStatisticsQuery, + inner: e.into(), + } + })?, + query_index, + }, + + RenderCommand::EndPipelineStatisticsQuery => { + ArcRenderCommand::EndPipelineStatisticsQuery + } + + RenderCommand::SetIndexBuffer { + buffer_id, + index_format, + offset, + size, + } => ArcRenderCommand::SetIndexBuffer { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetIndexBuffer, + inner: e.into(), + } + })?, + index_format, + offset, + size, + }, + + RenderCommand::SetVertexBuffer { + slot, + buffer_id, + offset, + size, + } => ArcRenderCommand::SetVertexBuffer { + slot, + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetVertexBuffer, + inner: e.into(), + } + })?, + offset, + size, + }, + + RenderCommand::SetBlendConstant(color) => { + ArcRenderCommand::SetBlendConstant(color) + } + + RenderCommand::SetStencilReference(reference) => { + ArcRenderCommand::SetStencilReference(reference) + } + + RenderCommand::SetViewport { + rect, + depth_min, + depth_max, + } => ArcRenderCommand::SetViewport { + 
rect, + depth_min, + depth_max, + }, + + RenderCommand::SetScissor(scissor) => ArcRenderCommand::SetScissor(scissor), + + RenderCommand::Draw { + vertex_count, + instance_count, + first_vertex, + first_instance, + } => ArcRenderCommand::Draw { + vertex_count, + instance_count, + first_vertex, + first_instance, + }, + + RenderCommand::DrawIndexed { + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + } => ArcRenderCommand::DrawIndexed { + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + }, + + RenderCommand::MultiDrawIndirect { + buffer_id, + offset, + count, indexed, - }; - ArcRenderCommand::MultiDrawIndirectCount { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { + } => ArcRenderCommand::MultiDrawIndirect { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { RenderPassError { - scope, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), + scope: PassErrorScope::Draw { + kind: if count.is_some() { + DrawKind::MultiDrawIndirect + } else { + DrawKind::DrawIndirect + }, + indexed, + }, + inner: e.into(), } })?, offset, - count_buffer: buffers_guard.get_owned(count_buffer_id).map_err( - |_| RenderPassError { - scope, - inner: RenderCommandError::InvalidBufferId(count_buffer_id) - .into(), - }, - )?, + count, + indexed, + }, + + RenderCommand::MultiDrawIndirectCount { + buffer_id, + offset, + count_buffer_id, count_buffer_offset, max_count, indexed, + } => { + let scope = PassErrorScope::Draw { + kind: DrawKind::MultiDrawIndirectCount, + indexed, + }; + ArcRenderCommand::MultiDrawIndirectCount { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope, + inner: e.into(), + } + })?, + offset, + count_buffer: buffers_guard.get(count_buffer_id).get().map_err( + |e| RenderPassError { + scope, + inner: e.into(), + }, + )?, + count_buffer_offset, + max_count, + indexed, + } } - } - RenderCommand::BeginOcclusionQuery { query_index } => { - ArcRenderCommand::BeginOcclusionQuery { query_index } - } + RenderCommand::BeginOcclusionQuery { query_index } => { + ArcRenderCommand::BeginOcclusionQuery { query_index } + } - RenderCommand::EndOcclusionQuery => ArcRenderCommand::EndOcclusionQuery, + RenderCommand::EndOcclusionQuery => ArcRenderCommand::EndOcclusionQuery, - RenderCommand::ExecuteBundle(bundle) => ArcRenderCommand::ExecuteBundle( - render_bundles_guard - .get_owned(bundle) - .map_err(|_| RenderPassError { - scope: PassErrorScope::ExecuteBundle, - inner: RenderCommandError::InvalidRenderBundle(bundle).into(), + RenderCommand::ExecuteBundle(bundle) => ArcRenderCommand::ExecuteBundle( + render_bundles_guard.get(bundle).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::ExecuteBundle, + inner: e.into(), + } })?, - ), + ), + }) }) - }) - .collect::, RenderPassError>>()?; + .collect::, RenderPassError>>()?; Ok(resolved_commands) } } diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index aeceb29af3..72eae50c25 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -12,11 +12,11 @@ use crate::{ TextureInitTrackerAction, }, resource::{ - DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError, ParentDevice, - Texture, TextureErrorDimension, + DestroyedResourceError, InvalidResourceError, MissingBufferUsageError, + MissingTextureUsageError, ParentDevice, Texture, TextureErrorDimension, }, snatch::SnatchGuard, - track::{TextureSelector, Tracker}, + track::TextureSelector, }; use 
arrayvec::ArrayVec; @@ -25,7 +25,7 @@ use wgt::{BufferAddress, BufferUsages, Extent3d, TextureUsages}; use std::sync::Arc; -use super::{memory_init::CommandBufferTextureMemoryActions, ClearError, CommandEncoder}; +use super::{ClearError, CommandBufferMutable}; pub type ImageCopyBuffer = wgt::ImageCopyBuffer; pub type ImageCopyTexture = wgt::ImageCopyTexture; @@ -41,10 +41,6 @@ pub enum CopySide { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum TransferError { - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error("Source and destination cannot be the same buffer")] SameSourceDestinationBuffer, #[error(transparent)] @@ -150,6 +146,8 @@ pub enum CopyError { Transfer(#[from] TransferError), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for CopyError { @@ -408,9 +406,7 @@ pub(crate) fn validate_texture_copy_range( fn handle_texture_init( init_kind: MemoryInitKind, - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, copy_texture: &ImageCopyTexture, copy_size: &Extent3d, @@ -428,11 +424,13 @@ fn handle_texture_init( }; // Register the init action. - let immediate_inits = texture_memory_actions.register_init_action(&{ init_action }); + let immediate_inits = cmd_buf_data + .texture_memory_actions + .register_init_action(&{ init_action }); // In rare cases we may need to insert an init operation immediately onto the command buffer. if !immediate_inits.is_empty() { - let cmd_buf_raw = encoder.open(device)?; + let cmd_buf_raw = cmd_buf_data.encoder.open(device)?; for init in immediate_inits { clear_texture( &init.texture, @@ -441,7 +439,7 @@ fn handle_texture_init( layer_range: init.layer..(init.layer + 1), }, cmd_buf_raw, - &mut trackers.textures, + &mut cmd_buf_data.trackers.textures, &device.alignments, device.zero_buffer.as_ref(), snatch_guard, @@ -457,9 +455,7 @@ fn handle_texture_init( /// Ensure the source texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. fn handle_src_texture_init( - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, source: &ImageCopyTexture, copy_size: &Extent3d, @@ -468,9 +464,7 @@ fn handle_src_texture_init( ) -> Result<(), TransferError> { handle_texture_init( MemoryInitKind::NeedsInitializedMemory, - encoder, - trackers, - texture_memory_actions, + cmd_buf_data, device, source, copy_size, @@ -485,9 +479,7 @@ fn handle_src_texture_init( /// Ensure the destination texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. 
fn handle_dst_texture_init( - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, destination: &ImageCopyTexture, copy_size: &Extent3d, @@ -510,9 +502,7 @@ fn handle_dst_texture_init( handle_texture_init( dst_init_kind, - encoder, - trackers, - texture_memory_actions, + cmd_buf_data, device, destination, copy_size, @@ -542,17 +532,11 @@ impl Global { } let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; @@ -570,10 +554,7 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); - let src_buffer = hub - .buffers - .get(source) - .map_err(|_| TransferError::InvalidBufferId(source))?; + let src_buffer = hub.buffers.get(source).get()?; src_buffer.same_device_as(cmd_buf.as_ref())?; @@ -589,10 +570,7 @@ impl Global { // expecting only a single barrier let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard)); - let dst_buffer = hub - .buffers - .get(destination) - .map_err(|_| TransferError::InvalidBufferId(destination))?; + let dst_buffer = hub.buffers.get(destination).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; @@ -712,21 +690,15 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyBufferToTexture { @@ -736,20 +708,12 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; - if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_buffer_to_texture of size 0"); return Ok(()); } - let dst_texture = hub - .textures - .get(destination.texture) - .map_err(|_| TransferError::InvalidTextureId(destination.texture))?; + let dst_texture = hub.textures.get(destination.texture).get()?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -768,9 +732,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
handle_dst_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, destination, copy_size, @@ -778,14 +740,12 @@ impl Global { &snatch_guard, )?; - let src_buffer = hub - .buffers - .get(source.buffer) - .map_err(|_| TransferError::InvalidBufferId(source.buffer))?; + let src_buffer = hub.buffers.get(source.buffer).get()?; src_buffer.same_device_as(cmd_buf.as_ref())?; - let src_pending = tracker + let src_pending = cmd_buf_data + .trackers .buffers .set_single(&src_buffer, hal::BufferUses::COPY_SRC); @@ -795,10 +755,11 @@ impl Global { .map_err(TransferError::MissingBufferUsage)?; let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard)); - let dst_pending = - tracker - .textures - .set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST); + let dst_pending = cmd_buf_data.trackers.textures.set_single( + &dst_texture, + dst_range, + hal::TextureUses::COPY_DST, + ); let dst_raw = dst_texture.try_raw(&snatch_guard)?; dst_texture .check_usage(TextureUsages::COPY_DST) @@ -835,11 +796,13 @@ impl Global { .map_err(TransferError::from)?; } - buffer_memory_init_actions.extend(src_buffer.initialization_status.read().create_action( - &src_buffer, - source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::NeedsInitializedMemory, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + src_buffer.initialization_status.read().create_action( + &src_buffer, + source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::NeedsInitializedMemory, + ), + ); let regions = (0..array_layer_count) .map(|rel_array_layer| { @@ -855,7 +818,7 @@ impl Global { }) .collect::>(); - let cmd_buf_raw = encoder.open(&cmd_buf.device)?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_textures(&dst_barrier); cmd_buf_raw.transition_buffers(src_barrier.as_slice()); @@ -880,21 +843,15 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToBuffer { @@ -903,20 +860,13 @@ impl Global { size: *copy_size, }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_buffer of size 0"); return Ok(()); } - let src_texture = hub - .textures - .get(source.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; + let src_texture = hub.textures.get(source.texture).get()?; src_texture.same_device_as(cmd_buf.as_ref())?; @@ -931,9 +881,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
handle_src_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, source, copy_size, @@ -941,10 +889,11 @@ impl Global { &snatch_guard, )?; - let src_pending = - tracker - .textures - .set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC); + let src_pending = cmd_buf_data.trackers.textures.set_single( + &src_texture, + src_range, + hal::TextureUses::COPY_SRC, + ); let src_raw = src_texture.try_raw(&snatch_guard)?; src_texture .check_usage(TextureUsages::COPY_SRC) @@ -966,14 +915,12 @@ impl Global { .map(|pending| pending.into_hal(src_raw)) .collect::>(); - let dst_buffer = hub - .buffers - .get(destination.buffer) - .map_err(|_| TransferError::InvalidBufferId(destination.buffer))?; + let dst_buffer = hub.buffers.get(destination.buffer).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; - let dst_pending = tracker + let dst_pending = cmd_buf_data + .trackers .buffers .set_single(&dst_buffer, hal::BufferUses::COPY_DST); @@ -1011,11 +958,14 @@ impl Global { .map_err(TransferError::from)?; } - buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - &dst_buffer, - destination.layout.offset..(destination.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::ImplicitlyInitialized, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, + destination.layout.offset + ..(destination.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::ImplicitlyInitialized, + ), + ); let regions = (0..array_layer_count) .map(|rel_array_layer| { @@ -1030,7 +980,7 @@ impl Global { } }) .collect::>(); - let cmd_buf_raw = encoder.open(&cmd_buf.device)?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_buffers(dst_barrier.as_slice()); cmd_buf_raw.transition_textures(&src_barrier); @@ -1060,23 +1010,17 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; let snatch_guard = device.snatchable_lock.read(); - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToTexture { @@ -1085,23 +1029,14 @@ impl Global { size: *copy_size, }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_texture of size 0"); return Ok(()); } - let src_texture = hub - .textures - .get(source.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; - let dst_texture = hub - .textures - .get(destination.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; + let src_texture = hub.textures.get(source.texture).get()?; + let dst_texture = hub.textures.get(destination.texture).get()?; src_texture.same_device_as(cmd_buf.as_ref())?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -1143,9 
+1078,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. handle_src_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, source, copy_size, @@ -1153,9 +1086,7 @@ impl Global { &snatch_guard, )?; handle_dst_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, destination, copy_size, diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index a86651d87f..4b7143e556 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -6,7 +6,8 @@ use crate::{ self, BindGroupEntry, BindingResource, BufferBinding, ResolvedBindGroupDescriptor, ResolvedBindGroupEntry, ResolvedBindingResource, ResolvedBufferBinding, }, - command, conv, + command::{self, CommandBuffer}, + conv, device::{bgl, life::WaitIdleError, DeviceError, DeviceLostClosure, DeviceLostReason}, global::Global, hal_api::HalApi, @@ -19,14 +20,19 @@ use crate::{ present, resource::{ self, BufferAccessError, BufferAccessResult, BufferMapOperation, CreateBufferError, + Fallible, }, storage::Storage, - Label, + Label, LabelHelpers, }; use wgt::{BufferAddress, TextureFormat}; -use std::{borrow::Cow, ptr::NonNull, sync::atomic::Ordering}; +use std::{ + borrow::Cow, + ptr::NonNull, + sync::{atomic::Ordering, Arc}, +}; use super::{ImplicitPipelineIds, UserClosures}; @@ -35,18 +41,10 @@ impl Global { &self, adapter_id: AdapterId, surface_id: SurfaceId, - ) -> Result { - let hub = &self.hub; - - let surface_guard = self.surfaces.read(); - let adapter_guard = hub.adapters.read(); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?; - let surface = surface_guard - .get(surface_id) - .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?; - Ok(adapter.is_surface_supported(surface)) + ) -> bool { + let surface = self.surfaces.get(surface_id); + let adapter = self.hub.adapters.get(adapter_id); + adapter.is_surface_supported(&surface) } pub fn surface_get_capabilities( @@ -71,63 +69,30 @@ impl Global { }) } - fn fetch_adapter_and_surface< - F: FnOnce(&Adapter, &Surface) -> Result, - B, - >( + fn fetch_adapter_and_surface B, B>( &self, surface_id: SurfaceId, adapter_id: AdapterId, get_supported_callback: F, - ) -> Result { - let hub = &self.hub; - - let surface_guard = self.surfaces.read(); - let adapter_guard = hub.adapters.read(); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?; - let surface = surface_guard - .get(surface_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?; - - get_supported_callback(adapter, surface) + ) -> B { + let surface = self.surfaces.get(surface_id); + let adapter = self.hub.adapters.get(adapter_id); + get_supported_callback(&adapter, &surface) } - pub fn device_features(&self, device_id: DeviceId) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - - Ok(device.features) + pub fn device_features(&self, device_id: DeviceId) -> wgt::Features { + let device = self.hub.devices.get(device_id); + device.features } - pub fn device_limits(&self, device_id: DeviceId) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - - Ok(device.limits.clone()) + pub fn device_limits(&self, device_id: DeviceId) -> wgt::Limits { + let device = 
self.hub.devices.get(device_id); + device.limits.clone() } - pub fn device_downlevel_properties( - &self, - device_id: DeviceId, - ) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - - Ok(device.downlevel.clone()) + pub fn device_downlevel_properties(&self, device_id: DeviceId) -> wgt::DownlevelCapabilities { + let device = self.hub.devices.get(device_id); + device.downlevel.clone() } pub fn device_create_buffer( @@ -142,12 +107,7 @@ impl Global { let fid = hub.buffers.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => { - break 'error DeviceError::InvalidDeviceId.into(); - } - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -166,7 +126,7 @@ impl Global { } }; - let id = fid.assign(buffer); + let id = fid.assign(Fallible::Valid(buffer)); api_log!( "Device::create_buffer({:?}{}) -> {id:?}", @@ -181,7 +141,7 @@ impl Global { return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -213,32 +173,37 @@ impl Global { /// [`device_create_buffer`]: Global::device_create_buffer /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages - pub fn create_buffer_error(&self, backend: wgt::Backend, id_in: Option) { - let hub = &self.hub; - let fid = hub.buffers.prepare(backend, id_in); - - fid.assign_error(); + pub fn create_buffer_error( + &self, + backend: wgt::Backend, + id_in: Option, + desc: &resource::BufferDescriptor, + ) { + let fid = self.hub.buffers.prepare(backend, id_in); + fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } pub fn create_render_bundle_error( &self, backend: wgt::Backend, id_in: Option, + desc: &command::RenderBundleDescriptor, ) { - let hub = &self.hub; - let fid = hub.render_bundles.prepare(backend, id_in); - - fid.assign_error(); + let fid = self.hub.render_bundles.prepare(backend, id_in); + fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } /// Assign `id_in` an error with the given `label`. /// /// See `create_buffer_error` for more context and explanation. 
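From this point on, the registry no longer uses a separate error slot: creation failures are stored as Fallible::Invalid(Arc::new(desc.label.to_string())), and later lookups call .get() to either obtain the resource or produce an error naming the offending label. The sketch below is only a guess at the minimal shape of that pattern (the real Fallible, error type, and message wording in wgpu-core may differ); it illustrates how keeping the label makes the eventual error self-describing.

use std::sync::Arc;

// Either the live resource or the label it was created with.
enum Fallible<T> {
    Valid(Arc<T>),
    Invalid(Arc<String>),
}

#[derive(Debug)]
struct InvalidResourceError(String);

impl<T> Fallible<T> {
    // Lookups succeed for valid entries and otherwise report the stored label.
    fn get(&self) -> Result<Arc<T>, InvalidResourceError> {
        match self {
            Fallible::Valid(resource) => Ok(resource.clone()),
            Fallible::Invalid(label) => Err(InvalidResourceError(format!(
                "Resource with '{label}' label is invalid"
            ))),
        }
    }
}

fn main() {
    let good: Fallible<u32> = Fallible::Valid(Arc::new(7));
    let bad: Fallible<u32> = Fallible::Invalid(Arc::new("my-buffer".to_string()));
    assert_eq!(*good.get().unwrap(), 7);
    println!("{:?}", bad.get().unwrap_err());
}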
- pub fn create_texture_error(&self, backend: wgt::Backend, id_in: Option) { - let hub = &self.hub; - let fid = hub.textures.prepare(backend, id_in); - - fid.assign_error(); + pub fn create_texture_error( + &self, + backend: wgt::Backend, + id_in: Option, + desc: &resource::TextureDescriptor, + ) { + let fid = self.hub.textures.prepare(backend, id_in); + fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } #[cfg(feature = "replay")] @@ -250,10 +215,7 @@ impl Global { ) -> BufferAccessResult { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; let device = &buffer.device; @@ -300,10 +262,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| resource::DestroyError::Invalid)?; + let buffer = hub.buffers.get(buffer_id).get()?; #[cfg(feature = "trace")] if let Some(trace) = buffer.device.trace.lock().as_mut() { @@ -324,9 +283,9 @@ impl Global { let hub = &self.hub; - let buffer = match hub.buffers.unregister(buffer_id) { - Some(buffer) => buffer, - None => { + let buffer = match hub.buffers.remove(buffer_id).get() { + Ok(buffer) => buffer, + Err(_) => { return; } }; @@ -355,10 +314,7 @@ impl Global { let fid = hub.textures.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -370,7 +326,7 @@ impl Global { Err(error) => break 'error error, }; - let id = fid.assign(texture); + let id = fid.assign(Fallible::Valid(texture)); api_log!("Device::create_texture({desc:?}) -> {id:?}"); return (id, None); @@ -378,7 +334,7 @@ impl Global { log::error!("Device::create_texture error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -401,10 +357,7 @@ impl Global { let fid = hub.textures.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); // NB: Any change done through the raw texture handle will not be // recorded in the replay @@ -418,7 +371,7 @@ impl Global { Err(error) => break 'error error, }; - let id = fid.assign(texture); + let id = fid.assign(Fallible::Valid(texture)); api_log!("Device::create_texture({desc:?}) -> {id:?}"); return (id, None); @@ -426,7 +379,7 @@ impl Global { log::error!("Device::create_texture error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -447,31 +400,21 @@ impl Global { let hub = &self.hub; let fid = hub.buffers.prepare(A::VARIANT, id_in); - let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; - - // NB: Any change done through the raw buffer handle will not be - // recorded in the replay - #[cfg(feature = "trace")] - if let Some(trace) = device.trace.lock().as_mut() { - trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); - } - - let buffer = device.create_buffer_from_hal(Box::new(hal_buffer), desc); + let device = 
self.hub.devices.get(device_id); - let id = fid.assign(buffer); - api_log!("Device::create_buffer -> {id:?}"); + // NB: Any change done through the raw buffer handle will not be + // recorded in the replay + #[cfg(feature = "trace")] + if let Some(trace) = device.trace.lock().as_mut() { + trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); + } - return (id, None); - }; + let buffer = device.create_buffer_from_hal(Box::new(hal_buffer), desc); - log::error!("Device::create_buffer error: {error}"); + let id = fid.assign(buffer); + api_log!("Device::create_buffer -> {id:?}"); - let id = fid.assign_error(); - (id, Some(error)) + (id, None) } pub fn texture_destroy(&self, texture_id: id::TextureId) -> Result<(), resource::DestroyError> { @@ -480,10 +423,7 @@ impl Global { let hub = &self.hub; - let texture = hub - .textures - .get(texture_id) - .map_err(|_| resource::DestroyError::Invalid)?; + let texture = hub.textures.get(texture_id).get()?; #[cfg(feature = "trace")] if let Some(trace) = texture.device.trace.lock().as_mut() { @@ -499,9 +439,10 @@ impl Global { let hub = &self.hub; - if let Some(_texture) = hub.textures.unregister(texture_id) { - #[cfg(feature = "trace")] - if let Some(t) = _texture.device.trace.lock().as_mut() { + let _texture = hub.textures.remove(texture_id); + #[cfg(feature = "trace")] + if let Ok(texture) = _texture.get() { + if let Some(t) = texture.device.trace.lock().as_mut() { t.add(trace::Action::DestroyTexture(texture_id)); } } @@ -520,11 +461,9 @@ impl Global { let fid = hub.texture_views.prepare(texture_id.backend(), id_in); let error = 'error: { - let texture = match hub.textures.get(texture_id) { + let texture = match hub.textures.get(texture_id).get() { Ok(texture) => texture, - Err(_) => { - break 'error resource::CreateTextureViewError::InvalidTextureId(texture_id) - } + Err(e) => break 'error e.into(), }; let device = &texture.device; @@ -542,7 +481,7 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(view); + let id = fid.assign(Fallible::Valid(view)); api_log!("Texture::create_view({texture_id:?}) -> {id:?}"); @@ -550,7 +489,7 @@ impl Global { }; log::error!("Texture::create_view({texture_id:?}) error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -563,9 +502,11 @@ impl Global { let hub = &self.hub; - if let Some(_view) = hub.texture_views.unregister(texture_view_id) { - #[cfg(feature = "trace")] - if let Some(t) = _view.device.trace.lock().as_mut() { + let _view = hub.texture_views.remove(texture_view_id); + + #[cfg(feature = "trace")] + if let Ok(view) = _view.get() { + if let Some(t) = view.device.trace.lock().as_mut() { t.add(trace::Action::DestroyTextureView(texture_view_id)); } } @@ -584,10 +525,7 @@ impl Global { let fid = hub.samplers.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -599,13 +537,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(sampler); + let id = fid.assign(Fallible::Valid(sampler)); api_log!("Device::create_sampler -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -615,9 +553,11 @@ impl Global { let 
hub = &self.hub; - if let Some(_sampler) = hub.samplers.unregister(sampler_id) { - #[cfg(feature = "trace")] - if let Some(t) = _sampler.device.trace.lock().as_mut() { + let _sampler = hub.samplers.remove(sampler_id); + + #[cfg(feature = "trace")] + if let Ok(sampler) = _sampler.get() { + if let Some(t) = sampler.device.trace.lock().as_mut() { t.add(trace::Action::DestroySampler(sampler_id)); } } @@ -638,10 +578,7 @@ impl Global { let fid = hub.bind_group_layouts.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -672,14 +609,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(layout.clone()); + let id = fid.assign(Fallible::Valid(layout.clone())); api_log!("Device::create_bind_group_layout -> {id:?}"); return (id, None); }; - let fid = hub.bind_group_layouts.prepare(device_id.backend(), id_in); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -689,9 +625,11 @@ impl Global { let hub = &self.hub; - if let Some(_layout) = hub.bind_group_layouts.unregister(bind_group_layout_id) { - #[cfg(feature = "trace")] - if let Some(t) = _layout.device.trace.lock().as_mut() { + let _layout = hub.bind_group_layouts.remove(bind_group_layout_id); + + #[cfg(feature = "trace")] + if let Ok(layout) = _layout.get() { + if let Some(t) = layout.device.trace.lock().as_mut() { t.add(trace::Action::DestroyBindGroupLayout(bind_group_layout_id)); } } @@ -712,10 +650,7 @@ impl Global { let fid = hub.pipeline_layouts.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -726,19 +661,13 @@ impl Global { let bind_group_layouts_guard = hub.bind_group_layouts.read(); desc.bind_group_layouts .iter() - .map(|bgl_id| { - bind_group_layouts_guard.get_owned(*bgl_id).map_err(|_| { - binding_model::CreatePipelineLayoutError::InvalidBindGroupLayoutId( - *bgl_id, - ) - }) - }) + .map(|bgl_id| bind_group_layouts_guard.get(*bgl_id).get()) .collect::, _>>() }; let bind_group_layouts = match bind_group_layouts { Ok(bind_group_layouts) => bind_group_layouts, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let desc = binding_model::ResolvedPipelineLayoutDescriptor { @@ -752,12 +681,12 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(layout); + let id = fid.assign(Fallible::Valid(layout)); api_log!("Device::create_pipeline_layout -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -766,9 +695,12 @@ impl Global { api_log!("PipelineLayout::drop {pipeline_layout_id:?}"); let hub = &self.hub; - if let Some(_layout) = hub.pipeline_layouts.unregister(pipeline_layout_id) { - #[cfg(feature = "trace")] - if let Some(t) = _layout.device.trace.lock().as_mut() { + + let _layout = hub.pipeline_layouts.remove(pipeline_layout_id); + + #[cfg(feature = "trace")] + if let Ok(layout) = _layout.get() { + if let Some(t) = layout.device.trace.lock().as_mut() { 
t.add(trace::Action::DestroyPipelineLayout(pipeline_layout_id)); } } @@ -786,49 +718,47 @@ impl Global { let fid = hub.bind_groups.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { trace.add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); } - let layout = match hub.bind_group_layouts.get(desc.layout) { + let layout = match hub.bind_group_layouts.get(desc.layout).get() { Ok(layout) => layout, - Err(..) => break 'error binding_model::CreateBindGroupError::InvalidLayout, + Err(e) => break 'error e.into(), }; fn resolve_entry<'a>( e: &BindGroupEntry<'a>, - buffer_storage: &Storage, - sampler_storage: &Storage, - texture_view_storage: &Storage, + buffer_storage: &Storage>, + sampler_storage: &Storage>, + texture_view_storage: &Storage>, ) -> Result, binding_model::CreateBindGroupError> { let resolve_buffer = |bb: &BufferBinding| { buffer_storage - .get_owned(bb.buffer_id) + .get(bb.buffer_id) + .get() .map(|buffer| ResolvedBufferBinding { buffer, offset: bb.offset, size: bb.size, }) - .map_err(|_| { - binding_model::CreateBindGroupError::InvalidBufferId(bb.buffer_id) - }) + .map_err(binding_model::CreateBindGroupError::from) }; let resolve_sampler = |id: &id::SamplerId| { sampler_storage - .get_owned(*id) - .map_err(|_| binding_model::CreateBindGroupError::InvalidSamplerId(*id)) + .get(*id) + .get() + .map_err(binding_model::CreateBindGroupError::from) }; let resolve_view = |id: &id::TextureViewId| { texture_view_storage - .get_owned(*id) - .map_err(|_| binding_model::CreateBindGroupError::InvalidTextureViewId(*id)) + .get(*id) + .get() + .map_err(binding_model::CreateBindGroupError::from) }; let resource = match e.resource { BindingResource::Buffer(ref buffer) => { @@ -893,14 +823,14 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(bind_group); + let id = fid.assign(Fallible::Valid(bind_group)); api_log!("Device::create_bind_group -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -910,8 +840,10 @@ impl Global { let hub = &self.hub; - if let Some(_bind_group) = hub.bind_groups.unregister(bind_group_id) { - #[cfg(feature = "trace")] + let _bind_group = hub.bind_groups.remove(bind_group_id); + + #[cfg(feature = "trace")] + if let Ok(_bind_group) = _bind_group.get() { if let Some(t) = _bind_group.device.trace.lock().as_mut() { t.add(trace::Action::DestroyBindGroup(bind_group_id)); } @@ -948,10 +880,7 @@ impl Global { let fid = hub.shader_modules.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -990,14 +919,14 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(shader); + let id = fid.assign(Fallible::Valid(shader)); api_log!("Device::create_shader_module -> {id:?}"); return (id, None); }; log::error!("Device::create_shader_module error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1023,10 +952,7 @@ 
impl Global { let fid = hub.shader_modules.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1044,14 +970,14 @@ impl Global { Ok(shader) => shader, Err(e) => break 'error e, }; - let id = fid.assign(shader); + let id = fid.assign(Fallible::Valid(shader)); api_log!("Device::create_shader_module_spirv -> {id:?}"); return (id, None); }; log::error!("Device::create_shader_module_spirv error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1061,12 +987,13 @@ impl Global { let hub = &self.hub; - if let Some(shader_module) = hub.shader_modules.unregister(shader_module_id) { - #[cfg(feature = "trace")] + let _shader_module = hub.shader_modules.remove(shader_module_id); + + #[cfg(feature = "trace")] + if let Ok(shader_module) = _shader_module.get() { if let Some(t) = shader_module.device.trace.lock().as_mut() { t.add(trace::Action::DestroyShaderModule(shader_module_id)); } - drop(shader_module) } } @@ -1084,12 +1011,9 @@ impl Global { id_in.map(|id| id.into_command_buffer_id()), ); - let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId, - }; + let device = self.hub.devices.get(device_id); + let error = 'error: { let command_buffer = match device.create_command_encoder(&desc.label) { Ok(command_buffer) => command_buffer, Err(e) => break 'error e, @@ -1100,7 +1024,7 @@ impl Global { return (id.into_command_encoder_id(), None); }; - let id = fid.assign_error(); + let id = fid.assign(Arc::new(CommandBuffer::new_invalid(&device, &desc.label))); (id.into_command_encoder_id(), Some(error)) } @@ -1110,12 +1034,9 @@ impl Global { let hub = &self.hub; - if let Some(cmd_buf) = hub + let _cmd_buf = hub .command_buffers - .unregister(command_encoder_id.into_command_buffer_id()) - { - cmd_buf.data.lock().as_mut().unwrap().encoder.discard(); - } + .remove(command_encoder_id.into_command_buffer_id()); } pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { @@ -1156,14 +1077,7 @@ impl Global { .prepare(bundle_encoder.parent().backend(), id_in); let error = 'error: { - let device = match hub.devices.get(bundle_encoder.parent()) { - Ok(device) => device, - Err(_) => { - break 'error command::RenderBundleError::from_device_error( - DeviceError::InvalidDeviceId, - ); - } - }; + let device = self.hub.devices.get(bundle_encoder.parent()); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1184,13 +1098,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(render_bundle); + let id = fid.assign(Fallible::Valid(render_bundle)); api_log!("RenderBundleEncoder::finish -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1200,9 +1114,11 @@ impl Global { let hub = &self.hub; - if let Some(_bundle) = hub.render_bundles.unregister(render_bundle_id) { - #[cfg(feature = "trace")] - if let Some(t) = _bundle.device.trace.lock().as_mut() { + let _bundle = hub.render_bundles.remove(render_bundle_id); + + #[cfg(feature = "trace")] + if let Ok(bundle) = _bundle.get() { + if let Some(t) = 
bundle.device.trace.lock().as_mut() { t.add(trace::Action::DestroyRenderBundle(render_bundle_id)); } } @@ -1220,10 +1136,7 @@ impl Global { let fid = hub.query_sets.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1238,13 +1151,13 @@ impl Global { Err(err) => break 'error err, }; - let id = fid.assign(query_set); + let id = fid.assign(Fallible::Valid(query_set)); api_log!("Device::create_query_set -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1254,9 +1167,11 @@ impl Global { let hub = &self.hub; - if let Some(_query_set) = hub.query_sets.unregister(query_set_id) { - #[cfg(feature = "trace")] - if let Some(trace) = _query_set.device.trace.lock().as_mut() { + let _query_set = hub.query_sets.remove(query_set_id); + + #[cfg(feature = "trace")] + if let Ok(query_set) = _query_set.get() { + if let Some(trace) = query_set.device.trace.lock().as_mut() { trace.add(trace::Action::DestroyQuerySet(query_set_id)); } } @@ -1288,10 +1203,7 @@ impl Global { break 'error pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into(); } - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1304,37 +1216,30 @@ impl Global { let layout = desc .layout - .map(|layout| { - hub.pipeline_layouts - .get(layout) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout) - }) + .map(|layout| hub.pipeline_layouts.get(layout).get()) .transpose(); let layout = match layout { Ok(layout) => layout, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let cache = desc .cache - .map(|cache| { - hub.pipeline_caches - .get(cache) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidCache) - }) + .map(|cache| hub.pipeline_caches.get(cache).get()) .transpose(); let cache = match cache { Ok(cache) => cache, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let vertex = { let module = hub .shader_modules .get(desc.vertex.stage.module) - .map_err(|_| pipeline::CreateRenderPipelineError::Stage { + .get() + .map_err(|e| pipeline::CreateRenderPipelineError::Stage { stage: wgt::ShaderStages::VERTEX, - error: crate::validation::StageError::InvalidModule, + error: e.into(), }); let module = match module { Ok(module) => module, @@ -1356,12 +1261,14 @@ impl Global { }; let fragment = if let Some(ref state) = desc.fragment { - let module = hub.shader_modules.get(state.stage.module).map_err(|_| { - pipeline::CreateRenderPipelineError::Stage { + let module = hub + .shader_modules + .get(state.stage.module) + .get() + .map_err(|e| pipeline::CreateRenderPipelineError::Stage { stage: wgt::ShaderStages::FRAGMENT, - error: crate::validation::StageError::InvalidModule, - } - }); + error: e.into(), + }); let module = match module { Ok(module) => module, Err(e) => break 'error e, @@ -1415,7 +1322,7 @@ impl Global { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert(ids.root_id, pipeline.layout.clone()); + 
pipeline_layout_guard.insert(ids.root_id, Fallible::Valid(pipeline.layout.clone())); let mut group_ids = ids.group_ids.iter(); // NOTE: If the first iterator is longer than the second, the `.zip()` impl will still advance the // the first iterator before realizing that the second iterator has finished. @@ -1427,29 +1334,29 @@ impl Global { .iter() .zip(&mut group_ids) { - bgl_guard.insert(*bgl_id, bgl.clone()); + bgl_guard.insert(*bgl_id, Fallible::Valid(bgl.clone())); } for bgl_id in group_ids { - bgl_guard.insert_error(*bgl_id); + bgl_guard.insert(*bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } - let id = fid.assign(pipeline); + let id = fid.assign(Fallible::Valid(pipeline)); api_log!("Device::create_render_pipeline -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layouts. if let Some(ids) = implicit_context { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert_error(ids.root_id); + pipeline_layout_guard.insert(ids.root_id, Fallible::Invalid(Arc::new(String::new()))); for bgl_id in ids.group_ids { - bgl_guard.insert_error(bgl_id); + bgl_guard.insert(bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } @@ -1471,16 +1378,15 @@ impl Global { ) { let hub = &self.hub; + let fid = hub.bind_group_layouts.prepare(pipeline_id.backend(), id_in); + let error = 'error: { - let pipeline = match hub.render_pipelines.get(pipeline_id) { + let pipeline = match hub.render_pipelines.get(pipeline_id).get() { Ok(pipeline) => pipeline, - Err(_) => break 'error binding_model::GetBindGroupLayoutError::InvalidPipeline, + Err(e) => break 'error e.into(), }; let id = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(bg) => hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign(bg.clone()), + Some(bg) => fid.assign(Fallible::Valid(bg.clone())), None => { break 'error binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index) } @@ -1488,10 +1394,7 @@ impl Global { return (id, None); }; - let id = hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(String::new()))); (id, Some(error)) } @@ -1501,9 +1404,11 @@ impl Global { let hub = &self.hub; - if let Some(_pipeline) = hub.render_pipelines.unregister(render_pipeline_id) { - #[cfg(feature = "trace")] - if let Some(t) = _pipeline.device.trace.lock().as_mut() { + let _pipeline = hub.render_pipelines.remove(render_pipeline_id); + + #[cfg(feature = "trace")] + if let Ok(pipeline) = _pipeline.get() { + if let Some(t) = pipeline.device.trace.lock().as_mut() { t.add(trace::Action::DestroyRenderPipeline(render_pipeline_id)); } } @@ -1535,10 +1440,7 @@ impl Global { break 'error pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into(); } - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1551,34 +1453,23 @@ impl Global { let layout = desc .layout - .map(|layout| { - hub.pipeline_layouts - .get(layout) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout) - }) + .map(|layout| hub.pipeline_layouts.get(layout).get()) 
.transpose(); let layout = match layout { Ok(layout) => layout, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let cache = desc .cache - .map(|cache| { - hub.pipeline_caches - .get(cache) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidCache) - }) + .map(|cache| hub.pipeline_caches.get(cache).get()) .transpose(); let cache = match cache { Ok(cache) => cache, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; - let module = hub - .shader_modules - .get(desc.stage.module) - .map_err(|_| crate::validation::StageError::InvalidModule); + let module = hub.shader_modules.get(desc.stage.module).get(); let module = match module { Ok(module) => module, Err(e) => break 'error e.into(), @@ -1617,7 +1508,7 @@ impl Global { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert(ids.root_id, pipeline.layout.clone()); + pipeline_layout_guard.insert(ids.root_id, Fallible::Valid(pipeline.layout.clone())); let mut group_ids = ids.group_ids.iter(); // NOTE: If the first iterator is longer than the second, the `.zip()` impl will still advance the // the first iterator before realizing that the second iterator has finished. @@ -1629,29 +1520,29 @@ impl Global { .iter() .zip(&mut group_ids) { - bgl_guard.insert(*bgl_id, bgl.clone()); + bgl_guard.insert(*bgl_id, Fallible::Valid(bgl.clone())); } for bgl_id in group_ids { - bgl_guard.insert_error(*bgl_id); + bgl_guard.insert(*bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } - let id = fid.assign(pipeline); + let id = fid.assign(Fallible::Valid(pipeline)); api_log!("Device::create_compute_pipeline -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layouts. 
if let Some(ids) = implicit_context { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert_error(ids.root_id); + pipeline_layout_guard.insert(ids.root_id, Fallible::Invalid(Arc::new(String::new()))); for bgl_id in ids.group_ids { - bgl_guard.insert_error(bgl_id); + bgl_guard.insert(bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } @@ -1671,17 +1562,16 @@ impl Global { ) { let hub = &self.hub; + let fid = hub.bind_group_layouts.prepare(pipeline_id.backend(), id_in); + let error = 'error: { - let pipeline = match hub.compute_pipelines.get(pipeline_id) { + let pipeline = match hub.compute_pipelines.get(pipeline_id).get() { Ok(pipeline) => pipeline, - Err(_) => break 'error binding_model::GetBindGroupLayoutError::InvalidPipeline, + Err(e) => break 'error e.into(), }; let id = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(bg) => hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign(bg.clone()), + Some(bg) => fid.assign(Fallible::Valid(bg.clone())), None => { break 'error binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index) } @@ -1690,10 +1580,7 @@ impl Global { return (id, None); }; - let id = hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(String::new()))); (id, Some(error)) } @@ -1703,9 +1590,11 @@ impl Global { let hub = &self.hub; - if let Some(_pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) { - #[cfg(feature = "trace")] - if let Some(t) = _pipeline.device.trace.lock().as_mut() { + let _pipeline = hub.compute_pipelines.remove(compute_pipeline_id); + + #[cfg(feature = "trace")] + if let Ok(pipeline) = _pipeline.get() { + if let Some(t) = pipeline.device.trace.lock().as_mut() { t.add(trace::Action::DestroyComputePipeline(compute_pipeline_id)); } } @@ -1729,11 +1618,7 @@ impl Global { let fid = hub.pipeline_caches.prepare(device_id.backend(), id_in); let error: pipeline::CreatePipelineCacheError = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - // TODO: Handle error properly - Err(crate::storage::InvalidId) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1746,7 +1631,7 @@ impl Global { let cache = unsafe { device.create_pipeline_cache(desc) }; match cache { Ok(cache) => { - let id = fid.assign(cache); + let id = fid.assign(Fallible::Valid(cache)); api_log!("Device::create_pipeline_cache -> {id:?}"); return (id, None); } @@ -1754,7 +1639,7 @@ impl Global { } }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1765,12 +1650,13 @@ impl Global { let hub = &self.hub; - if let Some(cache) = hub.pipeline_caches.unregister(pipeline_cache_id) { - #[cfg(feature = "trace")] + let _cache = hub.pipeline_caches.remove(pipeline_cache_id); + + #[cfg(feature = "trace")] + if let Ok(cache) = _cache.get() { if let Some(t) = cache.device.trace.lock().as_mut() { t.add(trace::Action::DestroyPipelineCache(pipeline_cache_id)); } - drop(cache) } } @@ -1901,13 +1787,7 @@ impl Global { // User callbacks must not be called while we are holding locks. 
let user_callbacks; { - let hub = &self.hub; - let surface_guard = self.surfaces.read(); - - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1918,10 +1798,7 @@ impl Global { break 'error e.into(); } - let surface = match surface_guard.get(surface_id) { - Ok(surface) => surface, - Err(_) => break 'error E::InvalidSurface, - }; + let surface = self.surfaces.get(surface_id); let caps = match surface.get_capabilities(&device.adapter) { Ok(caps) => caps, @@ -2048,11 +1925,7 @@ impl Global { ) -> Result { api_log!("Device::poll {maintain:?}"); - let hub = &self.hub; - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; + let device = self.hub.devices.get(device_id); let DevicePoll { closures, @@ -2159,38 +2032,26 @@ impl Global { Ok(all_queue_empty) } - pub fn device_start_capture(&self, id: DeviceId) { + pub fn device_start_capture(&self, device_id: DeviceId) { api_log!("Device::start_capture"); - let hub = &self.hub; + let device = self.hub.devices.get(device_id); - if let Ok(device) = hub.devices.get(id) { - if !device.is_valid() { - return; - } - unsafe { device.raw().start_capture() }; + if !device.is_valid() { + return; } + unsafe { device.raw().start_capture() }; } - pub fn device_stop_capture(&self, id: DeviceId) { + pub fn device_stop_capture(&self, device_id: DeviceId) { api_log!("Device::stop_capture"); - let hub = &self.hub; + let device = self.hub.devices.get(device_id); - if let Ok(device) = hub.devices.get(id) { - if !device.is_valid() { - return; - } - unsafe { device.raw().stop_capture() }; + if !device.is_valid() { + return; } - } - - // This is a test-only function to force the device into an - // invalid state by inserting an error value in its place in - // the registry. - pub fn device_make_invalid(&self, device_id: DeviceId) { - let hub = &self.hub; - hub.devices.force_replace_with_error(device_id); + unsafe { device.raw().stop_capture() }; } pub fn pipeline_cache_get_data(&self, id: id::PipelineCacheId) -> Option> { @@ -2198,7 +2059,7 @@ impl Global { api_log!("PipelineCache::get_data"); let hub = &self.hub; - if let Ok(cache) = hub.pipeline_caches.get(id) { + if let Ok(cache) = hub.pipeline_caches.get(id).get() { // TODO: Is this check needed? if !cache.device.is_valid() { return None; @@ -2226,22 +2087,20 @@ impl Global { profiling::scope!("Device::drop"); api_log!("Device::drop {device_id:?}"); - let hub = &self.hub; - if let Some(device) = hub.devices.unregister(device_id) { - let device_lost_closure = device.lock_life().device_lost_closure.take(); - if let Some(closure) = device_lost_closure { - closure.call(DeviceLostReason::Dropped, String::from("Device dropped.")); - } + let device = self.hub.devices.remove(device_id); + let device_lost_closure = device.lock_life().device_lost_closure.take(); + if let Some(closure) = device_lost_closure { + closure.call(DeviceLostReason::Dropped, String::from("Device dropped.")); + } - // The things `Device::prepare_to_die` takes care are mostly - // unnecessary here. We know our queue is empty, so we don't - // need to wait for submissions or triage them. We know we were - // just polled, so `life_tracker.free_resources` is empty. 
- debug_assert!(device.lock_life().queue_empty()); - device.pending_writes.lock().deactivate(); + // The things `Device::prepare_to_die` takes care are mostly + // unnecessary here. We know our queue is empty, so we don't + // need to wait for submissions or triage them. We know we were + // just polled, so `life_tracker.free_resources` is empty. + debug_assert!(device.lock_life().queue_empty()); + device.pending_writes.lock().deactivate(); - drop(device); - } + drop(device); } // This closure will be called exactly once during "lose the device", @@ -2251,61 +2110,47 @@ impl Global { device_id: DeviceId, device_lost_closure: DeviceLostClosure, ) { - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(device_id) { - let mut life_tracker = device.lock_life(); - if let Some(existing_closure) = life_tracker.device_lost_closure.take() { - // It's important to not hold the lock while calling the closure. - drop(life_tracker); - existing_closure.call(DeviceLostReason::ReplacedCallback, "".to_string()); - life_tracker = device.lock_life(); - } - life_tracker.device_lost_closure = Some(device_lost_closure); - } else { - // No device? Okay. Just like we have to call any existing closure - // before we drop it, we need to call this closure before we exit - // this function, because there's no device that is ever going to - // call it. - device_lost_closure.call(DeviceLostReason::DeviceInvalid, "".to_string()); + let device = self.hub.devices.get(device_id); + + let mut life_tracker = device.lock_life(); + if let Some(existing_closure) = life_tracker.device_lost_closure.take() { + // It's important to not hold the lock while calling the closure. + drop(life_tracker); + existing_closure.call(DeviceLostReason::ReplacedCallback, "".to_string()); + life_tracker = device.lock_life(); } + life_tracker.device_lost_closure = Some(device_lost_closure); } pub fn device_destroy(&self, device_id: DeviceId) { api_log!("Device::destroy {device_id:?}"); - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(device_id) { - // Follow the steps at - // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. - // It's legal to call destroy multiple times, but if the device - // is already invalid, there's nothing more to do. There's also - // no need to return an error. - if !device.is_valid() { - return; - } + let device = self.hub.devices.get(device_id); - // The last part of destroy is to lose the device. The spec says - // delay that until all "currently-enqueued operations on any - // queue on this device are completed." This is accomplished by - // setting valid to false, and then relying upon maintain to - // check for empty queues and a DeviceLostClosure. At that time, - // the DeviceLostClosure will be called with "destroyed" as the - // reason. - device.valid.store(false, Ordering::Release); + // Follow the steps at + // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. + // It's legal to call destroy multiple times, but if the device + // is already invalid, there's nothing more to do. There's also + // no need to return an error. + if !device.is_valid() { + return; } + + // The last part of destroy is to lose the device. The spec says + // delay that until all "currently-enqueued operations on any + // queue on this device are completed." This is accomplished by + // setting valid to false, and then relying upon maintain to + // check for empty queues and a DeviceLostClosure. At that time, + // the DeviceLostClosure will be called with "destroyed" as the + // reason. 
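// A self-contained sketch of the "destroy = mark invalid, let maintain() finish
// the job" flow described in the comment above: destroy only flips the validity
// flag, and the device-lost closure fires later from the poll/maintain path once
// the queues are empty. All names below are illustrative, not the real
// wgpu-core types.
use std::sync::atomic::{AtomicBool, Ordering};

struct ToyDevice {
    valid: AtomicBool,
    lost_callback: std::sync::Mutex<Option<Box<dyn FnOnce(&str) + Send>>>,
}

impl ToyDevice {
    fn destroy(&self) {
        // Destroy is only allowed to mark the device invalid.
        self.valid.store(false, Ordering::Release);
    }

    // Called from the poll/maintain path once all queue work has completed.
    fn maintain(&self, queues_empty: bool) {
        if queues_empty && !self.valid.load(Ordering::Acquire) {
            if let Some(cb) = self.lost_callback.lock().unwrap().take() {
                cb("destroyed");
            }
        }
    }
}

fn main() {
    let device = ToyDevice {
        valid: AtomicBool::new(true),
        lost_callback: std::sync::Mutex::new(Some(Box::new(|reason: &str| {
            println!("device lost: {reason}");
        }))),
    };
    device.destroy();
    device.maintain(true); // prints "device lost: destroyed"
}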
+ device.valid.store(false, Ordering::Release); } pub fn device_get_internal_counters(&self, device_id: DeviceId) -> wgt::InternalCounters { - let hub = &self.hub; - if let Ok(device) = hub.devices.get(device_id) { - wgt::InternalCounters { - hal: device.get_hal_counters(), - core: wgt::CoreCounters {}, - } - } else { - Default::default() + let device = self.hub.devices.get(device_id); + wgt::InternalCounters { + hal: device.get_hal_counters(), + core: wgt::CoreCounters {}, } } @@ -2313,21 +2158,15 @@ impl Global { &self, device_id: DeviceId, ) -> Option { - let hub = &self.hub; - hub.devices - .get(device_id) - .ok() - .and_then(|device| device.generate_allocator_report()) + let device = self.hub.devices.get(device_id); + device.generate_allocator_report() } pub fn queue_drop(&self, queue_id: QueueId) { profiling::scope!("Queue::drop"); api_log!("Queue::drop {queue_id:?}"); - let hub = &self.hub; - if let Some(queue) = hub.queues.unregister(queue_id) { - drop(queue); - } + self.hub.queues.remove(queue_id); } pub fn buffer_map_async( @@ -2343,9 +2182,9 @@ impl Global { let hub = &self.hub; let op_and_err = 'error: { - let buffer = match hub.buffers.get(buffer_id) { + let buffer = match hub.buffers.get(buffer_id).get() { Ok(buffer) => buffer, - Err(_) => break 'error Some((op, BufferAccessError::InvalidBufferId(buffer_id))), + Err(e) => break 'error Some((op, e.into())), }; buffer.map_async(offset, size, op).err() @@ -2376,10 +2215,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; { let snatch_guard = buffer.device.snatchable_lock.read(); @@ -2452,10 +2288,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; let snatch_guard = buffer.device.snatchable_lock.read(); buffer.check_destroyed(&snatch_guard)?; diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 71bbce19cf..97ce639fe7 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -414,8 +414,6 @@ pub enum DeviceError { OutOfMemory, #[error("Creation of a resource failed for a reason other than running out of memory.")] ResourceCreationFailed, - #[error("DeviceId is invalid")] - InvalidDeviceId, #[error(transparent)] DeviceMismatch(#[from] Box), } @@ -464,15 +462,11 @@ impl ImplicitPipelineIds<'_> { root_id: hub .pipeline_layouts .prepare(backend, Some(self.root_id)) - .into_id(), + .id(), group_ids: self .group_ids .iter() - .map(|id_in| { - hub.bind_group_layouts - .prepare(backend, Some(*id_in)) - .into_id() - }) + .map(|id_in| hub.bind_group_layouts.prepare(backend, Some(*id_in)).id()) .collect(), } } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 7058799a03..1710c05919 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -4,7 +4,8 @@ use crate::{ api_log, command::{ extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range, - ClearError, CommandAllocator, CommandBuffer, CopySide, ImageCopyTexture, TransferError, + ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide, + ImageCopyTexture, TransferError, }, conv, device::{DeviceError, WaitIdleError}, @@ -16,8 +17,8 @@ use crate::{ lock::RwLockWriteGuard, resource::{ Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, 
DestroyedResourceError, - DestroyedTexture, FlushedStagingBuffer, Labeled, ParentDevice, ResourceErrorIdent, - StagingBuffer, Texture, TextureInner, Trackable, + DestroyedTexture, FlushedStagingBuffer, InvalidResourceError, Labeled, ParentDevice, + ResourceErrorIdent, StagingBuffer, Texture, TextureInner, Trackable, }, resource_log, track::{self, Tracker, TrackerIndex}, @@ -321,15 +322,9 @@ impl PendingWrites { } } -#[derive(Clone, Debug, Error)] -#[error("Queue is invalid")] -pub struct InvalidQueue; - #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum QueueWriteError { - #[error("QueueId is invalid")] - InvalidQueueId, #[error(transparent)] Queue(#[from] DeviceError), #[error(transparent)] @@ -338,13 +333,13 @@ pub enum QueueWriteError { MemoryInitFailure(#[from] ClearError), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum QueueSubmitError { - #[error("QueueId is invalid")] - InvalidQueueId, #[error(transparent)] Queue(#[from] DeviceError), #[error(transparent)] @@ -359,6 +354,10 @@ pub enum QueueSubmitError { SurfaceUnconfigured, #[error("GPU got stuck :(")] StuckGpu, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), + #[error(transparent)] + CommandEncoder(#[from] CommandEncoderError), } //TODO: move out common parts of write_xxx. @@ -376,15 +375,9 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -444,10 +437,7 @@ impl Global { profiling::scope!("Queue::create_staging_buffer"); let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -455,7 +445,7 @@ impl Global { let ptr = unsafe { staging_buffer.ptr() }; let fid = hub.staging_buffers.prepare(queue_id.backend(), id_in); - let id = fid.assign(Arc::new(staging_buffer)); + let id = fid.assign(staging_buffer); resource_log!("Queue::create_staging_buffer {id:?}"); Ok((id, ptr)) @@ -471,18 +461,11 @@ impl Global { profiling::scope!("Queue::write_staging_buffer"); let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; - let staging_buffer = hub - .staging_buffers - .unregister(staging_buffer_id) - .and_then(Arc::into_inner) - .ok_or_else(|| QueueWriteError::Transfer(TransferError::InvalidBufferId(buffer_id)))?; + let staging_buffer = hub.staging_buffers.remove(staging_buffer_id); let mut pending_writes = device.pending_writes.lock(); @@ -515,10 +498,7 @@ impl Global { profiling::scope!("Queue::validate_write_buffer"); let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; self.queue_validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?; @@ -561,10 +541,7 @@ impl Global { ) -> Result<(), QueueWriteError> { let hub = &self.hub; - let dst = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let dst = 
hub.buffers.get(buffer_id).get()?; let transition = { let mut trackers = device.trackers.lock(); @@ -621,10 +598,7 @@ impl Global { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -644,10 +618,7 @@ impl Global { return Ok(()); } - let dst = hub - .textures - .get(destination.texture) - .map_err(|_| TransferError::InvalidTextureId(destination.texture))?; + let dst = hub.textures.get(destination.texture).get()?; dst.same_device_as(queue.as_ref())?; @@ -738,12 +709,6 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); - // Re-get `dst` immutably here, so that the mutable borrow of the - // `texture_guard.get` above ends in time for the `clear_texture` - // call above. Since we've held `texture_guard` the whole time, we know - // the texture hasn't gone away in the mean time, so we can unwrap. - let dst = hub.textures.get(destination.texture).unwrap(); - let dst_raw = dst.try_raw(&snatch_guard)?; let (block_width, block_height) = dst.desc.format.block_dimensions(); @@ -862,10 +827,7 @@ impl Global { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -893,7 +855,7 @@ impl Global { let src_width = source.source.width(); let src_height = source.source.height(); - let dst = hub.textures.get(destination.texture).unwrap(); + let dst = hub.textures.get(destination.texture).get()?; if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) { return Err( @@ -1072,10 +1034,7 @@ impl Global { let (submit_index, callbacks) = { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueSubmitError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -1096,104 +1055,68 @@ impl Global { let mut submit_surface_textures_owned = FastHashMap::default(); { - let mut command_buffer_guard = hub.command_buffers.write(); + let command_buffer_guard = hub.command_buffers.read(); if !command_buffer_ids.is_empty() { profiling::scope!("prepare"); + let mut first_error = None; + //TODO: if multiple command buffers are submitted, we can re-use the last // native command buffer of the previous chain instead of always creating // a temporary one, since the chains are not finished. // finish all the command buffers first - for &cmb_id in command_buffer_ids { + for command_buffer_id in command_buffer_ids { profiling::scope!("process command buffer"); // we reset the used surface textures every time we use // it, so make sure to set_size on it. used_surface_textures.set_size(device.tracker_indices.textures.size()); + let command_buffer = command_buffer_guard.get(*command_buffer_id); + + // Note that we are required to invalidate all command buffers in both the success and failure paths. + // This is why we `continue` and don't early return via `?`. 
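// The submit control flow below in miniature: every submitted command buffer
// must be consumed (invalidated) even when validation fails, so the loop only
// records the first error, keeps iterating, and returns that error after the
// loop instead of early-returning with `?`. Self-contained sketch with toy
// types standing in for command buffers and baked commands.
fn submit_all(buffers: Vec<Result<&'static str, String>>) -> Result<(), String> {
    let mut first_error: Option<String> = None;

    for buffer in buffers {
        // Taking/validating the buffer always happens, error or not.
        match buffer {
            Ok(_commands) => {
                if first_error.is_some() {
                    // An earlier buffer already failed; still consume this one.
                    continue;
                }
                // ... record the baked commands for execution ...
            }
            Err(err) => {
                first_error.get_or_insert(err);
                continue;
            }
        }
    }

    if let Some(err) = first_error {
        return Err(err);
    }
    Ok(())
}

fn main() {
    let result = submit_all(vec![Ok("a"), Err("invalid encoder".to_string()), Ok("b")]);
    assert_eq!(result, Err("invalid encoder".to_string()));
}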
#[allow(unused_mut)] - let mut cmdbuf = match command_buffer_guard.replace_with_error(cmb_id) { - Ok(cmdbuf) => cmdbuf, - Err(_) => continue, - }; + let mut cmd_buf_data = command_buffer.try_take(); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { - trace.add(Action::Submit( - submit_index, - cmdbuf - .data - .lock() - .as_mut() - .unwrap() - .commands - .take() - .unwrap(), - )); - } - - cmdbuf.same_device_as(queue.as_ref())?; - - if !cmdbuf.is_finished() { - let cmdbuf = Arc::into_inner(cmdbuf).expect( - "Command buffer cannot be destroyed because is still in use", - ); - device.destroy_command_buffer(cmdbuf); - continue; + if let Ok(ref mut cmd_buf_data) = cmd_buf_data { + trace.add(Action::Submit( + submit_index, + cmd_buf_data.commands.take().unwrap(), + )); + } } - { - profiling::scope!("check resource state"); - - let cmd_buf_data = cmdbuf.data.lock(); - let cmd_buf_trackers = &cmd_buf_data.as_ref().unwrap().trackers; - - // update submission IDs - { - profiling::scope!("buffers"); - for buffer in cmd_buf_trackers.buffers.used_resources() { - buffer.check_destroyed(&snatch_guard)?; - - match *buffer.map_state.lock() { - BufferMapState::Idle => (), - _ => { - return Err(QueueSubmitError::BufferStillMapped( - buffer.error_ident(), - )) - } - } + let mut baked = match cmd_buf_data { + Ok(cmd_buf_data) => { + let res = validate_command_buffer( + &command_buffer, + &queue, + &cmd_buf_data, + &snatch_guard, + &mut submit_surface_textures_owned, + &mut used_surface_textures, + ); + if let Err(err) = res { + first_error.get_or_insert(err); + cmd_buf_data.destroy(&command_buffer.device); + continue; } + cmd_buf_data.into_baked_commands() } - { - profiling::scope!("textures"); - for texture in cmd_buf_trackers.textures.used_resources() { - let should_extend = match texture.try_inner(&snatch_guard)? { - TextureInner::Native { .. } => false, - TextureInner::Surface { .. } => { - // Compare the Arcs by pointer as Textures don't implement Eq. 
- submit_surface_textures_owned - .insert(Arc::as_ptr(&texture), texture.clone()); - - true - } - }; - if should_extend { - unsafe { - used_surface_textures - .merge_single( - &texture, - None, - hal::TextureUses::PRESENT, - ) - .unwrap(); - }; - } - } + Err(err) => { + first_error.get_or_insert(err.into()); + continue; } + }; + + if first_error.is_some() { + continue; } - let mut baked = cmdbuf.from_arc_into_baked(); // execute resource transitions unsafe { @@ -1255,6 +1178,10 @@ impl Global { pending_textures: FastHashMap::default(), }); } + + if let Some(first_error) = first_error { + return Err(first_error); + } } } @@ -1369,27 +1296,71 @@ impl Global { Ok(submit_index) } - pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> Result { - let hub = &self.hub; - match hub.queues.get(queue_id) { - Ok(queue) => Ok(unsafe { queue.raw().get_timestamp_period() }), - Err(_) => Err(InvalidQueue), - } + pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 { + let queue = self.hub.queues.get(queue_id); + unsafe { queue.raw().get_timestamp_period() } } pub fn queue_on_submitted_work_done( &self, queue_id: QueueId, closure: SubmittedWorkDoneClosure, - ) -> Result<(), InvalidQueue> { + ) { api_log!("Queue::on_submitted_work_done {queue_id:?}"); //TODO: flush pending writes - let hub = &self.hub; - match hub.queues.get(queue_id) { - Ok(queue) => queue.device.lock_life().add_work_done_closure(closure), - Err(_) => return Err(InvalidQueue), + let queue = self.hub.queues.get(queue_id); + queue.device.lock_life().add_work_done_closure(closure); + } +} + +fn validate_command_buffer( + command_buffer: &CommandBuffer, + queue: &Queue, + cmd_buf_data: &crate::command::CommandBufferMutable, + snatch_guard: &crate::snatch::SnatchGuard<'_>, + submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc>, + used_surface_textures: &mut track::TextureUsageScope, +) -> Result<(), QueueSubmitError> { + command_buffer.same_device_as(queue)?; + cmd_buf_data.check_finished()?; + + { + profiling::scope!("check resource state"); + + { + profiling::scope!("buffers"); + for buffer in cmd_buf_data.trackers.buffers.used_resources() { + buffer.check_destroyed(snatch_guard)?; + + match *buffer.map_state.lock() { + BufferMapState::Idle => (), + _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())), + } + } + } + { + profiling::scope!("textures"); + for texture in cmd_buf_data.trackers.textures.used_resources() { + let should_extend = match texture.try_inner(snatch_guard)? { + TextureInner::Native { .. } => false, + TextureInner::Surface { .. } => { + // Compare the Arcs by pointer as Textures don't implement Eq. 
+ submit_surface_textures_owned + .insert(Arc::as_ptr(&texture), texture.clone()); + + true + } + }; + if should_extend { + unsafe { + used_surface_textures + .merge_single(&texture, None, hal::TextureUses::PRESENT) + .unwrap(); + }; + } + } } - Ok(()) } + Ok(()) } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 253f2ca94b..1cd8ef0fe7 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -21,7 +21,7 @@ use crate::{ pipeline, pool::ResourcePool, resource::{ - self, Buffer, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture, + self, Buffer, Fallible, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture, TextureView, TextureViewNotRenderableReason, TrackingData, }, resource_log, @@ -694,11 +694,11 @@ impl Device { Ok(texture) } - pub fn create_buffer_from_hal( + pub(crate) fn create_buffer_from_hal( self: &Arc, hal_buffer: Box, desc: &resource::BufferDescriptor, - ) -> Arc { + ) -> Fallible { unsafe { self.raw().add_raw_buffer(&*hal_buffer) }; let buffer = Buffer { @@ -723,7 +723,7 @@ impl Device { .buffers .insert_single(&buffer, hal::BufferUses::empty()); - buffer + Fallible::Valid(buffer) } pub(crate) fn create_texture( @@ -3630,16 +3630,6 @@ impl Device { } impl Device { - pub(crate) fn destroy_command_buffer(&self, mut cmd_buf: command::CommandBuffer) { - let mut baked = cmd_buf.extract_baked_commands(); - unsafe { - baked.encoder.reset_all(baked.list); - } - unsafe { - self.raw().destroy_command_encoder(baked.encoder); - } - } - /// Wait for idle and remove resources that we can, before we die. pub(crate) fn prepare_to_die(&self) { self.pending_writes.lock().deactivate(); diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index 4d79a81e3b..bb672612e4 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{ hal_api::HalApi, hub::{Hub, HubReport}, @@ -23,7 +25,7 @@ impl GlobalReport { pub struct Global { pub instance: Instance, - pub(crate) surfaces: Registry, + pub(crate) surfaces: Registry>, pub(crate) hub: Hub, } diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 5cbb736301..f4e8b9c756 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -107,10 +107,10 @@ use crate::{ instance::{Adapter, Surface}, pipeline::{ComputePipeline, PipelineCache, RenderPipeline, ShaderModule}, registry::{Registry, RegistryReport}, - resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, + resource::{Buffer, Fallible, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, storage::{Element, Storage}, }; -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc}; #[derive(Debug, PartialEq, Eq)] pub struct HubReport { @@ -162,24 +162,24 @@ impl HubReport { /// /// [`A::hub(global)`]: HalApi::hub pub struct Hub { - pub(crate) adapters: Registry, - pub(crate) devices: Registry, - pub(crate) queues: Registry, - pub(crate) pipeline_layouts: Registry, - pub(crate) shader_modules: Registry, - pub(crate) bind_group_layouts: Registry, - pub(crate) bind_groups: Registry, - pub(crate) command_buffers: Registry, - pub(crate) render_bundles: Registry, - pub(crate) render_pipelines: Registry, - pub(crate) compute_pipelines: Registry, - pub(crate) pipeline_caches: Registry, - pub(crate) query_sets: Registry, - pub(crate) buffers: Registry, + pub(crate) adapters: Registry>, + pub(crate) devices: Registry>, + pub(crate) queues: Registry>, + pub(crate) pipeline_layouts: Registry>, + pub(crate) shader_modules: 
Registry>, + pub(crate) bind_group_layouts: Registry>, + pub(crate) bind_groups: Registry>, + pub(crate) command_buffers: Registry>, + pub(crate) render_bundles: Registry>, + pub(crate) render_pipelines: Registry>, + pub(crate) compute_pipelines: Registry>, + pub(crate) pipeline_caches: Registry>, + pub(crate) query_sets: Registry>, + pub(crate) buffers: Registry>, pub(crate) staging_buffers: Registry, - pub(crate) textures: Registry, - pub(crate) texture_views: Registry, - pub(crate) samplers: Registry, + pub(crate) textures: Registry>, + pub(crate) texture_views: Registry>, + pub(crate) samplers: Registry>, } impl Hub { @@ -206,7 +206,7 @@ impl Hub { } } - pub(crate) fn clear(&self, surface_guard: &Storage) { + pub(crate) fn clear(&self, surface_guard: &Storage>) { let mut devices = self.devices.write(); for element in devices.map.iter() { if let Element::Occupied(ref device, _) = *element { diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index ca19d3c532..014fbc7bc7 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -346,22 +346,9 @@ impl Adapter { crate::impl_resource_type!(Adapter); crate::impl_storage_item!(Adapter); -#[derive(Clone, Debug, Error)] -#[non_exhaustive] -pub enum IsSurfaceSupportedError { - #[error("Invalid adapter")] - InvalidAdapter, - #[error("Invalid surface")] - InvalidSurface, -} - #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum GetSurfaceSupportError { - #[error("Invalid adapter")] - InvalidAdapter, - #[error("Invalid surface")] - InvalidSurface, #[error("Surface is not supported by the adapter")] Unsupported, } @@ -373,8 +360,6 @@ pub enum GetSurfaceSupportError { pub enum RequestDeviceError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Parent adapter is invalid")] - InvalidAdapter, #[error(transparent)] LimitsExceeded(#[from] FailedLimit), #[error("Device has no queue supporting graphics")] @@ -403,18 +388,12 @@ impl AdapterInputs<'_, M> { } } -#[derive(Clone, Debug, Error)] -#[error("Adapter is invalid")] -pub struct InvalidAdapter; - #[derive(Clone, Debug, Error)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[non_exhaustive] pub enum RequestAdapterError { #[error("No suitable adapter found")] NotFound, - #[error("Surface {0:?} is invalid")] - InvalidSurface(SurfaceId), } #[derive(Clone, Debug, Error)] @@ -614,9 +593,9 @@ impl Global { api_log!("Surface::drop {id:?}"); - let surface = self.surfaces.unregister(id); - let surface = Arc::into_inner(surface.unwrap()) - .expect("Surface cannot be destroyed because is still in use"); + let surface = self.surfaces.remove(id); + let surface = + Arc::into_inner(surface).expect("Surface cannot be destroyed because is still in use"); if let Some(present) = surface.presentation.lock().take() { for (&backend, surface) in &surface.surface_per_backend { @@ -731,14 +710,7 @@ impl Global { } } - let compatible_surface = desc - .compatible_surface - .map(|id| { - self.surfaces - .get(id) - .map_err(|_| RequestAdapterError::InvalidSurface(id)) - }) - .transpose()?; + let compatible_surface = desc.compatible_surface.map(|id| self.surfaces.get(id)); let compatible_surface = compatible_surface.as_ref().map(|surface| surface.as_ref()); let mut device_types = Vec::new(); @@ -869,73 +841,51 @@ impl Global { id } - pub fn adapter_get_info( - &self, - adapter_id: AdapterId, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.info.clone()) - .map_err(|_| InvalidAdapter) + pub fn adapter_get_info(&self, 
adapter_id: AdapterId) -> wgt::AdapterInfo { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.info.clone() } pub fn adapter_get_texture_format_features( &self, adapter_id: AdapterId, format: wgt::TextureFormat, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.get_texture_format_features(format)) - .map_err(|_| InvalidAdapter) + ) -> wgt::TextureFormatFeatures { + let adapter = self.hub.adapters.get(adapter_id); + adapter.get_texture_format_features(format) } - pub fn adapter_features(&self, adapter_id: AdapterId) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.features) - .map_err(|_| InvalidAdapter) + pub fn adapter_features(&self, adapter_id: AdapterId) -> wgt::Features { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.features } - pub fn adapter_limits(&self, adapter_id: AdapterId) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.capabilities.limits.clone()) - .map_err(|_| InvalidAdapter) + pub fn adapter_limits(&self, adapter_id: AdapterId) -> wgt::Limits { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.capabilities.limits.clone() } pub fn adapter_downlevel_capabilities( &self, adapter_id: AdapterId, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.capabilities.downlevel.clone()) - .map_err(|_| InvalidAdapter) + ) -> wgt::DownlevelCapabilities { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.capabilities.downlevel.clone() } pub fn adapter_get_presentation_timestamp( &self, adapter_id: AdapterId, - ) -> Result { - let hub = &self.hub; - - let adapter = hub.adapters.get(adapter_id).map_err(|_| InvalidAdapter)?; - - Ok(unsafe { adapter.raw.adapter.get_presentation_timestamp() }) + ) -> wgt::PresentationTimestamp { + let adapter = self.hub.adapters.get(adapter_id); + unsafe { adapter.raw.adapter.get_presentation_timestamp() } } pub fn adapter_drop(&self, adapter_id: AdapterId) { profiling::scope!("Adapter::drop"); api_log!("Adapter::drop {adapter_id:?}"); - let hub = &self.hub; - hub.adapters.unregister(adapter_id); + self.hub.adapters.remove(adapter_id); } } @@ -947,7 +897,7 @@ impl Global { trace_path: Option<&std::path::Path>, device_id_in: Option, queue_id_in: Option, - ) -> (DeviceId, QueueId, Option) { + ) -> Result<(DeviceId, QueueId), RequestDeviceError> { profiling::scope!("Adapter::request_device"); api_log!("Adapter::request_device"); @@ -955,29 +905,17 @@ impl Global { let device_fid = self.hub.devices.prepare(backend, device_id_in); let queue_fid = self.hub.queues.prepare(backend, queue_id_in); - let error = 'error: { - let adapter = match self.hub.adapters.get(adapter_id) { - Ok(adapter) => adapter, - Err(_) => break 'error RequestDeviceError::InvalidAdapter, - }; - let (device, queue) = - match adapter.create_device_and_queue(desc, self.instance.flags, trace_path) { - Ok((device, queue)) => (device, queue), - Err(e) => break 'error e, - }; - - let device_id = device_fid.assign(device); - resource_log!("Created Device {:?}", device_id); + let adapter = self.hub.adapters.get(adapter_id); + let (device, queue) = + adapter.create_device_and_queue(desc, self.instance.flags, trace_path)?; - let queue_id = queue_fid.assign(queue); - resource_log!("Created Queue {:?}", queue_id); + let device_id = device_fid.assign(device); + resource_log!("Created Device {:?}", device_id); - return (device_id, queue_id, None); - }; + let queue_id = queue_fid.assign(queue); + 
resource_log!("Created Queue {:?}", queue_id); - let device_id = device_fid.assign_error(); - let queue_id = queue_fid.assign_error(); - (device_id, queue_id, Some(error)) + Ok((device_id, queue_id)) } /// # Safety @@ -992,40 +930,28 @@ impl Global { trace_path: Option<&std::path::Path>, device_id_in: Option, queue_id_in: Option, - ) -> (DeviceId, QueueId, Option) { + ) -> Result<(DeviceId, QueueId), RequestDeviceError> { profiling::scope!("Global::create_device_from_hal"); let backend = adapter_id.backend(); let devices_fid = self.hub.devices.prepare(backend, device_id_in); let queues_fid = self.hub.queues.prepare(backend, queue_id_in); - let error = 'error: { - let adapter = match self.hub.adapters.get(adapter_id) { - Ok(adapter) => adapter, - Err(_) => break 'error RequestDeviceError::InvalidAdapter, - }; - let (device, queue) = match adapter.create_device_and_queue_from_hal( - hal_device, - desc, - self.instance.flags, - trace_path, - ) { - Ok(device) => device, - Err(e) => break 'error e, - }; - - let device_id = devices_fid.assign(device); - resource_log!("Created Device {:?}", device_id); + let adapter = self.hub.adapters.get(adapter_id); + let (device, queue) = adapter.create_device_and_queue_from_hal( + hal_device, + desc, + self.instance.flags, + trace_path, + )?; - let queue_id = queues_fid.assign(queue); - resource_log!("Created Queue {:?}", queue_id); + let device_id = devices_fid.assign(device); + resource_log!("Created Device {:?}", device_id); - return (device_id, queue_id, None); - }; + let queue_id = queues_fid.assign(queue); + resource_log!("Created Queue {:?}", queue_id); - let device_id = devices_fid.assign_error(); - let queue_id = queues_fid.assign_error(); - (device_id, queue_id, Some(error)) + Ok((device_id, queue_id)) } } diff --git a/wgpu-core/src/lock/observing.rs b/wgpu-core/src/lock/observing.rs index c530a56539..afda1ad574 100644 --- a/wgpu-core/src/lock/observing.rs +++ b/wgpu-core/src/lock/observing.rs @@ -78,6 +78,15 @@ impl Mutex { } } +impl<'a, T> MutexGuard<'a, T> { + pub fn try_map(s: Self, f: F) -> Result, ()> + where + F: FnOnce(&mut T) -> Option<&mut U>, + { + parking_lot::MutexGuard::try_map(s.inner, f).map_err(|_| ()) + } +} + impl<'a, T> std::ops::Deref for MutexGuard<'a, T> { type Target = T; diff --git a/wgpu-core/src/lock/vanilla.rs b/wgpu-core/src/lock/vanilla.rs index 9a35b6d9d8..51e472b118 100644 --- a/wgpu-core/src/lock/vanilla.rs +++ b/wgpu-core/src/lock/vanilla.rs @@ -30,6 +30,15 @@ impl Mutex { } } +impl<'a, T> MutexGuard<'a, T> { + pub fn try_map(s: Self, f: F) -> Result, ()> + where + F: FnOnce(&mut T) -> Option<&mut U>, + { + parking_lot::MutexGuard::try_map(s.0, f).map_err(|_| ()) + } +} + impl<'a, T> std::ops::Deref for MutexGuard<'a, T> { type Target = T; diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index e5eb9e555a..08e7167db6 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -4,7 +4,7 @@ use crate::{ command::ColorAttachmentError, device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, id::{PipelineCacheId, PipelineLayoutId, ShaderModuleId}, - resource::{Labeled, TrackingData}, + resource::{InvalidResourceError, Labeled, TrackingData}, resource_log, validation, Label, }; use arrayvec::ArrayVec; @@ -222,10 +222,6 @@ pub struct ResolvedComputePipelineDescriptor<'a> { pub enum CreateComputePipelineError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Pipeline layout is invalid")] - InvalidLayout, - #[error("Cache is invalid")] - 
InvalidCache, #[error("Unable to derive an implicit layout")] Implicit(#[from] ImplicitLayoutError), #[error("Error matching shader requirements against the pipeline")] @@ -236,6 +232,8 @@ pub enum CreateComputePipelineError { PipelineConstants(String), #[error(transparent)] MissingDownlevelFlags(#[from] MissingDownlevelFlags), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Debug)] @@ -467,10 +465,6 @@ pub enum CreateRenderPipelineError { ColorAttachment(#[from] ColorAttachmentError), #[error(transparent)] Device(#[from] DeviceError), - #[error("Pipeline layout is invalid")] - InvalidLayout, - #[error("Pipeline cache is invalid")] - InvalidCache, #[error("Unable to derive an implicit layout")] Implicit(#[from] ImplicitLayoutError), #[error("Color state [{0}] is invalid")] @@ -540,6 +534,8 @@ pub enum CreateRenderPipelineError { "but no render target for the pipeline was specified." ))] NoTargetSpecified, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } bitflags::bitflags! { diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index b4fe0bbfbb..93297bf3f6 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -122,10 +122,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let (device, config) = if let Some(ref present) = *surface.presentation.lock() { present.device.check_is_valid()?; @@ -179,7 +176,7 @@ impl Global { let clear_view_desc = hal::TextureViewDescriptor { label: hal_label( Some("(wgpu internal) clear surface texture view"), - self.instance.flags, + device.instance_flags, ), format: config.format, dimension: wgt::TextureViewDimension::D2, @@ -218,7 +215,7 @@ impl Global { .textures .insert_single(&texture, hal::TextureUses::UNINITIALIZED); - let id = fid.assign(texture); + let id = fid.assign(resource::Fallible::Valid(texture)); if present.acquired_texture.is_some() { return Err(SurfaceError::AlreadyAcquired); @@ -257,10 +254,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let mut presentation = surface.presentation.lock(); let present = match presentation.as_mut() { @@ -286,8 +280,8 @@ impl Global { // The texture ID got added to the device tracker by `submit()`, // and now we are moving it away. - let texture = hub.textures.unregister(texture_id); - if let Some(texture) = texture { + let texture = hub.textures.remove(texture_id).get(); + if let Ok(texture) = texture { device .trackers .lock() @@ -332,10 +326,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let mut presentation = surface.presentation.lock(); let present = match presentation.as_mut() { Some(present) => present, @@ -359,9 +350,9 @@ impl Global { // The texture ID got added to the device tracker by `submit()`, // and now we are moving it away. 
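// Sketch of the removal pattern used just below in the present path: `remove`
// always yields the slot's entry (it only panics if the slot was never filled),
// and `.get()` distinguishes a real texture from an invalid placeholder, so only
// a valid texture still needs to be untracked. Toy types, not the real
// registry/tracker API.
use std::sync::Arc;

enum Entry<T> {
    Valid(Arc<T>),
    Invalid,
}

impl<T> Entry<T> {
    fn get(self) -> Result<Arc<T>, ()> {
        match self {
            Entry::Valid(v) => Ok(v),
            Entry::Invalid => Err(()),
        }
    }
}

struct Texture;

fn present(removed: Entry<Texture>, tracked: &mut Vec<Arc<Texture>>) {
    // Rough equivalent of: `if let Ok(texture) = hub.textures.remove(id).get()`
    if let Ok(texture) = removed.get() {
        tracked.retain(|t| !Arc::ptr_eq(t, &texture));
    }
}

fn main() {
    let texture = Arc::new(Texture);
    let mut tracked = vec![texture.clone()];
    present(Entry::Valid(texture), &mut tracked);
    assert!(tracked.is_empty());
}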
- let texture = hub.textures.unregister(texture_id); + let texture = hub.textures.remove(texture_id).get(); - if let Some(texture) = texture { + if let Ok(texture) = texture { device .trackers .lock() diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 84f073d086..6524a4df99 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -4,7 +4,7 @@ use crate::{ id::Id, identity::IdentityManager, lock::{rank, RwLock, RwLockReadGuard, RwLockWriteGuard}, - storage::{Element, InvalidId, Storage, StorageItem}, + storage::{Element, Storage, StorageItem}, }; #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] @@ -12,7 +12,6 @@ pub struct RegistryReport { pub num_allocated: usize, pub num_kept_from_user: usize, pub num_released_from_user: usize, - pub num_error: usize, pub element_size: usize, } @@ -56,28 +55,18 @@ pub(crate) struct FutureId<'a, T: StorageItem> { } impl FutureId<'_, T> { - #[allow(dead_code)] pub fn id(&self) -> Id { self.id } - pub fn into_id(self) -> Id { - self.id - } - /// Assign a new resource to this ID. /// /// Registers it with the registry. - pub fn assign(self, value: Arc) -> Id { + pub fn assign(self, value: T) -> Id { let mut data = self.data.write(); data.insert(self.id, value); self.id } - - pub fn assign_error(self) -> Id { - self.data.write().insert_error(self.id); - self.id - } } impl Registry { @@ -98,9 +87,6 @@ impl Registry { } } - pub(crate) fn get(&self, id: Id) -> Result, InvalidId> { - self.read().get_owned(id) - } #[track_caller] pub(crate) fn read<'a>(&'a self) -> RwLockReadGuard<'a, Storage> { self.storage.read() @@ -109,12 +95,7 @@ impl Registry { pub(crate) fn write<'a>(&'a self) -> RwLockWriteGuard<'a, Storage> { self.storage.write() } - pub(crate) fn force_replace_with_error(&self, id: Id) { - let mut storage = self.storage.write(); - storage.remove(id); - storage.insert_error(id); - } - pub(crate) fn unregister(&self, id: Id) -> Option> { + pub(crate) fn remove(&self, id: Id) -> T { let value = self.storage.write().remove(id); // This needs to happen *after* removing it from the storage, to maintain the // invariant that `self.identity` only contains ids which are actually available @@ -135,13 +116,18 @@ impl Registry { match *element { Element::Occupied(..) => report.num_kept_from_user += 1, Element::Vacant => report.num_released_from_user += 1, - Element::Error(_) => report.num_error += 1, } } report } } +impl Registry { + pub(crate) fn get(&self, id: Id) -> T { + self.read().get(id) + } +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -170,7 +156,7 @@ mod tests { let value = Arc::new(TestData); let new_id = registry.prepare(wgt::Backend::Empty, None); let id = new_id.assign(value); - registry.unregister(id); + registry.remove(id); } }); } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index dc0792cb3f..dbf2bac762 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -305,7 +305,7 @@ impl BufferMapCallback { let status = match result { Ok(()) => BufferMapAsyncStatus::Success, Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost, - Err(BufferAccessError::InvalidBufferId(_)) + Err(BufferAccessError::InvalidResource(_)) | Err(BufferAccessError::DestroyedResource(_)) => BufferMapAsyncStatus::Invalid, Err(BufferAccessError::AlreadyMapped) => BufferMapAsyncStatus::AlreadyMapped, Err(BufferAccessError::MapAlreadyPending) => { @@ -324,7 +324,9 @@ impl BufferMapCallback { | Err(BufferAccessError::NegativeRange { .. 
}) => { BufferMapAsyncStatus::InvalidRange } - Err(_) => BufferMapAsyncStatus::Error, + Err(BufferAccessError::Failed) + | Err(BufferAccessError::NotMapped) + | Err(BufferAccessError::MapAborted) => BufferMapAsyncStatus::Error, }; (inner.callback)(status, inner.user_data); @@ -347,8 +349,6 @@ pub enum BufferAccessError { Device(#[from] DeviceError), #[error("Buffer map failed")] Failed, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("Buffer is already mapped")] @@ -386,6 +386,8 @@ pub enum BufferAccessError { }, #[error("Buffer map aborted")] MapAborted, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -410,6 +412,45 @@ pub struct MissingTextureUsageError { #[error("{0} has been destroyed")] pub struct DestroyedResourceError(pub ResourceErrorIdent); +#[derive(Clone, Debug, Error)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[error("{0} is invalid")] +pub struct InvalidResourceError(pub ResourceErrorIdent); + +pub(crate) enum Fallible { + Valid(Arc), + Invalid(Arc), +} + +impl Fallible { + pub fn get(self) -> Result, InvalidResourceError> { + match self { + Fallible::Valid(v) => Ok(v), + Fallible::Invalid(label) => Err(InvalidResourceError(ResourceErrorIdent { + r#type: Cow::Borrowed(T::TYPE), + label: (*label).clone(), + })), + } + } +} + +impl Clone for Fallible { + fn clone(&self) -> Self { + match self { + Self::Valid(v) => Self::Valid(v.clone()), + Self::Invalid(l) => Self::Invalid(l.clone()), + } + } +} + +impl ResourceType for Fallible { + const TYPE: &'static str = T::TYPE; +} + +impl crate::storage::StorageItem for Fallible { + type Marker = T::Marker; +} + pub type BufferAccessResult = Result<(), BufferAccessError>; #[derive(Debug)] @@ -1202,7 +1243,7 @@ impl Global { let hub = &self.hub; - if let Ok(buffer) = hub.buffers.get(id) { + if let Ok(buffer) = hub.buffers.get(id).get() { let snatch_guard = buffer.device.snatchable_lock.read(); let hal_buffer = buffer .raw(&snatch_guard) @@ -1225,7 +1266,7 @@ impl Global { let hub = &self.hub; - if let Ok(texture) = hub.textures.get(id) { + if let Ok(texture) = hub.textures.get(id).get() { let snatch_guard = texture.device.snatchable_lock.read(); let hal_texture = texture.raw(&snatch_guard); let hal_texture = hal_texture @@ -1249,7 +1290,7 @@ impl Global { let hub = &self.hub; - if let Ok(texture_view) = hub.texture_views.get(id) { + if let Ok(texture_view) = hub.texture_views.get(id).get() { let snatch_guard = texture_view.device.snatchable_lock.read(); let hal_texture_view = texture_view.raw(&snatch_guard); let hal_texture_view = hal_texture_view @@ -1272,11 +1313,8 @@ impl Global { profiling::scope!("Adapter::as_hal"); let hub = &self.hub; - let adapter = hub.adapters.get(id).ok(); - let hal_adapter = adapter - .as_ref() - .map(|adapter| &adapter.raw.adapter) - .and_then(|adapter| adapter.as_any().downcast_ref()); + let adapter = hub.adapters.get(id); + let hal_adapter = adapter.raw.adapter.as_any().downcast_ref(); hal_adapter_callback(hal_adapter) } @@ -1291,12 +1329,8 @@ impl Global { ) -> R { profiling::scope!("Device::as_hal"); - let hub = &self.hub; - let device = hub.devices.get(id).ok(); - let hal_device = device - .as_ref() - .map(|device| device.raw()) - .and_then(|device| device.as_any().downcast_ref()); + let device = self.hub.devices.get(id); + let hal_device = device.raw().as_any().downcast_ref(); 
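// What the downcast above produces, shown in isolation: the dyn-Any view of the
// backend object either downcasts to the requested HAL type (Some) or it does
// not (None), and the callback receives that Option unchanged. `VulkanDevice`
// and `device_as_hal` below are toy stand-ins, not the real wgpu-hal types.
use std::any::Any;

struct VulkanDevice {
    name: &'static str,
}

fn device_as_hal<R>(raw: &dyn Any, callback: impl FnOnce(Option<&VulkanDevice>) -> R) -> R {
    callback(raw.downcast_ref::<VulkanDevice>())
}

fn main() {
    let device = VulkanDevice { name: "toy" };
    let name = device_as_hal(&device, |hal| hal.map(|d| d.name));
    assert_eq!(name, Some("toy"));
}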
hal_device_callback(hal_device) } @@ -1311,14 +1345,9 @@ impl Global { ) -> R { profiling::scope!("Device::fence_as_hal"); - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(id) { - let fence = device.fence.read(); - hal_fence_callback(fence.as_any().downcast_ref()) - } else { - hal_fence_callback(None) - } + let device = self.hub.devices.get(id); + let fence = device.fence.read(); + hal_fence_callback(fence.as_any().downcast_ref()) } /// # Safety @@ -1330,10 +1359,9 @@ impl Global { ) -> R { profiling::scope!("Surface::as_hal"); - let surface = self.surfaces.get(id).ok(); + let surface = self.surfaces.get(id); let hal_surface = surface - .as_ref() - .and_then(|surface| surface.raw(A::VARIANT)) + .raw(A::VARIANT) .and_then(|surface| surface.as_any().downcast_ref()); hal_surface_callback(hal_surface) @@ -1355,9 +1383,10 @@ impl Global { let hub = &self.hub; - if let Ok(cmd_buf) = hub.command_buffers.get(id.into_command_buffer_id()) { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = hub.command_buffers.get(id.into_command_buffer_id()); + let cmd_buf_data = cmd_buf.try_get(); + + if let Ok(mut cmd_buf_data) = cmd_buf_data { let cmd_buf_raw = cmd_buf_data .encoder .open(&cmd_buf.device) @@ -1618,8 +1647,6 @@ impl TextureView { pub enum CreateTextureViewError { #[error(transparent)] Device(#[from] DeviceError), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("Not enough memory left to create texture view")] @@ -1662,6 +1689,8 @@ pub enum CreateTextureViewError { texture: wgt::TextureFormat, view: wgt::TextureFormat, }, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -1834,8 +1863,8 @@ impl QuerySet { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum DestroyError { - #[error("Resource is invalid")] - Invalid, #[error("Resource is already destroyed")] AlreadyDestroyed, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index c5e91eedd4..5a57782bf7 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -8,26 +8,30 @@ use crate::{Epoch, Index}; /// An entry in a `Storage::map` table. #[derive(Debug)] -pub(crate) enum Element { +pub(crate) enum Element +where + T: StorageItem, +{ /// There are no live ids with this index. Vacant, /// There is one live id with this index, allocated at the given /// epoch. - Occupied(Arc, Epoch), - - /// Like `Occupied`, but an error occurred when creating the - /// resource. - Error(Epoch), + Occupied(T, Epoch), } -#[derive(Clone, Debug)] -pub(crate) struct InvalidId; - pub(crate) trait StorageItem: ResourceType { type Marker: Marker; } +impl ResourceType for Arc { + const TYPE: &'static str = T::TYPE; +} + +impl StorageItem for Arc { + type Marker = T::Marker; +} + #[macro_export] macro_rules! impl_storage_item { ($ty:ident) => { @@ -70,34 +74,13 @@ impl Storage where T: StorageItem, { - /// Get a reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. 
- pub(crate) fn get(&self, id: Id) -> Result<&Arc, InvalidId> { + pub(crate) fn insert(&mut self, id: Id, value: T) { let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), - None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), - Some(&Element::Error(epoch)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{:?}] is no longer alive", - self.kind, id - ); - result - } - - /// Get an owned reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. - pub(crate) fn get_owned(&self, id: Id) -> Result, InvalidId> { - Ok(Arc::clone(self.get(id)?)) - } - - fn insert_impl(&mut self, index: usize, epoch: Epoch, element: Element) { + let index = index as usize; if index >= self.map.len() { self.map.resize_with(index + 1, || Element::Vacant); } - match std::mem::replace(&mut self.map[index], element) { + match std::mem::replace(&mut self.map[index], Element::Occupied(value, epoch)) { Element::Vacant => {} Element::Occupied(_, storage_epoch) => { assert_ne!( @@ -107,52 +90,21 @@ where T::TYPE ); } - Element::Error(storage_epoch) => { - assert_ne!( - epoch, - storage_epoch, - "Index {index:?} of {} is already occupied with Error", - T::TYPE - ); - } } } - pub(crate) fn insert(&mut self, id: Id, value: Arc) { - let (index, epoch, _backend) = id.unzip(); - self.insert_impl(index as usize, epoch, Element::Occupied(value, epoch)) - } - - pub(crate) fn insert_error(&mut self, id: Id) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, epoch, Element::Error(epoch)) - } - - pub(crate) fn replace_with_error(&mut self, id: Id) -> Result, InvalidId> { - let (index, epoch, _) = id.unzip(); - match std::mem::replace(&mut self.map[index as usize], Element::Error(epoch)) { - Element::Vacant => panic!("Cannot access vacant resource"), - Element::Occupied(value, storage_epoch) => { - assert_eq!(epoch, storage_epoch); - Ok(value) - } - _ => Err(InvalidId), - } - } - - pub(crate) fn remove(&mut self, id: Id) -> Option> { + pub(crate) fn remove(&mut self, id: Id) -> T { let (index, epoch, _) = id.unzip(); match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { Element::Occupied(value, storage_epoch) => { assert_eq!(epoch, storage_epoch); - Some(value) + value } - Element::Error(_) => None, Element::Vacant => panic!("Cannot remove a vacant resource"), } } - pub(crate) fn iter(&self, backend: Backend) -> impl Iterator, &Arc)> { + pub(crate) fn iter(&self, backend: Backend) -> impl Iterator, &T)> { self.map .iter() .enumerate() @@ -168,3 +120,24 @@ where self.map.len() } } + +impl Storage +where + T: StorageItem + Clone, +{ + /// Get an owned reference to an item. + /// Panics if there is an epoch mismatch, the entry is empty or in error. 
+ pub(crate) fn get(&self, id: Id) -> T { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (v.clone(), epoch), + None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{:?}] is no longer alive", + self.kind, id + ); + result + } +} diff --git a/wgpu-core/src/validation.rs b/wgpu-core/src/validation.rs index 1c5f28d909..c1cbdaf183 100644 --- a/wgpu-core/src/validation.rs +++ b/wgpu-core/src/validation.rs @@ -1,4 +1,4 @@ -use crate::{device::bgl, FastHashMap, FastHashSet}; +use crate::{device::bgl, resource::InvalidResourceError, FastHashMap, FastHashSet}; use arrayvec::ArrayVec; use std::{collections::hash_map::Entry, fmt}; use thiserror::Error; @@ -200,8 +200,6 @@ pub enum InputError { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum StageError { - #[error("Shader module is invalid")] - InvalidModule, #[error( "Shader entry point's workgroup size {current:?} ({current_total} total invocations) must be less or equal to the per-dimension limit {limit:?} and the total invocation limit {total}" )] @@ -241,6 +239,8 @@ pub enum StageError { but no entry point was specified" )] MultipleEntryPointsFound, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option { diff --git a/wgpu-types/src/lib.rs b/wgpu-types/src/lib.rs index cc0318eda1..e3cab5605c 100644 --- a/wgpu-types/src/lib.rs +++ b/wgpu-types/src/lib.rs @@ -7550,10 +7550,4 @@ pub enum DeviceLostReason { /// exactly once before it is dropped, which helps with managing the /// memory owned by the callback. ReplacedCallback = 3, - /// When setting the callback, but the device is already invalid - /// - /// As above, when the callback is provided, wgpu guarantees that it - /// will eventually be called. If the device is already invalid, wgpu - /// will call the callback immediately, with this reason. - DeviceInvalid = 4, } diff --git a/wgpu/src/api/device.rs b/wgpu/src/api/device.rs index e2a9f7f953..8c565ef524 100644 --- a/wgpu/src/api/device.rs +++ b/wgpu/src/api/device.rs @@ -464,12 +464,6 @@ impl Device { ) } - /// Test-only function to make this device invalid. 
- #[doc(hidden)] - pub fn make_invalid(&self) { - DynContext::device_make_invalid(&*self.context, self.data.as_ref()) - } - /// Create a [`PipelineCache`] with initial data /// /// This can be passed to [`Device::create_compute_pipeline`] diff --git a/wgpu/src/backend/webgpu.rs b/wgpu/src/backend/webgpu.rs index 30ad22f739..a98c0ca32f 100644 --- a/wgpu/src/backend/webgpu.rs +++ b/wgpu/src/backend/webgpu.rs @@ -2012,11 +2012,6 @@ impl crate::context::Context for ContextWebGpu { Sendable(device_data.0.create_render_bundle_encoder(&mapped_desc)) } - #[doc(hidden)] - fn device_make_invalid(&self, _device_data: &Self::DeviceData) { - // Unimplemented - } - fn device_drop(&self, _device_data: &Self::DeviceData) { // Device is dropped automatically } diff --git a/wgpu/src/backend/wgpu_core.rs b/wgpu/src/backend/wgpu_core.rs index 9b2597cb44..c5ba18cf6d 100644 --- a/wgpu/src/backend/wgpu_core.rs +++ b/wgpu/src/backend/wgpu_core.rs @@ -108,7 +108,7 @@ impl ContextWgpuCore { if trace_dir.is_some() { log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974"); } - let (device_id, queue_id, error) = unsafe { + let (device_id, queue_id) = unsafe { self.0.create_device_from_hal( *adapter, hal_device.into(), @@ -117,10 +117,7 @@ impl ContextWgpuCore { None, None, ) - }; - if let Some(err) = error { - self.handle_error_fatal(err, "Adapter::create_device_from_hal"); - } + }?; let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, @@ -606,16 +603,19 @@ impl crate::Context for ContextWgpuCore { if trace_dir.is_some() { log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974"); } - let (device_id, queue_id, error) = self.0.adapter_request_device( + let res = self.0.adapter_request_device( *adapter_data, &desc.map_label(|l| l.map(Borrowed)), None, None, None, ); - if let Some(err) = error { - return ready(Err(err.into())); - } + let (device_id, queue_id) = match res { + Ok(ids) => ids, + Err(err) => { + return ready(Err(err.into())); + } + }; let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, @@ -641,44 +641,27 @@ impl crate::Context for ContextWgpuCore { adapter_data: &Self::AdapterData, surface_data: &Self::SurfaceData, ) -> bool { - match self - .0 + self.0 .adapter_is_surface_supported(*adapter_data, surface_data.id) - { - Ok(result) => result, - Err(err) => self.handle_error_fatal(err, "Adapter::is_surface_supported"), - } } fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features { - match self.0.adapter_features(*adapter_data) { - Ok(features) => features, - Err(err) => self.handle_error_fatal(err, "Adapter::features"), - } + self.0.adapter_features(*adapter_data) } fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits { - match self.0.adapter_limits(*adapter_data) { - Ok(limits) => limits, - Err(err) => self.handle_error_fatal(err, "Adapter::limits"), - } + self.0.adapter_limits(*adapter_data) } fn adapter_downlevel_capabilities( &self, adapter_data: &Self::AdapterData, ) -> DownlevelCapabilities { - match self.0.adapter_downlevel_capabilities(*adapter_data) { - Ok(downlevel) => downlevel, - Err(err) => self.handle_error_fatal(err, "Adapter::downlevel_properties"), - } + self.0.adapter_downlevel_capabilities(*adapter_data) } fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo { - match self.0.adapter_get_info(*adapter_data) { - Ok(info) => info, - Err(err) => 
diff --git a/wgpu/src/backend/wgpu_core.rs b/wgpu/src/backend/wgpu_core.rs
index 9b2597cb44..c5ba18cf6d 100644
--- a/wgpu/src/backend/wgpu_core.rs
+++ b/wgpu/src/backend/wgpu_core.rs
@@ -108,7 +108,7 @@ impl ContextWgpuCore {
         if trace_dir.is_some() {
             log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974");
         }
-        let (device_id, queue_id, error) = unsafe {
+        let (device_id, queue_id) = unsafe {
             self.0.create_device_from_hal(
                 *adapter,
                 hal_device.into(),
@@ -117,10 +117,7 @@ impl ContextWgpuCore {
                 None,
                 None,
             )
-        };
-        if let Some(err) = error {
-            self.handle_error_fatal(err, "Adapter::create_device_from_hal");
-        }
+        }?;
         let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new()));
         let device = Device {
             id: device_id,
@@ -606,16 +603,19 @@ impl crate::Context for ContextWgpuCore {
         if trace_dir.is_some() {
             log::error!("Feature 'trace' has been removed temporarily, see https://github.com/gfx-rs/wgpu/issues/5974");
         }
-        let (device_id, queue_id, error) = self.0.adapter_request_device(
+        let res = self.0.adapter_request_device(
             *adapter_data,
             &desc.map_label(|l| l.map(Borrowed)),
             None,
             None,
             None,
         );
-        if let Some(err) = error {
-            return ready(Err(err.into()));
-        }
+        let (device_id, queue_id) = match res {
+            Ok(ids) => ids,
+            Err(err) => {
+                return ready(Err(err.into()));
+            }
+        };
         let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new()));
         let device = Device {
             id: device_id,
@@ -641,44 +641,27 @@ impl crate::Context for ContextWgpuCore {
         adapter_data: &Self::AdapterData,
         surface_data: &Self::SurfaceData,
     ) -> bool {
-        match self
-            .0
+        self.0
             .adapter_is_surface_supported(*adapter_data, surface_data.id)
-        {
-            Ok(result) => result,
-            Err(err) => self.handle_error_fatal(err, "Adapter::is_surface_supported"),
-        }
     }
 
     fn adapter_features(&self, adapter_data: &Self::AdapterData) -> Features {
-        match self.0.adapter_features(*adapter_data) {
-            Ok(features) => features,
-            Err(err) => self.handle_error_fatal(err, "Adapter::features"),
-        }
+        self.0.adapter_features(*adapter_data)
     }
 
     fn adapter_limits(&self, adapter_data: &Self::AdapterData) -> Limits {
-        match self.0.adapter_limits(*adapter_data) {
-            Ok(limits) => limits,
-            Err(err) => self.handle_error_fatal(err, "Adapter::limits"),
-        }
+        self.0.adapter_limits(*adapter_data)
     }
 
     fn adapter_downlevel_capabilities(
         &self,
         adapter_data: &Self::AdapterData,
     ) -> DownlevelCapabilities {
-        match self.0.adapter_downlevel_capabilities(*adapter_data) {
-            Ok(downlevel) => downlevel,
-            Err(err) => self.handle_error_fatal(err, "Adapter::downlevel_properties"),
-        }
+        self.0.adapter_downlevel_capabilities(*adapter_data)
     }
 
     fn adapter_get_info(&self, adapter_data: &Self::AdapterData) -> AdapterInfo {
-        match self.0.adapter_get_info(*adapter_data) {
-            Ok(info) => info,
-            Err(err) => self.handle_error_fatal(err, "Adapter::get_info"),
-        }
+        self.0.adapter_get_info(*adapter_data)
     }
 
     fn adapter_get_texture_format_features(
@@ -686,23 +669,15 @@ impl crate::Context for ContextWgpuCore {
         adapter_data: &Self::AdapterData,
         format: wgt::TextureFormat,
     ) -> wgt::TextureFormatFeatures {
-        match self
-            .0
+        self.0
             .adapter_get_texture_format_features(*adapter_data, format)
-        {
-            Ok(info) => info,
-            Err(err) => self.handle_error_fatal(err, "Adapter::get_texture_format_features"),
-        }
     }
 
     fn adapter_get_presentation_timestamp(
         &self,
         adapter_data: &Self::AdapterData,
     ) -> wgt::PresentationTimestamp {
-        match self.0.adapter_get_presentation_timestamp(*adapter_data) {
-            Ok(timestamp) => timestamp,
-            Err(err) => self.handle_error_fatal(err, "Adapter::correlate_presentation_timestamp"),
-        }
+        self.0.adapter_get_presentation_timestamp(*adapter_data)
     }
 
     fn surface_get_capabilities(
@@ -780,17 +755,11 @@ impl crate::Context for ContextWgpuCore {
     }
 
     fn device_features(&self, device_data: &Self::DeviceData) -> Features {
-        match self.0.device_features(device_data.id) {
-            Ok(features) => features,
-            Err(err) => self.handle_error_fatal(err, "Device::features"),
-        }
+        self.0.device_features(device_data.id)
     }
 
     fn device_limits(&self, device_data: &Self::DeviceData) -> Limits {
-        match self.0.device_limits(device_data.id) {
-            Ok(limits) => limits,
-            Err(err) => self.handle_error_fatal(err, "Device::limits"),
-        }
+        self.0.device_limits(device_data.id)
     }
 
     #[cfg_attr(
@@ -1337,10 +1306,6 @@ impl crate::Context for ContextWgpuCore {
             Err(e) => panic!("Error in Device::create_render_bundle_encoder: {e}"),
         }
     }
-    #[doc(hidden)]
-    fn device_make_invalid(&self, device_data: &Self::DeviceData) {
-        self.0.device_make_invalid(device_data.id);
-    }
     #[cfg_attr(not(any(native, Emscripten)), allow(unused))]
     fn device_drop(&self, device_data: &Self::DeviceData) {
         #[cfg(any(native, Emscripten))]
@@ -2094,13 +2059,7 @@ impl crate::Context for ContextWgpuCore {
     }
 
     fn queue_get_timestamp_period(&self, queue_data: &Self::QueueData) -> f32 {
-        let res = self.0.queue_get_timestamp_period(queue_data.id);
-        match res {
-            Ok(v) => v,
-            Err(cause) => {
-                self.handle_error_fatal(cause, "Queue::get_timestamp_period");
-            }
-        }
+        self.0.queue_get_timestamp_period(queue_data.id)
     }
 
     fn queue_on_submitted_work_done(
@@ -2109,11 +2068,7 @@ impl crate::Context for ContextWgpuCore {
         callback: crate::context::SubmittedWorkDoneCallback,
     ) {
         let closure = wgc::device::queue::SubmittedWorkDoneClosure::from_rust(callback);
-
-        let res = self.0.queue_on_submitted_work_done(queue_data.id, closure);
-        if let Err(cause) = res {
-            self.handle_error_fatal(cause, "Queue::on_submitted_work_done");
-        }
+        self.0.queue_on_submitted_work_done(queue_data.id, closure);
     }
 
     fn device_start_capture(&self, device_data: &Self::DeviceData) {
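With the adapter and device queries now infallible in wgpu-core, the backend wrappers above collapse from a `match` over `Result` (whose error arm could only report a fatal invalid-id condition) to a direct call. A small before/after sketch under that assumption, using stand-in types rather than the real `ContextWgpuCore` API:

```rust
#[derive(Debug, Clone, Copy)]
struct Features(u64);

// Stand-ins for the old and new wgpu-core query styles; not the real signatures.
struct CoreOld;
impl CoreOld {
    fn adapter_features(&self) -> Result<Features, &'static str> {
        Ok(Features(0))
    }
}

struct CoreNew;
impl CoreNew {
    fn adapter_features(&self) -> Features {
        Features(0)
    }
}

// Before: every wrapper needed a "fatal error" arm for a call that could only
// fail when the adapter id itself was bogus.
fn features_old(core: &CoreOld) -> Features {
    match core.adapter_features() {
        Ok(features) => features,
        Err(err) => panic!("Adapter::features: {err}"),
    }
}

// After: the query is infallible, so the wrapper is a one-liner.
fn features_new(core: &CoreNew) -> Features {
    core.adapter_features()
}

fn main() {
    let _ = features_old(&CoreOld);
    let _ = features_new(&CoreNew);
}
```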
diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs
index dd989ddd28..a27459ab45 100644
--- a/wgpu/src/context.rs
+++ b/wgpu/src/context.rs
@@ -195,8 +195,6 @@ pub trait Context: Debug + WasmNotSendSync + Sized {
         device_data: &Self::DeviceData,
         desc: &RenderBundleEncoderDescriptor<'_>,
     ) -> Self::RenderBundleEncoderData;
-    #[doc(hidden)]
-    fn device_make_invalid(&self, device_data: &Self::DeviceData);
     fn device_drop(&self, device_data: &Self::DeviceData);
     fn device_set_device_lost_callback(
         &self,
@@ -888,8 +886,6 @@ pub(crate) trait DynContext: Debug + WasmNotSendSync {
         device_data: &crate::Data,
         desc: &RenderBundleEncoderDescriptor<'_>,
     ) -> Box;
-    #[doc(hidden)]
-    fn device_make_invalid(&self, device_data: &crate::Data);
     fn device_drop(&self, device_data: &crate::Data);
     fn device_set_device_lost_callback(
         &self,
@@ -1638,12 +1634,6 @@ where
         Box::new(data) as _
     }
 
-    #[doc(hidden)]
-    fn device_make_invalid(&self, device_data: &crate::Data) {
-        let device_data = downcast_ref(device_data);
-        Context::device_make_invalid(self, device_data)
-    }
-
     fn device_drop(&self, device_data: &crate::Data) {
         let device_data = downcast_ref(device_data);
         Context::device_drop(self, device_data)
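Dropping `device_make_invalid` takes three coordinated edits because `wgpu` mirrors every statically-typed `Context` method with a type-erased `DynContext` twin plus a blanket impl that downcasts and forwards. A minimal sketch of that mirroring pattern (the names and the `Any`-based downcast are illustrative, not wgpu's actual `crate::Data` machinery):

```rust
use std::any::Any;
use std::fmt::Debug;

// Statically-typed backend trait.
trait Context: Debug + Sized {
    type DeviceData;
    fn device_drop(&self, device_data: &Self::DeviceData);
}

// Object-safe mirror: every method gets a type-erased twin, so removing a
// method means touching the trait, this mirror, and the blanket impl below.
trait DynContext: Debug {
    fn device_drop(&self, device_data: &dyn Any);
}

impl<T: Context + 'static> DynContext for T
where
    T::DeviceData: 'static,
{
    fn device_drop(&self, device_data: &dyn Any) {
        let device_data = device_data
            .downcast_ref::<T::DeviceData>()
            .expect("device data of wrong backend type");
        Context::device_drop(self, device_data)
    }
}

// Toy backend to show the pattern in use.
#[derive(Debug)]
struct NoopContext;
impl Context for NoopContext {
    type DeviceData = u32;
    fn device_drop(&self, _device_data: &u32) {}
}

fn main() {
    let ctx: Box<dyn DynContext> = Box::new(NoopContext);
    ctx.device_drop(&7u32);
}
```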