From 87a84eb490d65c0b264fb5455b81dd79ee1b6871 Mon Sep 17 00:00:00 2001
From: Raph Levien
Date: Mon, 31 May 2021 20:38:36 -0700
Subject: [PATCH 1/2] Fix some dx12 bugs

Missing a potential barrier, and had src and dst switched on blit.
---
 piet-gpu-hal/src/dx12.rs          | 14 +++++++++++++-
 piet-gpu-hal/src/dx12/wrappers.rs |  2 +-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/piet-gpu-hal/src/dx12.rs b/piet-gpu-hal/src/dx12.rs
index 67b751d..cb5fb3b 100644
--- a/piet-gpu-hal/src/dx12.rs
+++ b/piet-gpu-hal/src/dx12.rs
@@ -456,12 +456,17 @@ impl crate::backend::CmdBuf for CmdBuf {
 
     unsafe fn finish(&mut self) {
         let _ = self.c.close();
+        // This is a bit of a mess. Returning the allocator to the free pool
+        // makes sense if the command list will be dropped, but not if it will
+        // be reused. Probably need to implement some logic on drop.
+        /*
         if let Some(free_allocators) = self.free_allocators.upgrade() {
             free_allocators
                 .lock()
                 .unwrap()
                 .push(self.allocator.take().unwrap());
         }
+        */
     }
 
     unsafe fn dispatch(
@@ -516,6 +521,9 @@ impl crate::backend::CmdBuf for CmdBuf {
             );
             self.c.resource_barrier(&[bar]);
         }
+        // Always do a memory barrier in case of UAV image access. We probably
+        // want to make these barriers more precise.
+        self.memory_barrier();
     }
 
     unsafe fn clear_buffer(&self, buffer: &Buffer, size: Option<u64>) {
@@ -558,6 +566,10 @@ impl crate::backend::PipelineBuilder for PipelineBuilder {
     fn add_buffers(&mut self, n_buffers: u32) {
+        // Note: if the buffer is readonly, then it needs to be bound
+        // as an SRV, not a UAV. I think that requires distinguishing
+        // readonly and read-write cases in pipeline and descriptor set
+        // creation. For now we punt.
         if n_buffers != 0 {
             self.ranges.push(d3d12::D3D12_DESCRIPTOR_RANGE {
                 RangeType: d3d12::D3D12_DESCRIPTOR_RANGE_TYPE_UAV,
@@ -572,7 +584,7 @@ impl crate::backend::PipelineBuilder for PipelineBuilder {
 
     fn add_images(&mut self, n_images: u32) {
         // These are UAV images, so the descriptor type is the same as buffers.
-        self.add_buffers(n_images)
+        self.add_buffers(n_images);
     }
 
     fn add_textures(&mut self, max_textures: u32) {
diff --git a/piet-gpu-hal/src/dx12/wrappers.rs b/piet-gpu-hal/src/dx12/wrappers.rs
index e132441..800959f 100644
--- a/piet-gpu-hal/src/dx12/wrappers.rs
+++ b/piet-gpu-hal/src/dx12/wrappers.rs
@@ -1307,7 +1307,7 @@ impl GraphicsCommandList {
 
     /// Copy an entire resource (buffer or image)
     pub unsafe fn copy_resource(&self, src: &Resource, dst: &Resource) {
-        self.0.CopyResource(src.get_mut(), dst.get_mut());
+        self.0.CopyResource(dst.get_mut(), src.get_mut());
     }
 
     pub unsafe fn copy_buffer(

From 074fafad1e81fe2a167ea7d4609e91261d8da5fe Mon Sep 17 00:00:00 2001
From: Raph Levien
Date: Mon, 31 May 2021 21:03:35 -0700
Subject: [PATCH 2/2] Turn off reuse of command buffers for now

It worked ok on Vulkan but is causing problems on DX12 and Metal. Punt
for now and come back to this later when we do more sophisticated
resource management.
---
 piet-gpu-hal/src/dx12.rs | 2 --
 piet-gpu-hal/src/hub.rs  | 9 +++++++++
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/piet-gpu-hal/src/dx12.rs b/piet-gpu-hal/src/dx12.rs
index cb5fb3b..857fe3c 100644
--- a/piet-gpu-hal/src/dx12.rs
+++ b/piet-gpu-hal/src/dx12.rs
@@ -459,14 +459,12 @@ impl crate::backend::CmdBuf for CmdBuf {
         // This is a bit of a mess. Returning the allocator to the free pool
         // makes sense if the command list will be dropped, but not if it will
         // be reused. Probably need to implement some logic on drop.
-        /*
         if let Some(free_allocators) = self.free_allocators.upgrade() {
             free_allocators
                 .lock()
                 .unwrap()
                 .push(self.allocator.take().unwrap());
         }
-        */
     }
 
     unsafe fn dispatch(
diff --git a/piet-gpu-hal/src/hub.rs b/piet-gpu-hal/src/hub.rs
index b280881..2791bc9 100644
--- a/piet-gpu-hal/src/hub.rs
+++ b/piet-gpu-hal/src/hub.rs
@@ -169,6 +169,11 @@ impl Session {
                     let mut item = pending.swap_remove(i);
                     // TODO: wait is superfluous, can just reset
                     let _ = self.0.device.wait_and_reset(vec![&mut item.fence]);
+
+                    // Reuse of command buffers works on Vulkan, but not at all on
+                    // Metal and is problematic on DX12 (the allocator is returned)
+                    // to the pool. Punt for now.
+                    /*
                     let mut pool = self.0.cmd_buf_pool.lock().unwrap();
                     pool.push((item.cmd_buf, item.fence));
                     std::mem::drop(item.resources);
@@ -176,6 +181,7 @@ impl Session {
                         pool.push((staging_cmd_buf.cmd_buf, staging_cmd_buf.fence));
                         std::mem::drop(staging_cmd_buf.resources);
                     }
+                    */
                 } else {
                     i += 1;
                 }
@@ -562,11 +568,14 @@ impl SubmittedCmdBuf {
             unsafe {
                 session.device.wait_and_reset(vec![&mut item.fence])?;
            }
+            // See discussion in `poll_cleanup`
+            /*
             session
                 .cmd_buf_pool
                 .lock()
                 .unwrap()
                 .push((item.cmd_buf, item.fence));
+            */
             std::mem::drop(item.resources);
         }
         // else session dropped error?
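
A note on the copy_resource fix in patch 1: ID3D12GraphicsCommandList::CopyResource
takes the destination resource as its first parameter and the source as its second,
so the wrapper has to swap its arguments to preserve the (src, dst) order of its own
signature. Below is a minimal sketch of the intended call direction; `cmd_list`,
`staging`, and `dev_buf` are hypothetical names, not taken from the patch.

    // Copy the entire contents of `staging` into `dev_buf`.
    // With the fix, the wrapper forwards this as CopyResource(dev_buf, staging),
    // matching the D3D12 parameter order (pDstResource, pSrcResource).
    unsafe {
        cmd_list.copy_resource(&staging, &dev_buf);
    }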