Clean up command buffers

This patch deallocates command buffers after command submission completes, at the same time as other resources are released.

It should be portable and robust on all back-ends, though not necessarily the most efficient. Reuse of command buffers, as well as more efficient allocation on Vulkan and DX12, is left for follow-up work.
Raph Levien 2021-09-06 10:17:16 -07:00
parent cbebb56981
commit f73da22a69
8 changed files with 113 additions and 23 deletions
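To make the lifecycle concrete before the per-file diffs, here is a minimal sketch of the pattern this patch establishes: a command buffer and fence are created per submission, the caller waits on the fence, and only then are the command buffer, the fence, and the retained resources destroyed. The `Toy*` types and method bodies below are illustrative stand-ins, not the piet-gpu-hal API; the real additions are the `destroy_cmd_buf` and `destroy_fence` methods shown in the diffs.

```rust
use std::sync::Arc;

// Illustrative stand-ins for the backend types (hypothetical, not piet-gpu-hal).
struct ToyCmdBuf;
struct ToyFence;
struct ToyDevice;

impl ToyDevice {
    fn create_cmd_buf(&self) -> ToyCmdBuf { ToyCmdBuf }
    fn create_fence(&self) -> ToyFence { ToyFence }
    fn submit(&self, _cmd_buf: &ToyCmdBuf, _fence: &ToyFence) {}
    fn wait_and_reset(&self, _fence: &mut ToyFence) {}
    // Mirrors the new trait methods: only valid once the submission has completed.
    fn destroy_cmd_buf(&self, _cmd_buf: ToyCmdBuf) {}
    fn destroy_fence(&self, _fence: ToyFence) {}
}

fn submit_and_clean_up(device: &ToyDevice, resources: Vec<Arc<dyn std::any::Any>>) {
    let cmd_buf = device.create_cmd_buf();
    let mut fence = device.create_fence();
    device.submit(&cmd_buf, &fence);
    // ... later, when polling or explicitly waiting on the submission ...
    device.wait_and_reset(&mut fence);
    // The point at which retained resources are released is now also the
    // point at which the command buffer and fence are deallocated.
    device.destroy_cmd_buf(cmd_buf);
    device.destroy_fence(fence);
    std::mem::drop(resources);
}
```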

View file

@@ -16,7 +16,7 @@
//! The generic trait for backends to implement.
-use crate::{mux::ShaderCode, BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams};
+use crate::{BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams};
pub trait Device: Sized {
    type Buffer: 'static;

@@ -105,6 +105,9 @@ pub trait Device: Sized {
    fn create_cmd_buf(&self) -> Result<Self::CmdBuf, Error>;
+    /// If the command buffer was submitted, it must complete before this is called.
+    unsafe fn destroy_cmd_buf(&self, cmd_buf: Self::CmdBuf) -> Result<(), Error>;
    fn create_query_pool(&self, n_queries: u32) -> Result<Self::QueryPool, Error>;
    /// Get results from query pool, destroying it in the process.

@@ -158,6 +161,7 @@ pub trait Device: Sized {
    unsafe fn create_semaphore(&self) -> Result<Self::Semaphore, Error>;
    unsafe fn create_fence(&self, signaled: bool) -> Result<Self::Fence, Error>;
+    unsafe fn destroy_fence(&self, fence: Self::Fence) -> Result<(), Error>;
    unsafe fn wait_and_reset(&self, fences: Vec<&mut Self::Fence>) -> Result<(), Error>;
    unsafe fn get_fence_status(&self, fence: &mut Self::Fence) -> Result<bool, Error>;

View file

@@ -316,6 +316,10 @@ impl crate::backend::Device for Dx12Device {
        }
    }
+    unsafe fn destroy_cmd_buf(&self, _cmd_buf: Self::CmdBuf) -> Result<(), Error> {
+        Ok(())
+    }
    fn create_query_pool(&self, n_queries: u32) -> Result<Self::QueryPool, Error> {
        unsafe {
            let heap = self

@@ -409,6 +413,10 @@ impl crate::backend::Device for Dx12Device {
        Ok(Fence { fence, event, val })
    }
+    unsafe fn destroy_fence(&self, _fence: Self::Fence) -> Result<(), Error> {
+        Ok(())
+    }
    unsafe fn wait_and_reset(&self, fences: Vec<&mut Self::Fence>) -> Result<(), Error> {
        for fence in fences {
            // TODO: probably handle errors here.

View file

@@ -661,6 +661,10 @@ impl Device {
        Ok(Fence(ComPtr::from_raw(fence)))
    }
+    pub unsafe fn destroy_fence(&self, fence: &Fence) -> Result<(), Error> {
+        Ok(())
+    }
    pub unsafe fn create_committed_resource(
        &self,
        heap_properties: &d3d12::D3D12_HEAP_PROPERTIES,

View file

@@ -30,8 +30,13 @@ pub struct Session(Arc<SessionInner>);
struct SessionInner {
    device: mux::Device,
+    /// A pool of command buffers that can be reused.
+    ///
+    /// Currently this is not used, as it only works well on Vulkan. At some
+    /// point, we will want to efficiently reuse command buffers rather than
+    /// allocating them each time, but that is a TODO.
    cmd_buf_pool: Mutex<Vec<(mux::CmdBuf, Fence)>>,
-    /// Command buffers that are still pending (so resources can't be freed).
+    /// Command buffers that are still pending (so resources can't be freed yet).
    pending: Mutex<Vec<SubmittedCmdBufInner>>,
    /// A command buffer that is used for copying from staging buffers.
    staging_cmd_buf: Mutex<Option<CmdBuf>>,

@@ -169,18 +174,7 @@ impl Session {
                let mut item = pending.swap_remove(i);
                // TODO: wait is superfluous, can just reset
                let _ = self.0.device.wait_and_reset(vec![&mut item.fence]);
-                // Reuse of command buffers works on Vulkan, but not at all on
-                // Metal and is problematic on DX12 (the allocator is returned
-                // to the pool). Punt for now.
-                let mut pool = self.0.cmd_buf_pool.lock().unwrap();
-                pool.push((item.cmd_buf, item.fence));
-                std::mem::drop(item.resources);
-                if let Some(staging_cmd_buf) = item.staging_cmd_buf {
-                    pool.push((staging_cmd_buf.cmd_buf, staging_cmd_buf.fence));
-                    std::mem::drop(staging_cmd_buf.resources);
-                }
+                self.0.cleanup_submitted_cmd_buf(item);
            } else {
                i += 1;
            }

@@ -392,6 +386,25 @@ impl Session {
    }
}
+impl SessionInner {
+    /// Clean up a submitted command buffer.
+    ///
+    /// This drops the resources used by the command buffer and also cleans up the command
+    /// buffer itself. Currently that means destroying it, but at some point we'll want to
+    /// be better at reuse.
+    unsafe fn cleanup_submitted_cmd_buf(&self, item: SubmittedCmdBufInner) {
+        let _should_handle_err = self.device.destroy_cmd_buf(item.cmd_buf);
+        let _should_handle_err = self.device.destroy_fence(item.fence);
+        std::mem::drop(item.resources);
+        if let Some(staging_cmd_buf) = item.staging_cmd_buf {
+            let _should_handle_err = self.device.destroy_cmd_buf(staging_cmd_buf.cmd_buf);
+            let _should_handle_err = self.device.destroy_fence(staging_cmd_buf.fence);
+            std::mem::drop(staging_cmd_buf.resources);
+        }
+    }
+}
impl CmdBuf {
    /// Begin recording into a command buffer.
    ///

@@ -566,14 +579,8 @@ impl SubmittedCmdBuf {
        if let Some(session) = Weak::upgrade(&self.1) {
            unsafe {
                session.device.wait_and_reset(vec![&mut item.fence])?;
+                session.cleanup_submitted_cmd_buf(item);
            }
-            // See discussion in `poll_cleanup`
-            session
-                .cmd_buf_pool
-                .lock()
-                .unwrap()
-                .push((item.cmd_buf, item.fence));
-            std::mem::drop(item.resources);
        }
        // else session dropped error?
        Ok(())

View file

@@ -72,6 +72,16 @@ macro_rules! mux_enum {
                    }
                }
            }
+            $crate::mux_cfg! {
+                #[cfg(vk)]
+                #[allow(unused)]
+                fn vk_owned(self) -> $vk {
+                    match self {
+                        $name::Vk(x) => x,
+                        _ => panic!("downcast error")
+                    }
+                }
+            }
            $crate::mux_cfg! {
                #[cfg(dx12)]

@@ -93,6 +103,16 @@ macro_rules! mux_enum {
                    }
                }
            }
+            $crate::mux_cfg! {
+                #[cfg(dx12)]
+                #[allow(unused)]
+                fn dx12_owned(self) -> $dx12 {
+                    match self {
+                        $name::Dx12(x) => x,
+                        _ => panic!("downcast error")
+                    }
+                }
+            }
            $crate::mux_cfg! {
                #[cfg(mtl)]

@@ -112,6 +132,15 @@ macro_rules! mux_enum {
                    }
                }
            }
+            $crate::mux_cfg! {
+                #[cfg(mtl)]
+                #[allow(unused)]
+                fn mtl_owned(self) -> $mtl {
+                    match self {
+                        $name::Mtl(x) => x,
+                    }
+                }
+            }
        }
    };
}
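For context on the macro change above: the new `vk_owned`, `dx12_owned`, and `mtl_owned` arms generate by-value downcast methods on each mux enum. Roughly, and with the `mux_cfg` cfg-gating omitted, the expansion for an enum like `Fence` looks like the following sketch (`VkFence` and `Dx12Fence` are placeholder names for the backend types):

```rust
// Sketch of the code the macro generates, with cfg attributes omitted.
// VkFence and Dx12Fence stand in for the actual backend fence types.
struct VkFence;
struct Dx12Fence;

enum Fence {
    Vk(VkFence),
    Dx12(Dx12Fence),
}

impl Fence {
    // By-value downcast: consumes the enum and hands back ownership of the
    // wrapped backend object, panicking if the variant does not match.
    fn vk_owned(self) -> VkFence {
        match self {
            Fence::Vk(x) => x,
            _ => panic!("downcast error"),
        }
    }

    fn dx12_owned(self) -> Dx12Fence {
        match self {
            Fence::Dx12(x) => x,
            _ => panic!("downcast error"),
        }
    }
}
```

Taking `self` by value is what lets `mux::Device::destroy_fence` and `destroy_cmd_buf` (below) pass ownership of the backend object to the backend's destroy call, in contrast to the existing borrowing accessors.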

View file

@@ -289,6 +289,10 @@ impl crate::backend::Device for MtlDevice {
        Ok(CmdBuf { cmd_buf })
    }
+    unsafe fn destroy_cmd_buf(&self, _cmd_buf: Self::CmdBuf) -> Result<(), Error> {
+        Ok(())
+    }
    fn create_query_pool(&self, n_queries: u32) -> Result<Self::QueryPool, Error> {
        // TODO
        Ok(QueryPool)

@@ -365,6 +369,10 @@ impl crate::backend::Device for MtlDevice {
        Ok(Fence::Idle)
    }
+    unsafe fn destroy_fence(&self, _fence: Self::Fence) -> Result<(), Error> {
+        Ok(())
+    }
    unsafe fn wait_and_reset(&self, fences: Vec<&mut Self::Fence>) -> Result<(), Error> {
        for fence in fences {
            match fence {

View file

@@ -242,6 +242,14 @@ impl Device {
        }
    }
+    pub unsafe fn destroy_fence(&self, fence: Fence) -> Result<(), Error> {
+        mux_match! { self;
+            Device::Vk(d) => d.destroy_fence(fence.vk_owned()),
+            Device::Dx12(d) => d.destroy_fence(fence.dx12_owned()),
+            Device::Mtl(d) => d.destroy_fence(fence.mtl_owned()),
+        }
+    }
    // Consider changing Vec to iterator (as is done in gfx-hal)
    pub unsafe fn wait_and_reset(&self, fences: Vec<&mut Fence>) -> Result<(), Error> {
        mux_match! { self;

@@ -309,6 +317,14 @@ impl Device {
        }
    }
+    pub unsafe fn destroy_cmd_buf(&self, cmd_buf: CmdBuf) -> Result<(), Error> {
+        mux_match! { self;
+            Device::Vk(d) => d.destroy_cmd_buf(cmd_buf.vk_owned()),
+            Device::Dx12(d) => d.destroy_cmd_buf(cmd_buf.dx12_owned()),
+            Device::Mtl(d) => d.destroy_cmd_buf(cmd_buf.mtl_owned()),
+        }
+    }
    pub fn create_query_pool(&self, n_queries: u32) -> Result<QueryPool, Error> {
        mux_match! { self;
            Device::Vk(d) => d.create_query_pool(n_queries).map(QueryPool::Vk),

View file

@@ -89,6 +89,7 @@ pub struct DescriptorSet {
pub struct CmdBuf {
    cmd_buf: vk::CommandBuffer,
+    cmd_pool: vk::CommandPool,
    device: Arc<RawDevice>,
}

@@ -620,6 +621,12 @@ impl crate::backend::Device for VkDevice {
        Ok(device.create_fence(&vk::FenceCreateInfo::builder().flags(flags).build(), None)?)
    }
+    unsafe fn destroy_fence(&self, fence: Self::Fence) -> Result<(), Error> {
+        let device = &self.device.device;
+        device.destroy_fence(fence, None);
+        Ok(())
+    }
    unsafe fn create_semaphore(&self) -> Result<Self::Semaphore, Error> {
        let device = &self.device.device;
        Ok(device.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)?)

@@ -658,7 +665,7 @@ impl crate::backend::Device for VkDevice {
    fn create_cmd_buf(&self) -> Result<CmdBuf, Error> {
        unsafe {
            let device = &self.device.device;
-            let command_pool = device.create_command_pool(
+            let cmd_pool = device.create_command_pool(
                &vk::CommandPoolCreateInfo::builder()
                    .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
                    .queue_family_index(self.qfi),

@@ -666,17 +673,24 @@ impl crate::backend::Device for VkDevice {
            )?;
            let cmd_buf = device.allocate_command_buffers(
                &vk::CommandBufferAllocateInfo::builder()
-                    .command_pool(command_pool)
+                    .command_pool(cmd_pool)
                    .level(vk::CommandBufferLevel::PRIMARY)
                    .command_buffer_count(1),
            )?[0];
            Ok(CmdBuf {
                cmd_buf,
+                cmd_pool,
                device: self.device.clone(),
            })
        }
    }
+    unsafe fn destroy_cmd_buf(&self, cmd_buf: CmdBuf) -> Result<(), Error> {
+        let device = &self.device.device;
+        device.destroy_command_pool(cmd_buf.cmd_pool, None);
+        Ok(())
+    }
    /// Create a query pool for timestamp queries.
    fn create_query_pool(&self, n_queries: u32) -> Result<QueryPool, Error> {
        unsafe {
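One note on the Vulkan changes: each `CmdBuf` now carries the `vk::CommandPool` it was allocated from, and `destroy_cmd_buf` simply destroys that pool; destroying a command pool in Vulkan implicitly frees every command buffer allocated from it, so no separate `free_command_buffers` call is needed. For contrast, a more explicit (and here redundant) two-step cleanup with ash would look roughly like this sketch:

```rust
use ash::{vk, Device};

// Illustrative only: the patch relies on vkDestroyCommandPool freeing the
// command buffers allocated from the pool, so the explicit free is redundant.
unsafe fn destroy_cmd_buf_explicit(
    device: &Device,
    cmd_pool: vk::CommandPool,
    cmd_buf: vk::CommandBuffer,
) {
    device.free_command_buffers(cmd_pool, &[cmd_buf]);
    device.destroy_command_pool(cmd_pool, None);
}
```

When command-buffer reuse lands as follow-up work, resetting the pool rather than destroying it would presumably be the more natural operation.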