2021-05-26 11:06:51 +10:00
|
|
|
//! A somewhat higher level GPU abstraction.
|
2020-11-18 03:04:25 +11:00
|
|
|
//!
|
2021-05-26 11:06:51 +10:00
|
|
|
//! This layer is on top of the lower-level layer that multiplexes different
|
|
|
|
//! back-ends. It handles details such as managing staging buffers for creating
|
|
|
|
//! buffers with initial content, deferring dropping of resources until command
|
|
|
|
//! submission is complete, and a bit more. These conveniences might expand
|
|
|
|
//! even more in time.
|
2020-11-18 03:04:25 +11:00
|
|
|
|
2021-05-25 04:44:56 +10:00
|
|
|
use std::convert::TryInto;
|
2020-11-18 03:04:25 +11:00
|
|
|
use std::sync::{Arc, Mutex, Weak};
|
|
|
|
|
2021-05-26 11:06:51 +10:00
|
|
|
use smallvec::SmallVec;
|
2020-11-18 03:04:25 +11:00
|
|
|
|
2021-05-26 11:06:51 +10:00
|
|
|
use crate::mux;
|
2020-11-18 03:04:25 +11:00
|
|
|
|
2021-05-26 11:06:51 +10:00
|
|
|
use crate::{BufferUsage, Error, GpuInfo, SamplerParams};
|
2020-11-18 03:04:25 +11:00
|
|
|
|
2021-05-26 11:06:51 +10:00
|
|
|
pub use crate::mux::{DescriptorSet, Fence, Pipeline, QueryPool, Sampler, Semaphore, ShaderCode};
|
2020-11-18 03:04:25 +11:00
|
|
|
|
|
|
|
/// A handle to a GPU device plus session-level bookkeeping: the command
/// buffer pool, pending submissions, and the shared staging command buffer.
///
/// Cheap to clone; all clones share the same underlying [`SessionInner`].
#[derive(Clone)]
pub struct Session(Arc<SessionInner>);
|
|
|
|
|
|
|
|
/// Shared state behind a [`Session`]. Resources hold a `Weak` reference to
/// this so they can be returned to the device when dropped, without keeping
/// the session itself alive.
struct SessionInner {
    device: mux::Device,
    /// Command buffers (with their fences) available for reuse.
    cmd_buf_pool: Mutex<Vec<(mux::CmdBuf, Fence)>>,
    /// Command buffers that are still pending (so resources can't be freed).
    pending: Mutex<Vec<SubmittedCmdBufInner>>,
    /// A command buffer that is used for copying from staging buffers.
    staging_cmd_buf: Mutex<Option<CmdBuf>>,
    /// Device capabilities, queried once at session creation.
    gpu_info: GpuInfo,
}
|
|
|
|
|
|
|
|
/// A command buffer obtained from [`Session::cmd_buf`], paired with the
/// fence used to track completion of its submission.
pub struct CmdBuf {
    cmd_buf: mux::CmdBuf,
    /// Signaled when the submission using this command buffer completes.
    fence: Fence,
    /// Resources retained until the submission completes (see `add_resource`).
    resources: Vec<RetainResource>,
    /// Back-reference to the owning session so the buffer/fence pair can be
    /// recycled into the pool without keeping the session alive.
    session: Weak<SessionInner>,
}
|
|
|
|
|
|
|
|
// Maybe "pending" is a better name?
/// A command buffer that has been submitted to the GPU queue.
///
/// The payload is `Option` so `wait` can move it out while the `Drop` impl
/// still has a fallback path (parking the submission on the pending list).
pub struct SubmittedCmdBuf(Option<SubmittedCmdBufInner>, Weak<SessionInner>);
|
|
|
|
|
|
|
|
/// State of an in-flight submission, kept until its fence signals.
struct SubmittedCmdBufInner {
    // It's inconsistent, cmd_buf is unpacked, staging_cmd_buf isn't. Probably
    // better to choose one or the other.
    cmd_buf: mux::CmdBuf,
    /// Fence passed to the submission; signaled on completion.
    fence: Fence,
    /// Resources kept alive until the fence signals.
    resources: Vec<RetainResource>,
    /// Staging-copy command buffer submitted ahead of `cmd_buf`, if any.
    staging_cmd_buf: Option<CmdBuf>,
}
|
|
|
|
|
|
|
|
/// A reference-counted handle to a GPU image; clones share the same image.
#[derive(Clone)]
pub struct Image(Arc<ImageInner>);
|
|
|
|
|
|
|
|
/// Owned image state; `Drop` destroys the image via the session's device.
struct ImageInner {
    image: mux::Image,
    /// Used by `Drop` to destroy the image if the session is still alive.
    session: Weak<SessionInner>,
}
|
|
|
|
|
|
|
|
/// A reference-counted handle to a GPU buffer; clones share the same buffer.
#[derive(Clone)]
pub struct Buffer(Arc<BufferInner>);
|
|
|
|
|
|
|
|
/// Owned buffer state; `Drop` destroys the buffer via the session's device.
struct BufferInner {
    buffer: mux::Buffer,
    /// Used by `Drop` to destroy the buffer if the session is still alive.
    session: Weak<SessionInner>,
}
|
|
|
|
|
2021-05-26 11:06:51 +10:00
|
|
|
/// Session-level wrapper around the backend-multiplexed pipeline builder.
pub struct PipelineBuilder(mux::PipelineBuilder);
|
2020-11-25 07:36:27 +11:00
|
|
|
|
2021-05-26 11:06:51 +10:00
|
|
|
/// Session-level wrapper around the backend-multiplexed descriptor set builder.
pub struct DescriptorSetBuilder(mux::DescriptorSetBuilder);
|
2020-11-25 07:36:27 +11:00
|
|
|
|
2021-05-25 04:44:56 +10:00
|
|
|
/// Data types that can be stored in a GPU buffer.
///
/// # Safety
///
/// Buffer reads and writes copy raw bytes (see `Buffer::write` / `Buffer::read`,
/// which cast to `*const u8` / `*mut u8`), so implementors must be plain old
/// data: every bit pattern must be a valid value of the type.
pub unsafe trait PlainData {}
|
|
|
|
|
|
|
|
unsafe impl PlainData for u8 {}
|
|
|
|
unsafe impl PlainData for u16 {}
|
|
|
|
unsafe impl PlainData for u32 {}
|
|
|
|
unsafe impl PlainData for u64 {}
|
|
|
|
unsafe impl PlainData for i8 {}
|
|
|
|
unsafe impl PlainData for i16 {}
|
|
|
|
unsafe impl PlainData for i32 {}
|
|
|
|
unsafe impl PlainData for i64 {}
|
|
|
|
unsafe impl PlainData for f32 {}
|
|
|
|
unsafe impl PlainData for f64 {}
|
|
|
|
|
2021-05-25 06:56:24 +10:00
|
|
|
/// A resource to retain during the lifetime of a command submission.
///
/// Holds one of the reference-counted resource handles; dropping it releases
/// that reference (and possibly the underlying GPU resource).
pub enum RetainResource {
    Buffer(Buffer),
    Image(Image),
}
|
|
|
|
|
2020-11-18 03:04:25 +11:00
|
|
|
impl Session {
    /// Create a new session wrapping the given device.
    pub fn new(device: mux::Device) -> Session {
        // Query once and cache; `gpu_info()` serves the cached copy.
        let gpu_info = device.query_gpu_info();
        Session(Arc::new(SessionInner {
            device,
            gpu_info,
            cmd_buf_pool: Default::default(),
            pending: Default::default(),
            staging_cmd_buf: Default::default(),
        }))
    }

    /// Get a command buffer, reusing one from the pool when available.
    pub fn cmd_buf(&self) -> Result<CmdBuf, Error> {
        // Recycle completed submissions first so their command buffers are
        // available for reuse below.
        self.poll_cleanup();
        let (cmd_buf, fence) = if let Some(cf) = self.0.cmd_buf_pool.lock().unwrap().pop() {
            cf
        } else {
            let cmd_buf = self.0.device.create_cmd_buf()?;
            // Fence is created unsignaled; it is signaled by submission completion.
            let fence = unsafe { self.0.device.create_fence(false)? };
            (cmd_buf, fence)
        };
        Ok(CmdBuf {
            cmd_buf,
            fence,
            resources: Vec::new(),
            session: Arc::downgrade(&self.0),
        })
    }

    /// Scan pending submissions; for each one whose fence has signaled,
    /// recycle its command buffer(s) into the pool and drop the resources it
    /// retained.
    fn poll_cleanup(&self) {
        let mut pending = self.0.pending.lock().unwrap();
        unsafe {
            let mut i = 0;
            while i < pending.len() {
                if let Ok(true) = self.0.device.get_fence_status(&mut pending[i].fence) {
                    // Order of `pending` doesn't matter, so swap_remove is fine.
                    let mut item = pending.swap_remove(i);
                    // TODO: wait is superfluous, can just reset
                    let _ = self.0.device.wait_and_reset(vec![&mut item.fence]);
                    let mut pool = self.0.cmd_buf_pool.lock().unwrap();
                    pool.push((item.cmd_buf, item.fence));
                    // Submission is complete; retained resources may go now.
                    std::mem::drop(item.resources);
                    if let Some(staging_cmd_buf) = item.staging_cmd_buf {
                        pool.push((staging_cmd_buf.cmd_buf, staging_cmd_buf.fence));
                        std::mem::drop(staging_cmd_buf.resources);
                    }
                    // No `i += 1`: swap_remove moved a new element into slot i.
                } else {
                    i += 1;
                }
            }
        }
    }

    /// Submit a command buffer, preceded by any accumulated staging copies.
    ///
    /// The returned handle keeps the command buffer's retained resources
    /// alive; `wait` on it, or drop it to park it on the pending list until
    /// the fence signals.
    pub unsafe fn run_cmd_buf(
        &self,
        mut cmd_buf: CmdBuf,
        wait_semaphores: &[&Semaphore],
        signal_semaphores: &[&Semaphore],
    ) -> Result<SubmittedCmdBuf, Error> {
        // Again, SmallVec here?
        let mut cmd_bufs = Vec::with_capacity(2);
        // Take the shared staging command buffer, if any, so its copies run
        // ahead of this command buffer in the same queue submission.
        let mut staging_cmd_buf = self.0.staging_cmd_buf.lock().unwrap().take();
        if let Some(staging) = &mut staging_cmd_buf {
            // With finer grained resource tracking, we might be able to avoid this in
            // some cases.
            staging.memory_barrier();
            staging.finish();
            cmd_bufs.push(&staging.cmd_buf);
        }
        cmd_bufs.push(&cmd_buf.cmd_buf);
        self.0.device.run_cmd_bufs(
            &cmd_bufs,
            wait_semaphores,
            signal_semaphores,
            Some(&mut cmd_buf.fence),
        )?;
        Ok(SubmittedCmdBuf(
            Some(SubmittedCmdBufInner {
                cmd_buf: cmd_buf.cmd_buf,
                fence: cmd_buf.fence,
                resources: cmd_buf.resources,
                staging_cmd_buf,
            }),
            cmd_buf.session,
        ))
    }

    /// Create a buffer of the given size and usage flags.
    pub fn create_buffer(&self, size: u64, usage: BufferUsage) -> Result<Buffer, Error> {
        let buffer = self.0.device.create_buffer(size, usage)?;
        Ok(Buffer(Arc::new(BufferInner {
            buffer,
            session: Arc::downgrade(&self.0),
        })))
    }

    /// Create a buffer with initialized data.
    pub fn create_buffer_init(
        &self,
        contents: &[impl PlainData],
        usage: BufferUsage,
    ) -> Result<Buffer, Error> {
        // The pointer/size pair is derived from a live slice, and `PlainData`
        // guarantees the element bytes are plain data, so the raw call is sound.
        unsafe {
            self.create_buffer_init_raw(
                contents.as_ptr() as *const u8,
                std::mem::size_of_val(contents).try_into()?,
                usage,
            )
        }
    }

    /// Create a buffer with initialized data, from a raw pointer memory region.
    ///
    /// # Safety
    ///
    /// `contents` must point to at least `size` readable bytes.
    pub unsafe fn create_buffer_init_raw(
        &self,
        contents: *const u8,
        size: u64,
        usage: BufferUsage,
    ) -> Result<Buffer, Error> {
        // Stage the upload when the requested buffer isn't host-mappable and
        // the device prefers staging buffers.
        let use_staging_buffer = !usage.intersects(BufferUsage::MAP_READ | BufferUsage::MAP_WRITE)
            && self.gpu_info().use_staging_buffers;
        let create_usage = if use_staging_buffer {
            // The first buffer is only a staging source in this case.
            BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC
        } else {
            usage | BufferUsage::MAP_WRITE
        };
        let create_buf = self.create_buffer(size, create_usage)?;
        self.0
            .device
            .write_buffer(&create_buf.mux_buffer(), contents, 0, size)?;
        if use_staging_buffer {
            let buf = self.create_buffer(size, usage | BufferUsage::COPY_DST)?;
            // Lazily start a staging command buffer; it is shared by all staged
            // uploads until the next submission takes it (see run_cmd_buf).
            let mut staging_cmd_buf = self.0.staging_cmd_buf.lock().unwrap();
            if staging_cmd_buf.is_none() {
                let mut cmd_buf = self.cmd_buf()?;
                cmd_buf.begin();
                *staging_cmd_buf = Some(cmd_buf);
            }
            let staging_cmd_buf = staging_cmd_buf.as_mut().unwrap();
            // This will ensure the staging buffer is deallocated.
            staging_cmd_buf.copy_buffer(create_buf.mux_buffer(), buf.mux_buffer());
            staging_cmd_buf.add_resource(create_buf);
            Ok(buf)
        } else {
            Ok(create_buf)
        }
    }

    /// Create a 2D image of the given dimensions.
    pub unsafe fn create_image2d(&self, width: u32, height: u32) -> Result<Image, Error> {
        let image = self.0.device.create_image2d(width, height)?;
        Ok(Image(Arc::new(ImageInner {
            image,
            session: Arc::downgrade(&self.0),
        })))
    }

    /// Create a semaphore for synchronizing between submissions.
    pub unsafe fn create_semaphore(&self) -> Result<Semaphore, Error> {
        self.0.device.create_semaphore()
    }

    /// This creates a pipeline that operates on some buffers and images.
    ///
    /// The descriptor set layout is just some number of storage buffers and storage images (this might change).
    pub unsafe fn create_simple_compute_pipeline<'a>(
        &self,
        code: ShaderCode<'a>,
        n_buffers: u32,
    ) -> Result<Pipeline, Error> {
        self.pipeline_builder()
            .add_buffers(n_buffers)
            .create_compute_pipeline(self, code)
    }

    /// Create a descriptor set for a simple pipeline that just references buffers.
    pub unsafe fn create_simple_descriptor_set<'a>(
        &self,
        pipeline: &Pipeline,
        buffers: impl IntoRefs<'a, Buffer>,
    ) -> Result<DescriptorSet, Error> {
        self.descriptor_set_builder()
            .add_buffers(buffers)
            .build(self, pipeline)
    }

    /// Create a query pool for timestamp queries.
    pub fn create_query_pool(&self, n_queries: u32) -> Result<QueryPool, Error> {
        self.0.device.create_query_pool(n_queries)
    }

    /// Fetch results from a query pool, one value per query.
    pub unsafe fn fetch_query_pool(&self, pool: &QueryPool) -> Result<Vec<f64>, Error> {
        self.0.device.fetch_query_pool(pool)
    }

    /// Start building a compute pipeline.
    pub unsafe fn pipeline_builder(&self) -> PipelineBuilder {
        PipelineBuilder(self.0.device.pipeline_builder())
    }

    /// Start building a descriptor set.
    pub unsafe fn descriptor_set_builder(&self) -> DescriptorSetBuilder {
        DescriptorSetBuilder(self.0.device.descriptor_set_builder())
    }

    /// Create a sampler.
    ///
    /// Not yet routed through the mux layer; currently panics if called.
    pub unsafe fn create_sampler(&self, params: SamplerParams) -> Result<Sampler, Error> {
        todo!()
        //self.0.device.create_sampler(params)
    }

    /// Information about the GPU, cached at session creation.
    pub fn gpu_info(&self) -> &GpuInfo {
        &self.0.gpu_info
    }

    /// Choose shader code from the available choices.
    pub fn choose_shader<'a>(&self, spv: &'a [u8], hlsl: &'a str, msl: &'a str) -> ShaderCode<'a> {
        self.0.device.choose_shader(spv, hlsl, msl)
    }
}
|
|
|
|
|
|
|
|
impl CmdBuf {
|
2020-11-19 10:16:12 +11:00
|
|
|
/// Make sure the resource lives until the command buffer completes.
|
|
|
|
///
|
|
|
|
/// The submitted command buffer will hold this reference until the corresponding
|
|
|
|
/// fence is signaled.
|
|
|
|
///
|
|
|
|
/// There are two choices for upholding the lifetime invariant: this function, or
|
|
|
|
/// the caller can manually hold the reference. The latter is appropriate when it's
|
|
|
|
/// part of retained state.
|
2021-05-25 06:56:24 +10:00
|
|
|
pub fn add_resource(&mut self, resource: impl Into<RetainResource>) {
|
|
|
|
self.resources.push(resource.into());
|
2020-11-18 03:04:25 +11:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl SubmittedCmdBuf {
    /// Block until the submitted work completes, then recycle the command
    /// buffer into the session's pool and release retained resources.
    pub fn wait(mut self) -> Result<(), Error> {
        // Taking the payload prevents the Drop impl from also pushing this
        // submission onto the pending list.
        let mut item = self.0.take().unwrap();
        if let Some(session) = Weak::upgrade(&self.1) {
            unsafe {
                session.device.wait_and_reset(vec![&mut item.fence])?;
            }
            session
                .cmd_buf_pool
                .lock()
                .unwrap()
                .push((item.cmd_buf, item.fence));
            std::mem::drop(item.resources);
            // NOTE(review): any staging_cmd_buf in `item` is dropped here
            // rather than recycled into the pool (unlike poll_cleanup) —
            // confirm this is intended.
        }
        // else session dropped error?
        Ok(())
    }
}
|
|
|
|
|
|
|
|
impl Drop for SubmittedCmdBuf {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if let Some(inner) = self.0.take() {
|
|
|
|
if let Some(session) = Weak::upgrade(&self.1) {
|
|
|
|
session.pending.lock().unwrap().push(inner);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for BufferInner {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if let Some(session) = Weak::upgrade(&self.session) {
|
|
|
|
unsafe {
|
|
|
|
let _ = session.device.destroy_buffer(&self.buffer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for ImageInner {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if let Some(session) = Weak::upgrade(&self.session) {
|
|
|
|
unsafe {
|
|
|
|
let _ = session.device.destroy_image(&self.image);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-26 11:06:51 +10:00
|
|
|
// Probably migrate from deref here to wrapping all methods.
impl std::ops::Deref for CmdBuf {
    type Target = mux::CmdBuf;
    /// Expose the underlying multiplexed command buffer's methods directly.
    fn deref(&self) -> &Self::Target {
        &self.cmd_buf
    }
}
|
|
|
|
|
|
|
|
impl std::ops::DerefMut for CmdBuf {
    /// Mutable access to the underlying multiplexed command buffer.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.cmd_buf
    }
}
|
|
|
|
|
|
|
|
impl Image {
    /// The underlying backend-multiplexed image.
    pub fn mux_image(&self) -> &mux::Image {
        &self.0.image
    }
}
|
|
|
|
|
|
|
|
impl Buffer {
    /// The underlying backend-multiplexed buffer.
    pub fn mux_buffer(&self) -> &mux::Buffer {
        &self.0.buffer
    }

    /// Write `contents` to the start of the buffer as raw bytes.
    ///
    /// Silently does nothing if the owning session has been dropped.
    ///
    /// # Safety
    ///
    /// Presumably the buffer must be host-mappable (MAP_WRITE) and large
    /// enough for `contents` — TODO confirm against mux `write_buffer`.
    pub unsafe fn write<T: PlainData>(&mut self, contents: &[T]) -> Result<(), Error> {
        if let Some(session) = Weak::upgrade(&self.0.session) {
            session.device.write_buffer(
                &self.0.buffer,
                contents.as_ptr() as *const u8,
                0,
                std::mem::size_of_val(contents).try_into()?,
            )?;
        }
        // else session lost error?
        Ok(())
    }

    /// Read the whole buffer into `result`, resizing it to fit.
    ///
    /// Silently leaves `result` untouched if the owning session is gone.
    pub unsafe fn read<T: PlainData>(&self, result: &mut Vec<T>) -> Result<(), Error> {
        let size = self.mux_buffer().size();
        // Integer division truncates: trailing bytes smaller than one T are dropped.
        let len = size as usize / std::mem::size_of::<T>();
        if len > result.len() {
            // Guarantees capacity >= len, which the set_len below relies on.
            result.reserve(len - result.len());
        }
        if let Some(session) = Weak::upgrade(&self.0.session) {
            session
                .device
                .read_buffer(&self.0.buffer, result.as_mut_ptr() as *mut u8, 0, size)?;
            // Sound because capacity >= len (reserved above) and read_buffer
            // initialized the first `size` bytes; PlainData makes any bit
            // pattern a valid T.
            result.set_len(len);
        }
        // else session lost error?
        Ok(())
    }
}
|
2020-11-25 07:36:27 +11:00
|
|
|
|
|
|
|
impl PipelineBuilder {
    /// Add buffers to the pipeline. Each has its own binding.
    pub fn add_buffers(mut self, n_buffers: u32) -> Self {
        self.0.add_buffers(n_buffers);
        self
    }

    /// Add storage images to the pipeline. Each has its own binding.
    pub fn add_images(mut self, n_images: u32) -> Self {
        self.0.add_images(n_images);
        self
    }

    /// Add a binding with a variable-size array of textures.
    pub fn add_textures(mut self, max_textures: u32) -> Self {
        self.0.add_textures(max_textures);
        self
    }

    /// Consume the builder and create the compute pipeline on the session's device.
    pub unsafe fn create_compute_pipeline<'a>(
        self,
        session: &Session,
        code: ShaderCode<'a>,
    ) -> Result<Pipeline, Error> {
        self.0.create_compute_pipeline(&session.0.device, code)
    }
}
|
|
|
|
|
|
|
|
impl DescriptorSetBuilder {
|
|
|
|
pub fn add_buffers<'a>(mut self, buffers: impl IntoRefs<'a, Buffer>) -> Self {
|
2021-05-26 11:06:51 +10:00
|
|
|
let mux_buffers = buffers
|
2020-11-25 07:36:27 +11:00
|
|
|
.into_refs()
|
2021-05-26 11:06:51 +10:00
|
|
|
.map(|b| b.mux_buffer())
|
|
|
|
.collect::<SmallVec<[_; 8]>>();
|
|
|
|
self.0.add_buffers(&mux_buffers);
|
2020-11-25 07:36:27 +11:00
|
|
|
self
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn add_images<'a>(mut self, images: impl IntoRefs<'a, Image>) -> Self {
|
2021-05-26 11:06:51 +10:00
|
|
|
let mux_images = images
|
|
|
|
.into_refs()
|
|
|
|
.map(|i| i.mux_image())
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
self.0.add_images(&mux_images);
|
2020-11-25 07:36:27 +11:00
|
|
|
self
|
|
|
|
}
|
|
|
|
|
implement FillImage command and sRGB support
FillImage is like Fill, except that it takes its color from one or
more image atlases.
kernel4 uses a single image for non-Vulkan hosts, and the dynamic sized array
of image descriptors on Vulkan.
A previous version of this commit used textures. I think images are a better
choice for piet-gpu, for several reasons:
- Texture sampling, in particular textureGrad, is slow on lower spec devices
such as Google Pixel. Texture sampling is particularly slow and difficult to
implement for CPU fallbacks.
- Texture sampling need more parameters, in particular the full u,v
transformation matrix, leading to a large increase in the command size. Since
all commands use the same size, that memory penalty is paid by all scenes, not
just scenes with textures.
- It is unlikely that piet-gpu will support every kind of fill for every
client, because each kind must be added to kernel4.
With FillImage, a client will prepare the image(s) in separate shader stages,
sampling and applying transformations and special effects as needed. Textures
that align with the output pixel grid can be used directly, without
pre-processing.
Note that the pre-processing step can run concurrently with the piet-gpu pipeline;
Only the last stage, kernel4, needs the images.
Pre-processing most likely uses fixed function vertex/fragment programs,
which on some GPUs may run in parallel with piet-gpu's compute programs.
While here, fix a few validation errors:
- Explicitly enable EXT_descriptor_indexing, KHR_maintenance3,
KHR_get_physical_device_properties2.
- Specify a vkDescriptorSetVariableDescriptorCountAllocateInfo for
vkAllocateDescriptorSets. Otherwise, variable image2D arrays won't work (but
sampler2D arrays do, at least on my setup).
Updates #38
Signed-off-by: Elias Naur <mail@eliasnaur.com>
2020-12-29 08:02:39 +11:00
|
|
|
pub fn add_textures<'a>(mut self, images: impl IntoRefs<'a, Image>) -> Self {
|
2021-05-26 11:06:51 +10:00
|
|
|
let mux_images = images
|
|
|
|
.into_refs()
|
|
|
|
.map(|i| i.mux_image())
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
self.0.add_textures(&mux_images);
|
2020-11-19 10:54:11 +11:00
|
|
|
self
|
|
|
|
}
|
|
|
|
|
2020-11-25 07:36:27 +11:00
|
|
|
pub unsafe fn build(
|
|
|
|
self,
|
|
|
|
session: &Session,
|
|
|
|
pipeline: &Pipeline,
|
|
|
|
) -> Result<DescriptorSet, Error> {
|
|
|
|
self.0.build(&session.0.device, pipeline)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This lets us use either a slice or a vector. The type is clunky but it
// seems fine enough to use.
/// A collection that can be viewed as an iterator of `&T` references.
pub trait IntoRefs<'a, T: 'a> {
    type Iterator: Iterator<Item = &'a T>;

    /// Convert the collection into an iterator of references.
    fn into_refs(self) -> Self::Iterator;
}
|
|
|
|
|
|
|
|
impl<'a, T> IntoRefs<'a, T> for &'a [T] {
    type Iterator = std::slice::Iter<'a, T>;
    // A slice of owned values already iterates by reference.
    fn into_refs(self) -> Self::Iterator {
        self.into_iter()
    }
}
|
|
|
|
|
|
|
|
impl<'a, T> IntoRefs<'a, T> for &'a [&'a T] {
    type Iterator = std::iter::Copied<std::slice::Iter<'a, &'a T>>;
    // `copied` flattens `&&T` items down to `&T`.
    fn into_refs(self) -> Self::Iterator {
        self.into_iter().copied()
    }
}
|
|
|
|
|
2021-05-25 07:25:13 +10:00
|
|
|
// Const generics let one impl cover references to arrays of any length.
impl<'a, T, const N: usize> IntoRefs<'a, T> for &'a [&'a T; N] {
    type Iterator = std::iter::Copied<std::slice::Iter<'a, &'a T>>;
    fn into_refs(self) -> Self::Iterator {
        self.into_iter().copied()
    }
}
|
|
|
|
|
|
|
|
impl<'a, T> IntoRefs<'a, T> for Vec<&'a T> {
    type Iterator = std::vec::IntoIter<&'a T>;
    // The vector already holds references, so consuming it yields `&T` directly.
    fn into_refs(self) -> Self::Iterator {
        self.into_iter()
    }
}
|
2021-05-25 06:56:24 +10:00
|
|
|
|
|
|
|
impl From<Buffer> for RetainResource {
    /// Retain a buffer for the duration of a submission.
    fn from(buf: Buffer) -> Self {
        RetainResource::Buffer(buf)
    }
}
|
|
|
|
|
|
|
|
impl From<Image> for RetainResource {
    /// Retain an image for the duration of a submission.
    fn from(img: Image) -> Self {
        RetainResource::Image(img)
    }
}
|
|
|
|
|
2021-05-25 08:42:25 +10:00
|
|
|
// Convenience so callers can pass a borrow: the clone is an `Arc` refcount
// bump for `Buffer`/`Image`, not a deep copy.
impl<'a, T: Clone + Into<RetainResource>> From<&'a T> for RetainResource {
    fn from(resource: &'a T) -> Self {
        resource.clone().into()
    }
}
|