Merge pull request #135 from linebender/bufwrite

Access buffer data through mapping

commit 5ea5c4bb9a
piet-gpu-hal/src/backend.rs

@@ -16,7 +16,7 @@
 //! The generic trait for backends to implement.
 
-use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams};
+use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, MapMode, SamplerParams};
 
 pub trait Device: Sized {
     type Buffer: 'static;
@@ -114,36 +114,33 @@ pub trait Device: Sized {
         fence: Option<&mut Self::Fence>,
     ) -> Result<(), Error>;
 
-    /// Copy data from the buffer to memory.
-    ///
-    /// Discussion question: add offset?
+    /// Map the buffer into addressable memory.
     ///
     /// # Safety
     ///
-    /// The buffer must be valid to access. The destination memory must be valid to
-    /// write to. The ranges must not overlap. The offset + size must be within
-    /// the buffer's allocation, and size within the destination.
-    unsafe fn read_buffer(
+    /// The buffer must be valid to access. The offset + size must be within the
+    /// buffer's allocation. The buffer must not already be mapped. Of course,
+    /// the usual safety rules apply to the returned pointer.
+    unsafe fn map_buffer(
         &self,
         buffer: &Self::Buffer,
-        dst: *mut u8,
         offset: u64,
         size: u64,
-    ) -> Result<(), Error>;
+        mode: MapMode,
+    ) -> Result<*mut u8, Error>;
 
-    /// Copy data from memory to the buffer.
+    /// Unmap the buffer.
     ///
     /// # Safety
     ///
-    /// The buffer must be valid to access. The source memory must be valid to
-    /// read from. The ranges must not overlap. The offset + size must be within
-    /// the buffer's allocation, and size within the source.
-    unsafe fn write_buffer(
+    /// The buffer must be mapped. The parameters must be the same as the map
+    /// call.
+    unsafe fn unmap_buffer(
         &self,
         buffer: &Self::Buffer,
-        contents: *const u8,
         offset: u64,
         size: u64,
+        mode: MapMode,
     ) -> Result<(), Error>;
 
     unsafe fn create_semaphore(&self) -> Result<Self::Semaphore, Error>;
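For orientation, a minimal caller-side sketch of the new contract (not part of the diff; the helper name and zero-fill payload are assumptions, and it would live inside the crate since the backend module is private). Note that unmap_buffer takes the same offset/size/mode that were passed to map_buffer:

    use crate::backend::Device;
    use crate::{Error, MapMode};

    // Hypothetical crate-internal helper: map, write through the pointer, unmap.
    unsafe fn zero_region<D: Device>(
        device: &D,
        buffer: &D::Buffer,
        offset: u64,
        size: u64,
    ) -> Result<(), Error> {
        let ptr = device.map_buffer(buffer, offset, size, MapMode::Write)?;
        std::ptr::write_bytes(ptr, 0, size as usize); // fill the mapped region
        device.unmap_buffer(buffer, offset, size, MapMode::Write)
    }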
piet-gpu-hal/src/bufwrite.rs (new file, 150 lines)

@@ -0,0 +1,150 @@
+// Copyright © 2021 piet-gpu developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! An abstraction for writing to GPU buffers.
+
+use bytemuck::Pod;
+
+/// A GPU buffer to be filled.
+pub struct BufWrite {
+    ptr: *mut u8,
+    len: usize,
+    capacity: usize,
+}
+
+impl BufWrite {
+    pub(crate) fn new(ptr: *mut u8, len: usize, capacity: usize) -> BufWrite {
+        BufWrite { ptr, len, capacity }
+    }
+
+    /// Append a plain data object to the buffer.
+    ///
+    /// Panics if capacity is inadequate.
+    #[inline]
+    pub fn push(&mut self, item: impl Pod) {
+        self.push_bytes(bytemuck::bytes_of(&item));
+    }
+
+    /// Extend with a slice of plain data objects.
+    ///
+    /// Panics if capacity is inadequate.
+    #[inline]
+    pub fn extend_slice(&mut self, slice: &[impl Pod]) {
+        self.push_bytes(bytemuck::cast_slice(slice));
+    }
+
+    /// Extend with a byte slice.
+    ///
+    /// Panics if capacity is inadequate.
+    #[inline]
+    pub fn push_bytes(&mut self, bytes: &[u8]) {
+        let len = bytes.len();
+        assert!(self.capacity - self.len >= len);
+        unsafe {
+            std::ptr::copy_nonoverlapping(bytes.as_ptr(), self.ptr.add(self.len), len);
+        }
+        self.len += len;
+    }
+
+    /// Extend with zeros.
+    ///
+    /// Panics if capacity is inadequate.
+    #[inline]
+    pub fn fill_zero(&mut self, len: usize) {
+        assert!(self.capacity - self.len >= len);
+        unsafe {
+            let slice = std::slice::from_raw_parts_mut(self.ptr.add(self.len), len);
+            slice.fill(0);
+        }
+        self.len += len;
+    }
+
+    /// The total capacity of the buffer, in bytes.
+    #[inline]
+    pub fn capacity(&self) -> usize {
+        self.capacity
+    }
+
+    /// Extend with an iterator over plain data objects.
+    ///
+    /// Currently, this doesn't panic, just truncates. That may change.
+    // Note: when specialization lands, this can be another impl of
+    // `Extend`.
+    pub fn extend_ref_iter<'a, I, T: Pod + 'a>(&mut self, iter: I)
+    where
+        I: IntoIterator<Item = &'a T>,
+    {
+        let item_size = std::mem::size_of::<T>();
+        if item_size == 0 {
+            return;
+        }
+        let mut iter = iter.into_iter();
+        let n_remaining = (self.capacity - self.len) / item_size;
+        unsafe {
+            let mut dst = self.ptr.add(self.len);
+            for _ in 0..n_remaining {
+                if let Some(item) = iter.next() {
+                    std::ptr::copy_nonoverlapping(
+                        bytemuck::bytes_of(item).as_ptr(),
+                        dst,
+                        item_size,
+                    );
+                    self.len += item_size;
+                    dst = dst.add(item_size);
+                } else {
+                    break;
+                }
+            }
+        }
+        // TODO: should we test the iter and panic on overflow?
+    }
+}
+
+impl std::ops::Deref for BufWrite {
+    type Target = [u8];
+    fn deref(&self) -> &[u8] {
+        unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
+    }
+}
+
+impl std::ops::DerefMut for BufWrite {
+    fn deref_mut(&mut self) -> &mut [u8] {
+        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.len) }
+    }
+}
+
+impl<T: Pod> std::iter::Extend<T> for BufWrite {
+    fn extend<I>(&mut self, iter: I)
+    where
+        I: IntoIterator<Item = T>,
+    {
+        let item_size = std::mem::size_of::<T>();
+        if item_size == 0 {
+            return;
+        }
+        let mut iter = iter.into_iter();
+        let n_remaining = (self.capacity - self.len) / item_size;
+        unsafe {
+            let mut dst = self.ptr.add(self.len);
+            for _ in 0..n_remaining {
+                if let Some(item) = iter.next() {
+                    std::ptr::copy_nonoverlapping(
+                        bytemuck::bytes_of(&item).as_ptr(),
+                        dst,
+                        item_size,
+                    );
+                    self.len += item_size;
+                    dst = dst.add(item_size);
+                } else {
+                    break;
+                }
+            }
+        }
+        // TODO: should we test the iter and panic on overflow?
+    }
+}
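A usage sketch (assumed, not from the diff): a BufWrite is handed to the closure of Session::create_buffer_with (added in hub.rs further below), so a buffer can be filled in place without first building a Vec. The size and values here are illustrative only:

    // Hypothetical: fill a 4096-byte storage buffer in place.
    let buf = session.create_buffer_with(
        4096,
        |b| {
            b.push(42u32);                       // one Pod value
            b.extend_slice(&[1.0f32, 2.0, 3.0]); // a slice of Pod values
            b.extend(0u32..16);                  // an iterator, via the Extend impl
            let padding = b.capacity() - b.len();
            b.fill_zero(padding);                // zero the rest; len() via Deref
        },
        BufferUsage::STORAGE,
    )?;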
piet-gpu-hal/src/dx12.rs

@@ -21,7 +21,7 @@ use raw_window_handle::{HasRawWindowHandle, RawWindowHandle};
 
 use smallvec::SmallVec;
 
-use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, WorkgroupLimits};
+use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, MapMode, WorkgroupLimits};
 
 use self::{
     descriptor::{CpuHeapRefOwned, DescriptorPool, GpuHeapRefOwned},
@@ -381,12 +381,10 @@ impl crate::backend::Device for Dx12Device {
 
     unsafe fn fetch_query_pool(&self, pool: &Self::QueryPool) -> Result<Vec<f64>, Error> {
         let mut buf = vec![0u64; pool.n_queries as usize];
-        self.read_buffer(
-            &pool.buf,
-            buf.as_mut_ptr() as *mut u8,
-            0,
-            mem::size_of_val(buf.as_slice()) as u64,
-        )?;
+        let size = mem::size_of_val(buf.as_slice());
+        let mapped = self.map_buffer(&pool.buf, 0, size as u64, MapMode::Read)?;
+        std::ptr::copy_nonoverlapping(mapped, buf.as_mut_ptr() as *mut u8, size);
+        self.unmap_buffer(&pool.buf, 0, size as u64, MapMode::Read)?;
         let ts0 = buf[0];
         let tsp = (self.ts_freq as f64).recip();
         let result = buf[1..]
@@ -418,29 +416,25 @@ impl crate::backend::Device for Dx12Device {
         Ok(())
     }
 
-    unsafe fn read_buffer(
+    unsafe fn map_buffer(
         &self,
         buffer: &Self::Buffer,
-        dst: *mut u8,
         offset: u64,
         size: u64,
-    ) -> Result<(), Error> {
-        buffer
-            .resource
-            .read_resource(dst, offset as usize, size as usize)?;
-        Ok(())
+        mode: MapMode,
+    ) -> Result<*mut u8, Error> {
+        let mapped = buffer.resource.map_buffer(offset, size, mode)?;
+        Ok(mapped)
     }
 
-    unsafe fn write_buffer(
+    unsafe fn unmap_buffer(
         &self,
         buffer: &Self::Buffer,
-        contents: *const u8,
         offset: u64,
         size: u64,
+        mode: MapMode,
     ) -> Result<(), Error> {
-        buffer
-            .resource
-            .write_resource(contents, offset as usize, size as usize)?;
+        buffer.resource.unmap_buffer(offset, size, mode)?;
         Ok(())
     }
piet-gpu-hal/src/dx12/wrappers.rs

@@ -7,6 +7,7 @@
 // except according to those terms.
 
 use crate::dx12::error::{self, error_if_failed_else_unit, explain_error, Error};
+use crate::MapMode;
 use smallvec::SmallVec;
 use std::convert::{TryFrom, TryInto};
 use std::sync::atomic::{AtomicPtr, Ordering};
@@ -105,46 +106,38 @@ impl Resource {
         self.ptr.store(ptr::null_mut(), Ordering::Relaxed);
     }
 
-    pub unsafe fn write_resource(
+    pub unsafe fn map_buffer(
         &self,
-        data: *const u8,
-        offset: usize,
-        size: usize,
-    ) -> Result<(), Error> {
+        offset: u64,
+        size: u64,
+        mode: MapMode,
+    ) -> Result<*mut u8, Error> {
         let mut mapped_memory: *mut u8 = ptr::null_mut();
-        let zero_range = d3d12::D3D12_RANGE { ..mem::zeroed() };
-        let range = d3d12::D3D12_RANGE {
-            Begin: offset,
-            End: offset + size,
-        };
-        explain_error(
-            (*self.get()).Map(0, &zero_range, &mut mapped_memory as *mut _ as *mut _),
-            "could not map GPU mem to CPU mem",
-        )?;
-        ptr::copy_nonoverlapping(data, mapped_memory.add(offset), size);
-        (*self.get()).Unmap(0, &range);
-        Ok(())
-    }
-
-    pub unsafe fn read_resource(
-        &self,
-        dst: *mut u8,
-        offset: usize,
-        size: usize,
-    ) -> Result<(), Error> {
-        let mut mapped_memory: *mut u8 = ptr::null_mut();
+        let (begin, end) = match mode {
+            MapMode::Read => (offset as usize, (offset + size) as usize),
+            MapMode::Write => (0, 0),
+        };
         let range = d3d12::D3D12_RANGE {
-            Begin: offset,
-            End: offset + size,
+            Begin: begin,
+            End: end,
         };
-        let zero_range = d3d12::D3D12_RANGE { ..mem::zeroed() };
         explain_error(
             (*self.get()).Map(0, &range, &mut mapped_memory as *mut _ as *mut _),
             "could not map GPU mem to CPU mem",
         )?;
-        ptr::copy_nonoverlapping(mapped_memory.add(offset), dst, size);
-        (*self.get()).Unmap(0, &zero_range);
+        Ok(mapped_memory.add(offset as usize))
+    }
+
+    pub unsafe fn unmap_buffer(&self, offset: u64, size: u64, mode: MapMode) -> Result<(), Error> {
+        let (begin, end) = match mode {
+            MapMode::Read => (0, 0),
+            MapMode::Write => (offset as usize, (offset + size) as usize),
+        };
+        let range = d3d12::D3D12_RANGE {
+            Begin: begin,
+            End: end,
+        };
+        (*self.get()).Unmap(0, &range);
         Ok(())
     }
 }
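A note on the range handling above (background from the D3D12 documentation, not stated in the diff): the range passed to Map declares which bytes the CPU will read, and the range passed to Unmap declares which bytes the CPU wrote; an empty range means none. So a read-mode map passes the real range to Map and an empty one to Unmap, and a write-mode map does the reverse. A condensed, hypothetical restatement of that convention:

    use piet_gpu_hal::MapMode;

    // Sketch only: the D3D12_RANGE endpoints chosen per operation and mode.
    fn d3d12_range(mode: &MapMode, unmapping: bool, offset: usize, size: usize) -> (usize, usize) {
        match (mode, unmapping) {
            // Map for read: CPU will read these bytes. Unmap after write: CPU wrote them.
            (MapMode::Read, false) | (MapMode::Write, true) => (offset, offset + size),
            // Otherwise no bytes flow in that direction; use the empty range.
            _ => (0, 0),
        }
    }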
piet-gpu-hal/src/hub.rs

@@ -7,12 +7,13 @@
 //! even more in time.
 
 use std::convert::TryInto;
+use std::ops::{Bound, RangeBounds};
 use std::sync::{Arc, Mutex, Weak};
 
 use bytemuck::Pod;
 use smallvec::SmallVec;
 
-use crate::{mux, BackendType};
+use crate::{mux, BackendType, BufWrite, MapMode};
 use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams};
 
@@ -112,6 +113,28 @@ pub enum RetainResource {
     Image(Image),
 }
 
+/// A buffer mapped for writing.
+///
+/// When this structure is dropped, the buffer will be unmapped.
+pub struct BufWriteGuard<'a> {
+    buf_write: BufWrite,
+    session: Arc<SessionInner>,
+    buffer: &'a mux::Buffer,
+    offset: u64,
+    size: u64,
+}
+
+/// A buffer mapped for reading.
+///
+/// When this structure is dropped, the buffer will be unmapped.
+pub struct BufReadGuard<'a> {
+    bytes: &'a [u8],
+    session: Arc<SessionInner>,
+    buffer: &'a mux::Buffer,
+    offset: u64,
+    size: u64,
+}
+
 impl Session {
     /// Create a new session, choosing the best backend.
     pub fn new(device: mux::Device) -> Session {
@@ -232,20 +255,24 @@ impl Session {
         contents: &[impl Pod],
         usage: BufferUsage,
     ) -> Result<Buffer, Error> {
-        unsafe {
-            let bytes = bytemuck::cast_slice(contents);
-            self.create_buffer_init_raw(bytes.as_ptr(), bytes.len().try_into()?, usage)
-        }
+        let size = std::mem::size_of_val(contents);
+        let bytes = bytemuck::cast_slice(contents);
+        self.create_buffer_with(size as u64, |b| b.push_bytes(bytes), usage)
     }
 
-    /// Create a buffer with initialized data, from a raw pointer memory region.
-    pub unsafe fn create_buffer_init_raw(
+    /// Create a buffer with initialized data.
+    ///
+    /// The buffer is filled by the provided function. The same details about
+    /// staging buffers apply as [`create_buffer_init`].
+    pub fn create_buffer_with(
         &self,
-        contents: *const u8,
         size: u64,
+        f: impl Fn(&mut BufWrite),
         usage: BufferUsage,
     ) -> Result<Buffer, Error> {
-        let use_staging_buffer = !usage.intersects(BufferUsage::MAP_READ | BufferUsage::MAP_WRITE)
-            && self.gpu_info().use_staging_buffers;
+        unsafe {
+            let use_staging_buffer = !usage
+                .intersects(BufferUsage::MAP_READ | BufferUsage::MAP_WRITE)
+                && self.gpu_info().use_staging_buffers;
             let create_usage = if use_staging_buffer {
                 BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC
@@ -253,9 +280,15 @@ impl Session {
                 usage | BufferUsage::MAP_WRITE
             };
             let create_buf = self.create_buffer(size, create_usage)?;
-            self.0
-                .device
-                .write_buffer(&create_buf.mux_buffer(), contents, 0, size)?;
+            let mapped =
+                self.0
+                    .device
+                    .map_buffer(&create_buf.mux_buffer(), 0, size, MapMode::Write)?;
+            let mut buf_write = BufWrite::new(mapped, 0, size as usize);
+            f(&mut buf_write);
+            self.0
+                .device
+                .unmap_buffer(&create_buf.mux_buffer(), 0, size, MapMode::Write)?;
             if use_staging_buffer {
                 let buf = self.create_buffer(size, usage | BufferUsage::COPY_DST)?;
                 let mut staging_cmd_buf = self.0.staging_cmd_buf.lock().unwrap();
@@ -273,6 +306,7 @@ impl Session {
             Ok(create_buf)
         }
+        }
     }
 
     /// Create an image.
     ///
@@ -669,12 +703,22 @@ impl Buffer {
     pub unsafe fn write(&mut self, contents: &[impl Pod]) -> Result<(), Error> {
         let bytes = bytemuck::cast_slice(contents);
         if let Some(session) = Weak::upgrade(&self.0.session) {
-            session.device.write_buffer(
-                &self.0.buffer,
-                bytes.as_ptr(),
-                0,
-                bytes.len().try_into()?,
-            )?;
+            let size = bytes.len().try_into()?;
+            let buf_size = self.0.buffer.size();
+            if size > buf_size {
+                return Err(format!(
+                    "Trying to write {} bytes into buffer of size {}",
+                    size, buf_size
+                )
+                .into());
+            }
+            let mapped = session
+                .device
+                .map_buffer(&self.0.buffer, 0, size, MapMode::Write)?;
+            std::ptr::copy_nonoverlapping(bytes.as_ptr(), mapped, bytes.len());
+            session
+                .device
+                .unmap_buffer(&self.0.buffer, 0, size, MapMode::Write)?;
         }
         // else session lost error?
         Ok(())
@@ -694,15 +738,115 @@ impl Buffer {
             result.reserve(len - result.len());
         }
         if let Some(session) = Weak::upgrade(&self.0.session) {
+            let mapped = session
+                .device
+                .map_buffer(&self.0.buffer, 0, size, MapMode::Read)?;
+            std::ptr::copy_nonoverlapping(mapped, result.as_mut_ptr() as *mut u8, size as usize);
             session
                 .device
-                .read_buffer(&self.0.buffer, result.as_mut_ptr() as *mut u8, 0, size)?;
+                .unmap_buffer(&self.0.buffer, 0, size, MapMode::Read)?;
             result.set_len(len);
         }
         // else session lost error?
         Ok(())
     }
 
+    /// Map a buffer for writing.
+    ///
+    /// The mapped buffer is represented by a "guard" structure, which will unmap
+    /// the buffer when it's dropped. That also has a number of methods for pushing
+    /// bytes and [`bytemuck::Pod`] objects.
+    ///
+    /// The buffer must have been created with `MAP_WRITE` usage.
+    pub unsafe fn map_write<'a>(
+        &'a mut self,
+        range: impl RangeBounds<usize>,
+    ) -> Result<BufWriteGuard<'a>, Error> {
+        let offset = match range.start_bound() {
+            Bound::Unbounded => 0,
+            Bound::Included(&s) => s.try_into()?,
+            Bound::Excluded(_) => unreachable!(),
+        };
+        let end = match range.end_bound() {
+            Bound::Unbounded => self.size(),
+            Bound::Excluded(&s) => s.try_into()?,
+            Bound::Included(&s) => s.checked_add(1).unwrap().try_into()?,
+        };
+        self.map_write_impl(offset, end - offset)
+    }
+
+    unsafe fn map_write_impl<'a>(
+        &'a self,
+        offset: u64,
+        size: u64,
+    ) -> Result<BufWriteGuard<'a>, Error> {
+        if let Some(session) = Weak::upgrade(&self.0.session) {
+            let ptr = session
+                .device
+                .map_buffer(&self.0.buffer, offset, size, MapMode::Write)?;
+            let buf_write = BufWrite::new(ptr, 0, size as usize);
+            let guard = BufWriteGuard {
+                buf_write,
+                session,
+                buffer: &self.0.buffer,
+                offset,
+                size,
+            };
+            Ok(guard)
+        } else {
+            Err("session lost".into())
+        }
+    }
+
+    /// Map a buffer for reading.
+    ///
+    /// The mapped buffer is represented by a "guard" structure, which will unmap
+    /// the buffer when it's dropped, and derefs to a plain byte slice.
+    ///
+    /// The buffer must have been created with `MAP_READ` usage. The caller
+    /// is also responsible for ensuring that this does not read uninitialized
+    /// memory.
+    pub unsafe fn map_read<'a>(
+        // Discussion: should be &mut? Buffer is Clone, but maybe that should change.
+        &'a self,
+        range: impl RangeBounds<usize>,
+    ) -> Result<BufReadGuard<'a>, Error> {
+        let offset = match range.start_bound() {
+            Bound::Unbounded => 0,
+            Bound::Included(&s) => s.try_into()?,
+            Bound::Excluded(_) => unreachable!(),
+        };
+        let end = match range.end_bound() {
+            Bound::Unbounded => self.size(),
+            Bound::Excluded(&s) => s.try_into()?,
+            Bound::Included(&s) => s.checked_add(1).unwrap().try_into()?,
+        };
+        self.map_read_impl(offset, end - offset)
+    }
+
+    unsafe fn map_read_impl<'a>(
+        &'a self,
+        offset: u64,
+        size: u64,
+    ) -> Result<BufReadGuard<'a>, Error> {
+        if let Some(session) = Weak::upgrade(&self.0.session) {
+            let ptr = session
+                .device
+                .map_buffer(&self.0.buffer, offset, size, MapMode::Read)?;
+            let bytes = std::slice::from_raw_parts(ptr, size as usize);
+            let guard = BufReadGuard {
+                bytes,
+                session,
+                buffer: &self.0.buffer,
+                offset,
+                size,
+            };
+            Ok(guard)
+        } else {
+            Err("session lost".into())
+        }
+    }
+
     /// The size of the buffer.
     ///
     /// This is at least as large as the value provided on creation.
@@ -801,3 +945,58 @@ impl<'a, T: Clone + Into<RetainResource>> From<&'a T> for RetainResource {
         resource.clone().into()
     }
 }
+
+impl<'a> Drop for BufWriteGuard<'a> {
+    fn drop(&mut self) {
+        unsafe {
+            let _ = self.session.device.unmap_buffer(
+                self.buffer,
+                self.offset,
+                self.size,
+                MapMode::Write,
+            );
+        }
+    }
+}
+
+impl<'a> std::ops::Deref for BufWriteGuard<'a> {
+    type Target = BufWrite;
+
+    fn deref(&self) -> &Self::Target {
+        &self.buf_write
+    }
+}
+
+impl<'a> std::ops::DerefMut for BufWriteGuard<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.buf_write
+    }
+}
+
+impl<'a> Drop for BufReadGuard<'a> {
+    fn drop(&mut self) {
+        unsafe {
+            let _ = self.session.device.unmap_buffer(
+                self.buffer,
+                self.offset,
+                self.size,
+                MapMode::Read,
+            );
+        }
+    }
+}
+
+impl<'a> std::ops::Deref for BufReadGuard<'a> {
+    type Target = [u8];
+
+    fn deref(&self) -> &Self::Target {
+        self.bytes
+    }
+}
+
+impl<'a> BufReadGuard<'a> {
+    /// Interpret the buffer as a slice of a plain data type.
+    pub fn cast_slice<T: Pod>(&self) -> &[T] {
+        bytemuck::cast_slice(self.bytes)
+    }
+}
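A usage sketch for the new guard API (assumed, not part of the diff): buffer is a hypothetical mutable Buffer created with MAP_READ | MAP_WRITE usage, so no staging buffer is involved:

    // Hypothetical round trip through the RAII guards.
    unsafe {
        {
            let mut guard = buffer.map_write(0..16)?; // BufWriteGuard
            guard.extend_slice(&[1u32, 2, 3, 4]);     // BufWrite methods via Deref
        } // guard dropped: the buffer is unmapped here
        let guard = buffer.map_read(0..16)?;          // BufReadGuard derefs to &[u8]
        let words: &[u32] = guard.cast_slice();
        assert_eq!(words, &[1, 2, 3, 4]);
    }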
piet-gpu-hal/src/lib.rs

@@ -7,6 +7,7 @@ use bitflags::bitflags;
 
 mod backend;
 mod bestfit;
+mod bufwrite;
 mod hub;
 
 #[macro_use]
@@ -18,8 +19,10 @@ pub use crate::mux::{
     DescriptorSet, Fence, Instance, Pipeline, QueryPool, Sampler, Semaphore, ShaderCode, Surface,
     Swapchain,
 };
+pub use bufwrite::BufWrite;
 pub use hub::{
-    Buffer, CmdBuf, DescriptorSetBuilder, Image, RetainResource, Session, SubmittedCmdBuf,
+    BufReadGuard, BufWriteGuard, Buffer, CmdBuf, DescriptorSetBuilder, Image, RetainResource,
+    Session, SubmittedCmdBuf,
 };
 
 // TODO: because these are conditionally included, "cargo fmt" does not
@@ -128,6 +131,14 @@ pub enum BindType {
     // TODO: Uniform, Sampler, maybe others
 }
 
+/// Whether to map a buffer in read or write mode.
+pub enum MapMode {
+    /// Map for reading.
+    Read,
+    /// Map for writing.
+    Write,
+}
+
 #[derive(Clone, Debug)]
 /// Information about the GPU.
 pub struct GpuInfo {
piet-gpu-hal/src/metal.rs

@@ -30,7 +30,7 @@ use metal::{CGFloat, MTLFeatureSet};
 
 use raw_window_handle::{HasRawWindowHandle, RawWindowHandle};
 
-use crate::{BufferUsage, Error, GpuInfo, WorkgroupLimits};
+use crate::{BufferUsage, Error, GpuInfo, MapMode, WorkgroupLimits};
 
 use util::*;
 
@@ -339,41 +339,27 @@ impl crate::backend::Device for MtlDevice {
         Ok(())
     }
 
-    unsafe fn read_buffer(
+    unsafe fn map_buffer(
         &self,
         buffer: &Self::Buffer,
-        dst: *mut u8,
         offset: u64,
-        size: u64,
-    ) -> Result<(), Error> {
+        _size: u64,
+        _mode: MapMode,
+    ) -> Result<*mut u8, Error> {
         let contents_ptr = buffer.buffer.contents();
         if contents_ptr.is_null() {
-            return Err("probably trying to read from private buffer".into());
+            return Err("probably trying to map private buffer".into());
         }
-        std::ptr::copy_nonoverlapping(
-            (contents_ptr as *const u8).add(offset as usize),
-            dst,
-            size as usize,
-        );
-        Ok(())
+        Ok((contents_ptr as *mut u8).add(offset as usize))
     }
 
-    unsafe fn write_buffer(
+    unsafe fn unmap_buffer(
         &self,
-        buffer: &Buffer,
-        contents: *const u8,
-        offset: u64,
-        size: u64,
+        _buffer: &Self::Buffer,
+        _offset: u64,
+        _size: u64,
+        _mode: MapMode,
     ) -> Result<(), Error> {
-        let contents_ptr = buffer.buffer.contents();
-        if contents_ptr.is_null() {
-            return Err("probably trying to write to private buffer".into());
-        }
-        std::ptr::copy_nonoverlapping(
-            contents,
-            (contents_ptr as *mut u8).add(offset as usize),
-            size as usize,
-        );
         Ok(())
     }
piet-gpu-hal/src/mux.rs

@@ -35,6 +35,7 @@ use crate::backend::DescriptorSetBuilder as DescriptorSetBuilderTrait;
 use crate::backend::Device as DeviceTrait;
 use crate::BackendType;
 use crate::BindType;
+use crate::MapMode;
 use crate::{BufferUsage, Error, GpuInfo, ImageLayout, InstanceFlags};
 
 mux_enum! {
@@ -445,31 +446,31 @@ impl Device {
         }
     }
 
-    pub unsafe fn read_buffer(
+    pub unsafe fn map_buffer(
         &self,
         buffer: &Buffer,
-        dst: *mut u8,
         offset: u64,
         size: u64,
-    ) -> Result<(), Error> {
+        mode: MapMode,
+    ) -> Result<*mut u8, Error> {
         mux_match! { self;
-            Device::Vk(d) => d.read_buffer(buffer.vk(), dst, offset, size),
-            Device::Dx12(d) => d.read_buffer(buffer.dx12(), dst, offset, size),
-            Device::Mtl(d) => d.read_buffer(buffer.mtl(), dst, offset, size),
+            Device::Vk(d) => d.map_buffer(buffer.vk(), offset, size, mode),
+            Device::Dx12(d) => d.map_buffer(buffer.dx12(), offset, size, mode),
+            Device::Mtl(d) => d.map_buffer(buffer.mtl(), offset, size, mode),
         }
     }
 
-    pub unsafe fn write_buffer(
+    pub unsafe fn unmap_buffer(
         &self,
         buffer: &Buffer,
-        contents: *const u8,
         offset: u64,
         size: u64,
+        mode: MapMode,
     ) -> Result<(), Error> {
         mux_match! { self;
-            Device::Vk(d) => d.write_buffer(buffer.vk(), contents, offset, size),
-            Device::Dx12(d) => d.write_buffer(buffer.dx12(), contents, offset, size),
-            Device::Mtl(d) => d.write_buffer(buffer.mtl(), contents, offset, size),
+            Device::Vk(d) => d.unmap_buffer(buffer.vk(), offset, size, mode),
+            Device::Dx12(d) => d.unmap_buffer(buffer.dx12(), offset, size, mode),
+            Device::Mtl(d) => d.unmap_buffer(buffer.mtl(), offset, size, mode),
         }
     }
piet-gpu-hal/src/vulkan.rs

@@ -13,7 +13,7 @@ use smallvec::SmallVec;
 
 use crate::backend::Device as DeviceTrait;
 use crate::{
-    BindType, BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams, SubgroupSize,
+    BindType, BufferUsage, Error, GpuInfo, ImageLayout, MapMode, SamplerParams, SubgroupSize,
     WorkgroupLimits,
 };
 
@@ -821,14 +821,13 @@ impl crate::backend::Device for VkDevice {
         Ok(())
     }
 
-    unsafe fn read_buffer(
+    unsafe fn map_buffer(
         &self,
         buffer: &Self::Buffer,
-        dst: *mut u8,
         offset: u64,
         size: u64,
-    ) -> Result<(), Error> {
-        let copy_size = size.try_into()?;
+        _mode: MapMode,
+    ) -> Result<*mut u8, Error> {
         let device = &self.device.device;
         let buf = device.map_memory(
             buffer.buffer_memory,
@@ -836,28 +835,17 @@ impl crate::backend::Device for VkDevice {
             size,
             vk::MemoryMapFlags::empty(),
         )?;
-        std::ptr::copy_nonoverlapping(buf as *const u8, dst, copy_size);
-        device.unmap_memory(buffer.buffer_memory);
-        Ok(())
+        Ok(buf as *mut u8)
     }
 
-    unsafe fn write_buffer(
+    unsafe fn unmap_buffer(
         &self,
-        buffer: &Buffer,
-        contents: *const u8,
-        offset: u64,
-        size: u64,
+        buffer: &Self::Buffer,
+        _offset: u64,
+        _size: u64,
+        _mode: MapMode,
     ) -> Result<(), Error> {
-        let copy_size = size.try_into()?;
-        let device = &self.device.device;
-        let buf = device.map_memory(
-            buffer.buffer_memory,
-            offset,
-            size,
-            vk::MemoryMapFlags::empty(),
-        )?;
-        std::ptr::copy_nonoverlapping(contents, buf as *mut u8, copy_size);
-        device.unmap_memory(buffer.buffer_memory);
+        self.device.device.unmap_memory(buffer.buffer_memory);
         Ok(())
     }
tests/src/clear.rs

@@ -61,9 +61,8 @@ pub unsafe fn run_clear_test(runner: &mut Runner, config: &Config) -> TestResult
         }
         total_elapsed += runner.submit(commands);
         if i == 0 || config.verify_all {
-            let mut dst: Vec<u32> = Default::default();
-            out_buf.read(&mut dst);
-            if let Some(failure) = verify(&dst) {
+            let dst = out_buf.map_read(..);
+            if let Some(failure) = verify(dst.cast_slice()) {
                 result.fail(format!("failure at {}", failure));
             }
         }
tests/src/linkedlist.rs

@@ -54,9 +54,8 @@ pub unsafe fn run_linkedlist_test(runner: &mut Runner, config: &Config) -> TestResult
         }
         total_elapsed += runner.submit(commands);
         if i == 0 || config.verify_all {
-            let mut dst: Vec<u32> = Default::default();
-            mem_buf.read(&mut dst);
-            if !verify(&dst) {
+            let dst = mem_buf.map_read(..);
+            if !verify(dst.cast_slice()) {
                 result.fail("incorrect data");
             }
         }
tests/src/prefix.rs

@@ -69,10 +69,13 @@ pub unsafe fn run_prefix_test(
     }
     */
     let n_elements: u64 = config.size.choose(1 << 12, 1 << 24, 1 << 25);
-    let data: Vec<u32> = (0..n_elements as u32).collect();
     let data_buf = runner
         .session
-        .create_buffer_init(&data, BufferUsage::STORAGE)
+        .create_buffer_with(
+            n_elements * 4,
+            |b| b.extend(0..n_elements as u32),
+            BufferUsage::STORAGE,
+        )
         .unwrap();
     let out_buf = runner.buf_down(data_buf.size(), BufferUsage::empty());
     let code = PrefixCode::new(runner, variant);
@@ -91,9 +94,8 @@ pub unsafe fn run_prefix_test(
         }
         total_elapsed += runner.submit(commands);
         if i == 0 || config.verify_all {
-            let mut dst: Vec<u32> = Default::default();
-            out_buf.read(&mut dst);
-            if let Some(failure) = verify(&dst) {
+            let dst = out_buf.map_read(..);
+            if let Some(failure) = verify(dst.cast_slice()) {
                 result.fail(format!("failure at {}", failure));
             }
         }
tests/src/prefix_tree.rs

@@ -47,10 +47,13 @@ pub unsafe fn run_prefix_test(runner: &mut Runner, config: &Config) -> TestResult
     // prone to reading and writing past the end of buffers if this is
     // not a power of the number of elements processed in a workgroup.
     let n_elements: u64 = config.size.choose(1 << 12, 1 << 24, 1 << 24);
-    let data: Vec<u32> = (0..n_elements as u32).collect();
     let data_buf = runner
         .session
-        .create_buffer_init(&data, BufferUsage::STORAGE)
+        .create_buffer_with(
+            n_elements * 4,
+            |b| b.extend(0..n_elements as u32),
+            BufferUsage::STORAGE,
+        )
         .unwrap();
     let out_buf = runner.buf_down(data_buf.size(), BufferUsage::empty());
     let code = PrefixTreeCode::new(runner);
@@ -72,9 +75,8 @@ pub unsafe fn run_prefix_test(runner: &mut Runner, config: &Config) -> TestResult
         }
         total_elapsed += runner.submit(commands);
         if i == 0 || config.verify_all {
-            let mut dst: Vec<u32> = Default::default();
-            out_buf.read(&mut dst);
-            if let Some(failure) = verify(&dst) {
+            let dst = out_buf.map_read(..);
+            if let Some(failure) = verify(dst.cast_slice()) {
                 result.fail(format!("failure at {}", failure));
             }
         }
tests/src/runner.rs

@@ -16,9 +16,12 @@
 
 //! Test runner intended to make it easy to write tests.
 
+use std::ops::RangeBounds;
+
 use bytemuck::Pod;
 use piet_gpu_hal::{
-    BackendType, Buffer, BufferUsage, CmdBuf, Instance, InstanceFlags, QueryPool, Session,
+    BackendType, BufReadGuard, Buffer, BufferUsage, CmdBuf, Instance, InstanceFlags, QueryPool,
+    Session,
 };
 
 pub struct Runner {
@@ -140,4 +143,8 @@ impl BufDown {
     pub unsafe fn read(&self, dst: &mut Vec<impl Pod>) {
         self.stage_buf.read(dst).unwrap()
     }
+
+    pub unsafe fn map_read<'a>(&'a self, range: impl RangeBounds<usize>) -> BufReadGuard<'a> {
+        self.stage_buf.map_read(range).unwrap()
+    }
 }