Add Renderer::upload_scene()

This allows rendering from raw stream data. It may be temporary, depending on how the crates end up being structured after the traditional piet API is removed.
Chad Brokaw 2022-05-10 00:53:48 -04:00
parent c749addf6c
commit e12b063cd3
3 changed files with 205 additions and 53 deletions
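
The new entry point takes an EncodedSceneRef built directly from the raw encoder streams and writes it into one of the per-frame scene buffers. A minimal sketch of the intended call pattern, assuming the stream slices come from a compatible encoder (the variable names are illustrative, not part of the commit):

    // Illustrative only: `renderer`, `buf_ix`, and the stream slices are assumed
    // to be provided by the surrounding application; the types are from this commit.
    let scene_ref = EncodedSceneRef {
        transform_stream: transforms,   // &[T] where T: Copy + Pod
        tag_stream: path_tags,          // &[u8]
        pathseg_stream: path_segs,      // &[u8]
        linewidth_stream: line_widths,  // &[f32]
        drawtag_stream: draw_tags,      // &[u32]
        drawdata_stream: draw_data,     // &[u8]
        n_path,
        n_pathseg,
        n_clip,
        ramp_data: ramps,               // &[u32]; gradient ramps, may be empty
    };
    renderer.upload_scene(&scene_ref, buf_ix)?;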


@@ -37,6 +37,147 @@ pub struct Encoder {
n_clip: u32,
}
#[derive(Copy, Clone, Debug)]
pub struct EncodedSceneRef<'a, T: Copy + Pod> {
pub transform_stream: &'a [T],
pub tag_stream: &'a [u8],
pub pathseg_stream: &'a [u8],
pub linewidth_stream: &'a [f32],
pub drawtag_stream: &'a [u32],
pub drawdata_stream: &'a [u8],
pub n_path: u32,
pub n_pathseg: u32,
pub n_clip: u32,
pub ramp_data: &'a [u32],
}
impl<'a, T: Copy + Pod> EncodedSceneRef<'a, T> {
/// Return a config for the element processing pipeline.
///
/// This does not include further pipeline processing. Also returns the
/// beginning of free memory.
pub fn stage_config(&self) -> (Config, usize) {
// Layout of scene buffer
let drawtag_offset = 0;
let n_drawobj = self.n_drawobj();
let n_drawobj_padded = align_up(n_drawobj, DRAW_PART_SIZE as usize);
let drawdata_offset = drawtag_offset + n_drawobj_padded * DRAWTAG_SIZE;
let trans_offset = drawdata_offset + self.drawdata_stream.len();
let n_trans = self.transform_stream.len();
let n_trans_padded = align_up(n_trans, TRANSFORM_PART_SIZE as usize);
let linewidth_offset = trans_offset + n_trans_padded * TRANSFORM_SIZE;
let n_linewidth = self.linewidth_stream.len();
let pathtag_offset = linewidth_offset + n_linewidth * LINEWIDTH_SIZE;
let n_pathtag = self.tag_stream.len();
let n_pathtag_padded = align_up(n_pathtag, PATHSEG_PART_SIZE as usize);
let pathseg_offset = pathtag_offset + n_pathtag_padded;
// Layout of memory
let mut alloc = 0;
let trans_alloc = alloc;
alloc += trans_alloc + n_trans_padded * TRANSFORM_SIZE;
let pathseg_alloc = alloc;
alloc += pathseg_alloc + self.n_pathseg as usize * PATHSEG_SIZE;
let path_bbox_alloc = alloc;
let n_path = self.n_path as usize;
alloc += path_bbox_alloc + n_path * PATH_BBOX_SIZE;
let drawmonoid_alloc = alloc;
alloc += n_drawobj_padded * DRAWMONOID_SIZE;
let anno_alloc = alloc;
alloc += n_drawobj * ANNOTATED_SIZE;
let clip_alloc = alloc;
let n_clip = self.n_clip as usize;
const CLIP_SIZE: usize = 4;
alloc += n_clip * CLIP_SIZE;
let clip_bic_alloc = alloc;
const CLIP_BIC_SIZE: usize = 8;
// This can round down, as we only reduce the prefix
alloc += (n_clip / CLIP_PART_SIZE as usize) * CLIP_BIC_SIZE;
let clip_stack_alloc = alloc;
const CLIP_EL_SIZE: usize = 20;
alloc += n_clip * CLIP_EL_SIZE;
let clip_bbox_alloc = alloc;
const CLIP_BBOX_SIZE: usize = 16;
alloc += align_up(n_clip as usize, CLIP_PART_SIZE as usize) * CLIP_BBOX_SIZE;
let draw_bbox_alloc = alloc;
alloc += n_drawobj * DRAW_BBOX_SIZE;
let drawinfo_alloc = alloc;
// TODO: not optimized; it can be accumulated during encoding or summed from drawtags
const MAX_DRAWINFO_SIZE: usize = 44;
alloc += n_drawobj * MAX_DRAWINFO_SIZE;
let config = Config {
n_elements: n_drawobj as u32,
n_pathseg: self.n_pathseg,
pathseg_alloc: pathseg_alloc as u32,
anno_alloc: anno_alloc as u32,
trans_alloc: trans_alloc as u32,
path_bbox_alloc: path_bbox_alloc as u32,
drawmonoid_alloc: drawmonoid_alloc as u32,
clip_alloc: clip_alloc as u32,
clip_bic_alloc: clip_bic_alloc as u32,
clip_stack_alloc: clip_stack_alloc as u32,
clip_bbox_alloc: clip_bbox_alloc as u32,
draw_bbox_alloc: draw_bbox_alloc as u32,
drawinfo_alloc: drawinfo_alloc as u32,
n_trans: n_trans as u32,
n_path: self.n_path,
n_clip: self.n_clip,
trans_offset: trans_offset as u32,
linewidth_offset: linewidth_offset as u32,
pathtag_offset: pathtag_offset as u32,
pathseg_offset: pathseg_offset as u32,
drawtag_offset: drawtag_offset as u32,
drawdata_offset: drawdata_offset as u32,
..Default::default()
};
(config, alloc)
}
pub fn write_scene(&self, buf: &mut BufWrite) {
buf.extend_slice(&self.drawtag_stream);
let n_drawobj = self.drawtag_stream.len();
buf.fill_zero(padding(n_drawobj, DRAW_PART_SIZE as usize) * DRAWTAG_SIZE);
buf.extend_slice(&self.drawdata_stream);
buf.extend_slice(&self.transform_stream);
let n_trans = self.transform_stream.len();
buf.fill_zero(padding(n_trans, TRANSFORM_PART_SIZE as usize) * TRANSFORM_SIZE);
buf.extend_slice(&self.linewidth_stream);
buf.extend_slice(&self.tag_stream);
let n_pathtag = self.tag_stream.len();
buf.fill_zero(padding(n_pathtag, PATHSEG_PART_SIZE as usize));
buf.extend_slice(&self.pathseg_stream);
}
/// The number of draw objects in the draw object stream.
pub(crate) fn n_drawobj(&self) -> usize {
self.drawtag_stream.len()
}
/// The number of paths.
pub(crate) fn n_path(&self) -> u32 {
self.n_path
}
/// The number of path segments.
pub(crate) fn n_pathseg(&self) -> u32 {
self.n_pathseg
}
pub(crate) fn n_transform(&self) -> usize {
self.transform_stream.len()
}
/// The number of tags in the path stream.
pub(crate) fn n_pathtag(&self) -> usize {
self.tag_stream.len()
}
pub(crate) fn n_clip(&self) -> u32 {
self.n_clip
}
}
/// A scene fragment encoding a glyph.
///
/// This is a reduced version of the full encoder.
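
The offsets computed in stage_config above all use the same two rounding helpers, which pad each stream out to the partition size of the pipeline stage that consumes it. They are defined elsewhere in the encoder module; a hypothetical reconstruction, assuming the partition-size constants are powers of two, looks like this:

    // Hypothetical stand-ins for the helpers used by stage_config and write_scene;
    // see the encoder module for the real definitions.
    fn align_up(x: usize, align: usize) -> usize {
        // Round x up to the next multiple of `align`.
        (x + align - 1) / align * align
    }

    fn padding(x: usize, align: usize) -> usize {
        // Number of elements of zero fill needed to reach the next multiple of
        // `align`; write_scene uses this to pad the drawtag, transform, and
        // path tag streams.
        align_up(x, align) - x
    }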


@@ -8,9 +8,11 @@ pub mod stages;
pub mod test_scenes;
mod text;
use bytemuck::Pod;
use std::convert::TryInto;
pub use blend::{Blend, BlendMode, CompositionMode};
pub use encoder::EncodedSceneRef;
pub use render_ctx::PietGpuRenderContext;
pub use gradient::Colrv1RadialGradient;
@@ -406,6 +408,61 @@ impl Renderer {
Ok(())
}
pub fn upload_scene<T: Copy + Pod>(
&mut self,
scene: &EncodedSceneRef<T>,
buf_ix: usize,
) -> Result<(), Error> {
let (mut config, mut alloc) = scene.stage_config();
let n_drawobj = scene.n_drawobj();
// TODO: be more consistent in size types
let n_path = scene.n_path() as usize;
self.n_paths = n_path;
self.n_transform = scene.n_transform();
self.n_drawobj = scene.n_drawobj();
self.n_pathseg = scene.n_pathseg() as usize;
self.n_pathtag = scene.n_pathtag();
self.n_clip = scene.n_clip();
// These constants depend on encoding and may need to be updated.
// Perhaps we can plumb these from piet-gpu-derive?
const PATH_SIZE: usize = 12;
const BIN_SIZE: usize = 8;
let width_in_tiles = self.width / TILE_W;
let height_in_tiles = self.height / TILE_H;
let tile_base = alloc;
alloc += ((n_path + 3) & !3) * PATH_SIZE;
let bin_base = alloc;
alloc += ((n_drawobj + 255) & !255) * BIN_SIZE;
let ptcl_base = alloc;
alloc += width_in_tiles * height_in_tiles * PTCL_INITIAL_ALLOC;
config.width_in_tiles = width_in_tiles as u32;
config.height_in_tiles = height_in_tiles as u32;
config.tile_alloc = tile_base as u32;
config.bin_alloc = bin_base as u32;
config.ptcl_alloc = ptcl_base as u32;
unsafe {
// TODO: reallocate scene buffer if size is inadequate
{
let mut mapped_scene = self.scene_bufs[buf_ix].map_write(..)?;
scene.write_scene(&mut mapped_scene);
}
self.config_bufs[buf_ix].write(&[config])?;
self.memory_buf_host[buf_ix].write(&[alloc as u32, 0 /* Overflow flag */])?;
// Upload gradient data.
if !scene.ramp_data.is_empty() {
assert!(
self.gradient_bufs[buf_ix].size() as usize
>= std::mem::size_of_val(&*scene.ramp_data)
);
self.gradient_bufs[buf_ix].write(scene.ramp_data)?;
}
}
Ok(())
}
pub unsafe fn record(&self, cmd_buf: &mut CmdBuf, query_pool: &QueryPool, buf_ix: usize) {
cmd_buf.copy_buffer(&self.config_bufs[buf_ix], &self.config_buf);
cmd_buf.copy_buffer(&self.memory_buf_host[buf_ix], &self.memory_buf_dev);
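
After the stage_config layout, upload_scene appends the per-frame allocations for tile, bin, and ptcl memory. The bit masks round n_path up to a multiple of 4 and n_drawobj up to a multiple of 256 before multiplying by the per-element sizes; a worked example with illustrative counts:

    // Worked example of the rounding used for tile_base and bin_base above.
    let n_path: usize = 70;
    let n_drawobj: usize = 300;
    assert_eq!((n_path + 3) & !3, 72);           // next multiple of 4
    assert_eq!((n_drawobj + 255) & !255, 512);   // next multiple of 256
    // tile memory:  72 * PATH_SIZE (12)  =  864 bytes
    // bin memory:  512 * BIN_SIZE  (8)   = 4096 bytes
    // ptcl memory: width_in_tiles * height_in_tiles * PTCL_INITIAL_ALLOC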


@@ -57,7 +57,7 @@ fn main() -> Result<(), Error> {
.map(|_| session.create_semaphore())
.collect::<Result<Vec<_>, Error>>()?;
let query_pools = (0..NUM_FRAMES)
.map(|_| session.create_query_pool(8))
.map(|_| session.create_query_pool(12))
.collect::<Result<Vec<_>, Error>>()?;
let mut cmd_bufs: [Option<CmdBuf>; NUM_FRAMES] = Default::default();
let mut submitted: [Option<SubmittedCmdBuf>; NUM_FRAMES] = Default::default();
@@ -99,72 +99,26 @@ fn main() -> Result<(), Error> {
if !ts.is_empty() {
info_string = format!(
"{:.3}ms :: e:{:.3}ms|alloc:{:.3}ms|cp:{:.3}ms|bd:{:.3}ms|bin:{:.3}ms|cr:{:.3}ms|r:{:.3}ms",
ts[6] * 1e3,
ts[10] * 1e3,
ts[0] * 1e3,
(ts[1] - ts[0]) * 1e3,
(ts[2] - ts[1]) * 1e3,
(ts[3] - ts[2]) * 1e3,
(ts[4] - ts[3]) * 1e3,
(ts[5] - ts[4]) * 1e3,
(ts[6] - ts[5]) * 1e3,
(ts[8] - ts[7]) * 1e3,
(ts[10] - ts[9]) * 1e3,
);
}
}
let mut ctx = PietGpuRenderContext::new();
if let Some(input) = matches.value_of("INPUT") {
let mut scale = matches
.value_of("scale")
.map(|scale| scale.parse().unwrap())
.unwrap_or(8.0);
if matches.is_present("flip") {
scale = -scale;
}
test_scenes::render_svg(&mut ctx, input, scale);
} else {
use piet_gpu::{Blend, BlendMode::*, CompositionMode::*};
let blends = [
Blend::new(Normal, SrcOver),
Blend::new(Multiply, SrcOver),
Blend::new(Screen, SrcOver),
Blend::new(Overlay, SrcOver),
Blend::new(Darken, SrcOver),
Blend::new(Lighten, SrcOver),
Blend::new(ColorDodge, SrcOver),
Blend::new(ColorBurn, SrcOver),
Blend::new(HardLight, SrcOver),
Blend::new(SoftLight, SrcOver),
Blend::new(Difference, SrcOver),
Blend::new(Exclusion, SrcOver),
Blend::new(Hue, SrcOver),
Blend::new(Saturation, SrcOver),
Blend::new(Color, SrcOver),
Blend::new(Luminosity, SrcOver),
Blend::new(Normal, Clear),
Blend::new(Normal, Copy),
Blend::new(Normal, Dest),
Blend::new(Normal, SrcOver),
Blend::new(Normal, DestOver),
Blend::new(Normal, SrcIn),
Blend::new(Normal, DestIn),
Blend::new(Normal, SrcOut),
Blend::new(Normal, DestOut),
Blend::new(Normal, SrcAtop),
Blend::new(Normal, DestAtop),
Blend::new(Normal, Xor),
Blend::new(Normal, Plus),
];
let blend = blends[mode % blends.len()];
test_scenes::render_blend_test(&mut ctx, current_frame, blend);
info_string = format!("{:?}", blend);
}
render_info_string(&mut ctx, &info_string);
ctx = PietGpuRenderContext::new();
test_scene1_old(&mut ctx);
let mut encoded_scene_old = ctx.encoded_scene();
let ramp_data = ctx.get_ramp_data();
encoded_scene_old.ramp_data = &ramp_data;
//let mut encoded_scene_old = ctx.encoded_scene();
// let ramp_data = ctx.get_ramp_data();
//encoded_scene_old.ramp_data = &ramp_data;
test_scene1(&mut scene, &mut rcx);
let encoded_scene = scene_to_encoded_scene(&scene, &rcx);
// println!("{:?}\n============\n{:?}", encoded_scene_old, encoded_scene);
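
With the raw-stream path wired up, the frame loop is expected to upload the encoded scene into the slot for the current frame and record against the matching query pool. Roughly, as a sketch that omits swapchain acquisition, blitting, and submission (frame_ix and encoded_scene stand in for the example's own state):

    let frame_ix = current_frame % NUM_FRAMES;
    renderer.upload_scene(&encoded_scene, frame_ix)?;
    unsafe {
        let mut cmd_buf = session.cmd_buf()?;
        cmd_buf.begin();
        renderer.record(&mut cmd_buf, &query_pools[frame_ix], frame_ix);
        // ... blit to the swapchain image, end and submit the command buffer,
        // and stash the SubmittedCmdBuf in submitted[frame_ix] so this slot
        // can be waited on the next time it comes around.
    }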