Mostly working glyph rendering

This exposes interfaces to render glyphs into a texture atlas. The main changes are:

* Methods to plumb raw Metal GPU resources (device, texture, etc.) into piet-gpu-hal objects.

* A new glyph_render API specialized for rendering glyphs. This is basically the same as painting to a canvas, but it will allow better caching (and has more direct access to fonts, bypassing the Piet font type, which is underdeveloped).

* The ability to render to an A8 target in addition to RGBA8 (a usage sketch follows the changed-files summary below).

WIP; there are some rough edges, not least of which is that the image format changes are implemented only in the Metal backend and cause compile errors elsewhere.
Raph Levien 2022-01-19 11:58:01 -08:00
parent 833d993a4e
commit 0cf370f9c7
26 changed files with 153 additions and 26 deletions
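
As a quick orientation, here is a minimal sketch of how the new pieces fit together, assuming a Session has already been created the usual way. The names are taken from the diffs below; the 512x512 size and the single command buffer are arbitrary choices for the example.

use piet_gpu::{PixelFormat, RenderConfig, Renderer};
use piet_gpu_hal::{Error, Session};

// Configure a renderer that targets an A8 (grayscale/alpha) image instead
// of the default RGBA8. RenderConfig defaults to Rgba8; pixel_format is a
// builder-style override.
unsafe fn a8_renderer(session: &Session) -> Result<Renderer, Error> {
    let config = RenderConfig::new(512, 512).pixel_format(PixelFormat::A8);
    Renderer::new_from_config(session, config, 1)
}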

View file

@@ -16,7 +16,9 @@
 //! The generic trait for backends to implement.
 
-use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, MapMode, SamplerParams};
+use crate::{
+    BindType, BufferUsage, Error, GpuInfo, ImageFormat, ImageLayout, MapMode, SamplerParams,
+};
 
 pub trait Device: Sized {
     type Buffer: 'static;
@@ -47,7 +49,12 @@ pub trait Device: Sized {
     /// Maybe doesn't need result return?
     unsafe fn destroy_buffer(&self, buffer: &Self::Buffer) -> Result<(), Error>;
 
-    unsafe fn create_image2d(&self, width: u32, height: u32) -> Result<Self::Image, Error>;
+    unsafe fn create_image2d(
+        &self,
+        width: u32,
+        height: u32,
+        format: ImageFormat,
+    ) -> Result<Self::Image, Error>;
 
     /// Destroy an image.
     ///

View file

@@ -13,7 +13,7 @@ use std::sync::{Arc, Mutex, Weak};
 use bytemuck::Pod;
 use smallvec::SmallVec;
 
-use crate::{mux, BackendType, BufWrite, MapMode};
+use crate::{mux, BackendType, BufWrite, ImageFormat, MapMode};
 use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams};
 
@@ -312,8 +312,13 @@ impl Session {
     ///
     /// Currently this creates only a 2D image in RGBA8 format, with usage
     /// so that it can be accessed by shaders and used for transfer.
-    pub unsafe fn create_image2d(&self, width: u32, height: u32) -> Result<Image, Error> {
-        let image = self.0.device.create_image2d(width, height)?;
+    pub unsafe fn create_image2d(
+        &self,
+        width: u32,
+        height: u32,
+        format: ImageFormat,
+    ) -> Result<Image, Error> {
+        let image = self.0.device.create_image2d(width, height, format)?;
         Ok(Image(Arc::new(ImageInner {
             image,
             session: Arc::downgrade(&self.0),

View file

@@ -91,6 +91,15 @@ pub enum SamplerParams {
     Linear,
 }
 
+/// Image format.
+#[derive(Copy, Clone, Debug)]
+pub enum ImageFormat {
+    // 8 bit grayscale / alpha
+    A8,
+    // 8 bit per pixel RGBA
+    Rgba8,
+}
+
 bitflags! {
     /// The intended usage for a buffer, specified on creation.
     pub struct BufferUsage: u32 {
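
One practical consequence of the two variants is pixel stride: A8 is one byte per pixel, RGBA8 is four. A hypothetical helper for sizing readback buffers — not part of this commit, and the name bytes_per_pixel is invented for illustration:

use piet_gpu_hal::ImageFormat;

// Bytes occupied by one pixel of the given format, e.g. for allocating a
// CPU-side buffer of width * height * bytes_per_pixel(format) bytes.
fn bytes_per_pixel(format: ImageFormat) -> usize {
    match format {
        ImageFormat::A8 => 1,
        ImageFormat::Rgba8 => 4,
    }
}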

View file

@@ -30,7 +30,7 @@ use metal::{CGFloat, MTLFeatureSet};
 use raw_window_handle::{HasRawWindowHandle, RawWindowHandle};
 
-use crate::{BufferUsage, Error, GpuInfo, MapMode, WorkgroupLimits};
+use crate::{BufferUsage, Error, GpuInfo, ImageFormat, MapMode, WorkgroupLimits};
 
 use util::*;
 
@@ -279,14 +279,23 @@ impl crate::backend::Device for MtlDevice {
         Ok(())
     }
 
-    unsafe fn create_image2d(&self, width: u32, height: u32) -> Result<Self::Image, Error> {
+    unsafe fn create_image2d(
+        &self,
+        width: u32,
+        height: u32,
+        format: ImageFormat,
+    ) -> Result<Self::Image, Error> {
         let desc = metal::TextureDescriptor::new();
         desc.set_width(width as u64);
         desc.set_height(height as u64);
         // These are defaults so don't need to be explicitly set.
         //desc.set_depth(1);
         //desc.set_mipmap_level_count(1);
-        //desc.set_pixel_format(metal::MTLPixelFormat::RGBA8Unorm);
+        let mtl_format = match format {
+            ImageFormat::A8 => metal::MTLPixelFormat::R8Unorm,
+            ImageFormat::Rgba8 => metal::MTLPixelFormat::RGBA8Unorm,
+        };
+        desc.set_pixel_format(mtl_format);
         desc.set_usage(metal::MTLTextureUsage::ShaderRead | metal::MTLTextureUsage::ShaderWrite);
         let texture = self.device.new_texture(&desc);
         Ok(Image {

View file

@@ -35,6 +35,7 @@ use crate::backend::DescriptorSetBuilder as DescriptorSetBuilderTrait;
 use crate::backend::Device as DeviceTrait;
 use crate::BackendType;
 use crate::BindType;
+use crate::ImageFormat;
 use crate::MapMode;
 use crate::{BufferUsage, Error, GpuInfo, ImageLayout, InstanceFlags};
 
@@ -264,11 +265,16 @@ impl Device {
         }
     }
 
-    pub unsafe fn create_image2d(&self, width: u32, height: u32) -> Result<Image, Error> {
+    pub unsafe fn create_image2d(
+        &self,
+        width: u32,
+        height: u32,
+        format: ImageFormat,
+    ) -> Result<Image, Error> {
         mux_match! { self;
-            Device::Vk(d) => d.create_image2d(width, height).map(Image::Vk),
-            Device::Dx12(d) => d.create_image2d(width, height).map(Image::Dx12),
-            Device::Mtl(d) => d.create_image2d(width, height).map(Image::Mtl),
+            Device::Vk(d) => d.create_image2d(width, height, format).map(Image::Vk),
+            Device::Dx12(d) => d.create_image2d(width, height, format).map(Image::Dx12),
+            Device::Mtl(d) => d.create_image2d(width, height, format).map(Image::Mtl),
         }
     }

View file

@@ -58,6 +58,12 @@ build gen/kernel4.hlsl: hlsl gen/kernel4.spv
 build gen/kernel4.dxil: dxil gen/kernel4.hlsl
 build gen/kernel4.msl: msl gen/kernel4.spv
 
+build gen/kernel4_gray.spv: glsl kernel4.comp | ptcl.h setup.h
+  flags = -DGRAY
+build gen/kernel4_gray.hlsl: hlsl gen/kernel4_gray.spv
+build gen/kernel4_gray.dxil: dxil gen/kernel4_gray.hlsl
+build gen/kernel4_gray.msl: msl gen/kernel4_gray.spv
+
 # New element pipeline follows
 build gen/transform_reduce.spv: glsl transform_reduce.comp | scene.h setup.h mem.h

17 binary files not shown

View file

@@ -23,7 +23,11 @@ layout(set = 0, binding = 1) restrict readonly buffer ConfigBuf {
     Config conf;
 };
 
+#ifdef GRAY
+layout(r8, set = 0, binding = 2) uniform restrict writeonly image2D image;
+#else
 layout(rgba8, set = 0, binding = 2) uniform restrict writeonly image2D image;
+#endif
 
 layout(rgba8, set = 0, binding = 3) uniform restrict readonly image2D image_atlas;
 
@@ -228,6 +232,12 @@ void main() {
     }
 
     for (uint i = 0; i < CHUNK; i++) {
+#ifdef GRAY
+        // Just store the alpha value; later we can specialize this kernel more to avoid
+        // computing unneeded RGB colors.
+        imageStore(image, ivec2(xy_uint + chunk_offset(i)), vec4(rgba[i].a));
+#else
         imageStore(image, ivec2(xy_uint + chunk_offset(i)), vec4(tosRGB(rgba[i].rgb), rgba[i].a));
+#endif
     }
 }

View file

@@ -16,7 +16,8 @@
 //! An experimental API for glyph rendering.
 
-use swash::{scale::ScaleContext, CacheKey, FontRef};
+use piet::{kurbo::Affine, RenderContext};
+use swash::{scale::ScaleContext, CacheKey, FontDataRef, FontRef};
 
 use crate::{encoder::GlyphEncoder, PietGpuRenderContext};
 
@@ -30,33 +31,56 @@ pub struct FontId(CacheKey);
 impl GlyphRenderer {
     pub fn new() -> GlyphRenderer {
+        let render_ctx = PietGpuRenderContext::new();
+        let scale_context = ScaleContext::new();
         GlyphRenderer {
-            render_ctx: PietGpuRenderContext::new(),
-            scale_context: ScaleContext::new(),
+            render_ctx,
+            scale_context,
         }
     }
 
-    pub unsafe fn add_glyph(&mut self, font_data: &[u8], font_id: u64, glyph_id: u16) {
+    pub unsafe fn add_glyph(
+        &mut self,
+        font_data: &[u8],
+        font_id: u64,
+        glyph_id: u16,
+        transform: [f32; 6],
+    ) {
         // This transmute is dodgy because the definition in swash isn't repr(transparent).
         // I think the best solution is to have a from_u64 method, but we'll work that out
         // later.
         let font_id = FontId(std::mem::transmute(font_id));
         let encoder = self.make_glyph(font_data, font_id, glyph_id);
+        const DEFAULT_UPEM: u16 = 2048;
+        let affine = Affine::new([
+            transform[0] as f64,
+            transform[1] as f64,
+            transform[2] as f64,
+            transform[3] as f64,
+            transform[4] as f64,
+            transform[5] as f64,
+        ]) * Affine::scale(1.0 / DEFAULT_UPEM as f64);
+        self.render_ctx.transform(affine);
         self.render_ctx.encode_glyph(&encoder);
+        // TODO: don't fill glyph if RGBA
         self.render_ctx.fill_glyph(0xff_ff_ff_ff);
+        self.render_ctx.transform(affine.inverse());
     }
 
+    pub fn reset(&mut self) {
+        self.render_ctx = PietGpuRenderContext::new();
+    }
+
     fn make_glyph(&mut self, font_data: &[u8], font_id: FontId, glyph_id: u16) -> GlyphEncoder {
         let mut encoder = GlyphEncoder::default();
-        let font_ref = FontRef {
-            data: font_data,
-            offset: 0,
-            key: font_id.0,
-        };
+        let font_data = FontDataRef::new(font_data).expect("invalid font");
+        let mut font_ref = font_data.get(0).expect("invalid font index");
+        font_ref.key = font_id.0;
         let mut scaler = self.scale_context.builder(font_ref).size(2048.).build();
         if let Some(outline) = scaler.scale_outline(glyph_id) {
             crate::text::append_outline(&mut encoder, outline.verbs(), outline.points());
+        } else {
+            println!("failed to scale");
         }
         encoder
     }
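
For illustration, a sketch of driving the revised add_glyph, assuming the module is exposed as piet_gpu::glyph_render (the path is inferred, not shown in this diff). The six-element transform follows kurbo's Affine::new coefficient order and is divided by the hard-coded 2048 units-per-em, so a plain scale of 32.0 renders the glyph at roughly 32 px. The font_id and glyph_id values are placeholders.

use piet_gpu::glyph_render::GlyphRenderer;

// Feed one glyph through the new API. The u64 font_id is an opaque cache
// key (an arbitrary nonzero value here); glyph_id 36 is whatever glyph the
// font maps there — both are illustrative only.
unsafe fn render_one(renderer: &mut GlyphRenderer, font_data: &[u8]) {
    let size = 32.0f32;
    // [xx, yx, xy, yy, dx, dy] in kurbo coefficient order: a uniform scale.
    let transform = [size, 0.0, 0.0, size, 0.0, 0.0];
    renderer.add_glyph(font_data, 1, 36, transform);
}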

View file

@@ -49,6 +49,18 @@ pub fn dump_k1_data(k1_buf: &[u32]) {
     }
 }
 
+pub struct RenderConfig {
+    width: usize,
+    height: usize,
+    format: PixelFormat,
+}
+
+// Should we just use the enum from piet-gpu-hal?
+pub enum PixelFormat {
+    A8,
+    Rgba8,
+}
+
 pub struct Renderer {
     // These sizes are aligned to tile boundaries, though at some point
     // we'll want to have a good strategy for dealing with odd sizes.
@@ -106,15 +118,41 @@ pub struct Renderer {
     gradients: Image,
 }
 
+impl RenderConfig {
+    pub fn new(width: usize, height: usize) -> RenderConfig {
+        RenderConfig {
+            width,
+            height,
+            format: PixelFormat::Rgba8,
+        }
+    }
+
+    pub fn pixel_format(mut self, format: PixelFormat) -> Self {
+        self.format = format;
+        self
+    }
+}
+
 impl Renderer {
     /// Create a new renderer.
     pub unsafe fn new(
         session: &Session,
         width: usize,
         height: usize,
         n_bufs: usize,
     ) -> Result<Self, Error> {
+        let config = RenderConfig::new(width, height);
+        Self::new_from_config(session, config, n_bufs)
+    }
+
+    /// Create a new renderer.
+    pub unsafe fn new_from_config(
+        session: &Session,
+        config: RenderConfig,
+        n_bufs: usize,
+    ) -> Result<Self, Error> {
         // For now, round up to tile alignment
+        let width = config.width;
+        let height = config.height;
         let width = width + (width.wrapping_neg() & (TILE_W - 1));
         let height = height + (height.wrapping_neg() & (TILE_W - 1));
 
         let dev = BufferUsage::STORAGE | BufferUsage::COPY_DST;
@@ -126,7 +164,11 @@ impl Renderer {
             .map(|_| session.create_buffer(8 * 1024 * 1024, host_upload).unwrap())
             .collect::<Vec<_>>();
 
-        let image_dev = session.create_image2d(width as u32, height as u32)?;
+        let image_format = match config.format {
+            PixelFormat::A8 => piet_gpu_hal::ImageFormat::A8,
+            PixelFormat::Rgba8 => piet_gpu_hal::ImageFormat::Rgba8,
+        };
+        let image_dev = session.create_image2d(width as u32, height as u32, image_format)?;
 
         // Note: this must be updated when the config struct size changes.
         const CONFIG_BUFFER_SIZE: u64 = std::mem::size_of::<Config>() as u64;
@@ -211,7 +253,10 @@ impl Renderer {
             .collect();
 
         let gradients = Self::make_gradient_image(&session);
 
-        let k4_code = include_shader!(session, "../shader/gen/kernel4");
+        let k4_code = match config.format {
+            PixelFormat::A8 => include_shader!(session, "../shader/gen/kernel4_gray"),
+            PixelFormat::Rgba8 => include_shader!(session, "../shader/gen/kernel4"),
+        };
         let k4_pipeline = session.create_compute_pipeline(
             k4_code,
             &[
@@ -428,7 +473,8 @@ impl Renderer {
             return Err("unsupported image format".into());
         }
         let buffer = session.create_buffer_init(&buf, BufferUsage::COPY_SRC)?;
-        let image = session.create_image2d(width.try_into()?, height.try_into()?)?;
+        const RGBA: piet_gpu_hal::ImageFormat = piet_gpu_hal::ImageFormat::Rgba8;
+        let image = session.create_image2d(width.try_into()?, height.try_into()?, RGBA)?;
         let mut cmd_buf = session.cmd_buf()?;
         cmd_buf.begin();
         cmd_buf.image_barrier(&image, ImageLayout::Undefined, ImageLayout::BlitDst);
@@ -464,8 +510,13 @@ impl Renderer {
     fn make_gradient_image(session: &Session) -> Image {
         unsafe {
+            const RGBA: piet_gpu_hal::ImageFormat = piet_gpu_hal::ImageFormat::Rgba8;
             session
-                .create_image2d(gradient::N_SAMPLES as u32, gradient::N_GRADIENTS as u32)
+                .create_image2d(
+                    gradient::N_SAMPLES as u32,
+                    gradient::N_GRADIENTS as u32,
+                    RGBA,
+                )
                 .unwrap()
         }
    }