Merge pull request #109 from linebender/gradient

Basic implementation of gradients
Raph Levien 2021-08-17 08:33:52 -07:00 committed by GitHub
commit 698a1546d2
30 changed files with 696 additions and 201 deletions
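For context, this is the client-facing surface the commit wires up end to end. A minimal usage sketch (mirroring the piet types exercised by render_gradient_test below; not part of the diff):

use piet::kurbo::{Point, Rect};
use piet::{Color, FixedGradient, FixedLinearGradient, GradientStop, RenderContext};

fn fill_with_gradient(rc: &mut impl RenderContext) -> Result<(), piet::Error> {
    let lin = FixedLinearGradient {
        start: Point::new(0.0, 100.0),
        end: Point::new(0.0, 300.0),
        stops: vec![
            GradientStop { color: Color::rgb8(0, 255, 0), pos: 0.0 },
            GradientStop { color: Color::BLACK, pos: 1.0 },
        ],
    };
    // gradient() bakes the stops into a 512-texel ramp; fill() encodes the
    // path followed by a FillLinGradient element referencing that ramp.
    let brush = rc.gradient(FixedGradient::Linear(lin))?;
    rc.fill(Rect::new(100.0, 100.0, 300.0, 300.0), &brush);
    Ok(())
}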

View file

@@ -673,6 +673,13 @@ impl Buffer {
// else session lost error?
Ok(())
}
/// The size of the buffer.
///
/// This is at least as large as the value provided on creation.
pub fn size(&self) -> u64 {
self.0.buffer.size()
}
}
impl PipelineBuilder {
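The new size() accessor enables bounds checks before writing into preallocated buffers, as upload_render_ctx does later in this diff. A sketch (upload is a hypothetical helper; write is treated as unsafe, as its call sites in this diff suggest):

fn upload(buf: &mut Buffer, bytes: &[u8]) -> Result<(), Error> {
    unsafe {
        // The allocation may be rounded up, but is never smaller than
        // requested, so this guards against writing past the end.
        assert!(buf.size() as usize >= bytes.len());
        buf.write(bytes)
    }
}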

View file

@@ -104,7 +104,6 @@ pub struct PipelineBuilder {
bindings: Vec<vk::DescriptorSetLayoutBinding>,
binding_flags: Vec<vk::DescriptorBindingFlags>,
max_textures: u32,
has_descriptor_indexing: bool,
}
pub struct DescriptorSetBuilder {
@@ -644,7 +643,6 @@ impl crate::backend::Device for VkDevice {
bindings: Vec::new(),
binding_flags: Vec::new(),
max_textures: 0,
has_descriptor_indexing: self.gpu_info.has_descriptor_indexing,
}
}
@@ -1090,23 +1088,21 @@ impl crate::backend::PipelineBuilder<VkDevice> for PipelineBuilder {
}
}
fn add_textures(&mut self, max_textures: u32) {
fn add_textures(&mut self, n_images: u32) {
let start = self.bindings.len() as u32;
self.bindings.push(
vk::DescriptorSetLayoutBinding::builder()
.binding(start)
.descriptor_type(vk::DescriptorType::STORAGE_IMAGE)
.descriptor_count(max_textures)
.stage_flags(vk::ShaderStageFlags::COMPUTE)
.build(),
);
let flags = if self.has_descriptor_indexing {
vk::DescriptorBindingFlags::VARIABLE_DESCRIPTOR_COUNT
} else {
Default::default()
};
self.binding_flags.push(flags);
self.max_textures += max_textures;
for i in 0..n_images {
self.bindings.push(
vk::DescriptorSetLayoutBinding::builder()
.binding(start + i)
.descriptor_type(vk::DescriptorType::STORAGE_IMAGE)
.descriptor_count(1)
.stage_flags(vk::ShaderStageFlags::COMPUTE)
.build(),
);
self.binding_flags
.push(vk::DescriptorBindingFlags::default());
}
self.max_textures += n_images;
}
unsafe fn create_compute_pipeline(
@@ -1195,11 +1191,11 @@ impl crate::backend::DescriptorSetBuilder<VkDevice> for DescriptorSetBuilder {
.build(),
);
}
if pipeline.max_textures > 0 {
if !self.textures.is_empty() {
descriptor_pool_sizes.push(
vk::DescriptorPoolSize::builder()
.ty(vk::DescriptorType::STORAGE_IMAGE)
.descriptor_count(pipeline.max_textures)
.descriptor_count(self.textures.len() as u32)
.build(),
);
}
@@ -1211,15 +1207,11 @@ impl crate::backend::DescriptorSetBuilder<VkDevice> for DescriptorSetBuilder {
)?;
let descriptor_set_layouts = [pipeline.descriptor_set_layout];
let counts = &[pipeline.max_textures];
let variable_info = vk::DescriptorSetVariableDescriptorCountAllocateInfo::builder()
.descriptor_counts(counts);
let descriptor_sets = device
.allocate_descriptor_sets(
&vk::DescriptorSetAllocateInfo::builder()
.descriptor_pool(descriptor_pool)
.set_layouts(&descriptor_set_layouts)
.push_next(&mut variable_info.build()),
.set_layouts(&descriptor_set_layouts),
)
.unwrap();
let mut binding = 0;
@@ -1240,6 +1232,7 @@ impl crate::backend::DescriptorSetBuilder<VkDevice> for DescriptorSetBuilder {
);
binding += 1;
}
// maybe chain images and textures together; they're basically identical now
for image in &self.images {
device.update_descriptor_sets(
&[vk::WriteDescriptorSet::builder()
@@ -1256,28 +1249,21 @@ impl crate::backend::DescriptorSetBuilder<VkDevice> for DescriptorSetBuilder {
);
binding += 1;
}
if !self.textures.is_empty() {
let infos = self
.textures
.iter()
.map(|texture| {
vk::DescriptorImageInfo::builder()
.sampler(self.sampler)
.image_view(*texture)
.image_layout(vk::ImageLayout::GENERAL)
.build()
})
.collect::<Vec<_>>();
for image in &self.textures {
device.update_descriptor_sets(
&[vk::WriteDescriptorSet::builder()
.dst_set(descriptor_sets[0])
.dst_binding(binding)
.descriptor_type(vk::DescriptorType::STORAGE_IMAGE)
.image_info(&infos)
.image_info(&[vk::DescriptorImageInfo::builder()
.sampler(vk::Sampler::null())
.image_view(*image)
.image_layout(vk::ImageLayout::GENERAL)
.build()])
.build()],
&[],
);
//binding += 1;
binding += 1;
}
Ok(DescriptorSet {
descriptor_set: descriptor_sets[0],

View file

@@ -17,6 +17,15 @@ piet_gpu! {
linewidth: f32,
rgba_color: u32,
}
struct AnnoLinGradient {
bbox: [f32; 4],
// For stroked fills.
linewidth: f32,
index: u32,
line_x: f32,
line_y: f32,
line_c: f32,
}
struct AnnoBeginClip {
bbox: [f32; 4],
linewidth: f32,
@@ -27,6 +36,7 @@ piet_gpu! {
enum Annotated {
Nop,
Color(TagFlags, AnnoColor),
LinGradient(TagFlags, AnnoLinGradient),
Image(TagFlags, AnnoImage),
BeginClip(TagFlags, AnnoBeginClip),
EndClip(AnnoEndClip),
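The three line_* fields encode the gradient as a screen-space line equation (the derivation is in elements.comp later in this diff):

    t(x, y) = line_x * x + line_y * y + line_c

normalized so that t = 0 at the transformed start point and t = 1 at the end point; the fine rasterizer uses t directly as the ramp coordinate.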

View file

@@ -17,6 +17,13 @@ piet_gpu! {
struct CmdColor {
rgba_color: u32,
}
struct CmdLinGrad {
index: u32,
// line equation for gradient
line_x: f32,
line_y: f32,
line_c: f32,
}
struct CmdImage {
index: u32,
offset: [i16; 2],
@@ -34,6 +41,7 @@ piet_gpu! {
Solid,
Alpha(CmdAlpha),
Color(CmdColor),
LinGrad(CmdLinGrad),
Image(CmdImage),
BeginClip,
EndClip,

View file

@@ -1,7 +1,7 @@
use piet_gpu_derive::piet_gpu;
pub use self::scene::{
Clip, CubicSeg, Element, FillColor, LineSeg, QuadSeg, SetFillMode, SetLineWidth, Transform,
Clip, CubicSeg, Element, FillColor, FillLinGradient, LineSeg, QuadSeg, SetFillMode, SetLineWidth, Transform,
};
piet_gpu! {
@@ -25,6 +25,11 @@ piet_gpu! {
struct FillColor {
rgba_color: u32,
}
struct FillLinGradient {
index: u32,
p0: [f32; 2],
p1: [f32; 2],
}
struct FillImage {
index: u32,
offset: [i16; 2],
@@ -51,11 +56,12 @@ piet_gpu! {
Cubic(CubicSeg),
FillColor(FillColor),
FillLinGradient(FillLinGradient),
FillImage(FillImage),
SetLineWidth(SetLineWidth),
Transform(Transform),
BeginClip(Clip),
EndClip(Clip),
FillImage(FillImage),
SetFillMode(SetFillMode),
}
}

View file

@@ -107,12 +107,9 @@ impl GfxState {
let mut ctx = PietGpuRenderContext::new();
render_scene(&mut ctx);
let n_paths = ctx.path_count();
let n_pathseg = ctx.pathseg_count();
let n_trans = ctx.pathseg_count();
let scene = ctx.get_scene_buf();
let renderer = Renderer::new(&session, scene, n_paths, n_pathseg, n_trans)?;
let mut renderer = Renderer::new(&session)?;
renderer.upload_render_ctx(&mut ctx)?;
let submitted: Option<SubmittedCmdBuf> = None;
let current_frame = 0;

View file

@@ -244,13 +244,9 @@ fn main() -> Result<(), Error> {
} else {
render_scene(&mut ctx);
}
let n_paths = ctx.path_count();
let n_pathseg = ctx.pathseg_count();
let n_trans = ctx.trans_count();
let scene = ctx.get_scene_buf();
//dump_scene(&scene);
let renderer = Renderer::new(&session, scene, n_paths, n_pathseg, n_trans)?;
let mut renderer = Renderer::new(&session)?;
renderer.upload_render_ctx(&mut ctx)?;
let image_usage = BufferUsage::MAP_READ | BufferUsage::COPY_DST;
let image_buf = session.create_buffer((WIDTH * HEIGHT * 4) as u64, image_usage)?;

View file

@@ -61,12 +61,9 @@ fn main() -> Result<(), Error> {
} else {
render_scene(&mut ctx);
}
let n_paths = ctx.path_count();
let n_pathseg = ctx.pathseg_count();
let n_trans = ctx.trans_count();
let scene = ctx.get_scene_buf();
let renderer = Renderer::new(&session, scene, n_paths, n_pathseg, n_trans)?;
let mut renderer = Renderer::new(&session)?;
renderer.upload_render_ctx(&mut ctx)?;
let mut submitted: Option<SubmittedCmdBuf> = None;
let mut last_frame_idx = 0;

View file

@@ -10,6 +10,10 @@ struct AnnoColorRef {
uint offset;
};
struct AnnoLinGradientRef {
uint offset;
};
struct AnnoBeginClipRef {
uint offset;
};
@@ -47,6 +51,21 @@ AnnoColorRef AnnoColor_index(AnnoColorRef ref, uint index) {
return AnnoColorRef(ref.offset + index * AnnoColor_size);
}
struct AnnoLinGradient {
vec4 bbox;
float linewidth;
uint index;
float line_x;
float line_y;
float line_c;
};
#define AnnoLinGradient_size 36
AnnoLinGradientRef AnnoLinGradient_index(AnnoLinGradientRef ref, uint index) {
return AnnoLinGradientRef(ref.offset + index * AnnoLinGradient_size);
}
struct AnnoBeginClip {
vec4 bbox;
float linewidth;
@@ -70,10 +89,11 @@ AnnoEndClipRef AnnoEndClip_index(AnnoEndClipRef ref, uint index) {
#define Annotated_Nop 0
#define Annotated_Color 1
#define Annotated_Image 2
#define Annotated_BeginClip 3
#define Annotated_EndClip 4
#define Annotated_size 32
#define Annotated_LinGradient 2
#define Annotated_Image 3
#define Annotated_BeginClip 4
#define Annotated_EndClip 5
#define Annotated_size 40
AnnotatedRef Annotated_index(AnnotatedRef ref, uint index) {
return AnnotatedRef(ref.offset + index * Annotated_size);
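A quick check on the two constants above (illustrative arithmetic, not part of the generated header): the AnnoLinGradient payload is nine 32-bit words, and the Annotated union adds a 4-byte tag word, making the gradient the largest variant. This is also why ANNO_SIZE on the Rust side moves from 32 to 40 later in this diff.

const ANNO_LIN_GRADIENT_SIZE: u32 = 4 * 4 // bbox: vec4
    + 4 // linewidth: f32
    + 4 // index: u32
    + 3 * 4; // line_x, line_y, line_c
const ANNOTATED_SIZE: u32 = 4 + ANNO_LIN_GRADIENT_SIZE; // tag + payload = 40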
@@ -137,6 +157,40 @@ void AnnoColor_write(Alloc a, AnnoColorRef ref, AnnoColor s) {
write_mem(a, ix + 5, s.rgba_color);
}
AnnoLinGradient AnnoLinGradient_read(Alloc a, AnnoLinGradientRef ref) {
uint ix = ref.offset >> 2;
uint raw0 = read_mem(a, ix + 0);
uint raw1 = read_mem(a, ix + 1);
uint raw2 = read_mem(a, ix + 2);
uint raw3 = read_mem(a, ix + 3);
uint raw4 = read_mem(a, ix + 4);
uint raw5 = read_mem(a, ix + 5);
uint raw6 = read_mem(a, ix + 6);
uint raw7 = read_mem(a, ix + 7);
uint raw8 = read_mem(a, ix + 8);
AnnoLinGradient s;
s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3));
s.linewidth = uintBitsToFloat(raw4);
s.index = raw5;
s.line_x = uintBitsToFloat(raw6);
s.line_y = uintBitsToFloat(raw7);
s.line_c = uintBitsToFloat(raw8);
return s;
}
void AnnoLinGradient_write(Alloc a, AnnoLinGradientRef ref, AnnoLinGradient s) {
uint ix = ref.offset >> 2;
write_mem(a, ix + 0, floatBitsToUint(s.bbox.x));
write_mem(a, ix + 1, floatBitsToUint(s.bbox.y));
write_mem(a, ix + 2, floatBitsToUint(s.bbox.z));
write_mem(a, ix + 3, floatBitsToUint(s.bbox.w));
write_mem(a, ix + 4, floatBitsToUint(s.linewidth));
write_mem(a, ix + 5, s.index);
write_mem(a, ix + 6, floatBitsToUint(s.line_x));
write_mem(a, ix + 7, floatBitsToUint(s.line_y));
write_mem(a, ix + 8, floatBitsToUint(s.line_c));
}
AnnoBeginClip AnnoBeginClip_read(Alloc a, AnnoBeginClipRef ref) {
uint ix = ref.offset >> 2;
uint raw0 = read_mem(a, ix + 0);
@@ -187,6 +241,10 @@ AnnoColor Annotated_Color_read(Alloc a, AnnotatedRef ref) {
return AnnoColor_read(a, AnnoColorRef(ref.offset + 4));
}
AnnoLinGradient Annotated_LinGradient_read(Alloc a, AnnotatedRef ref) {
return AnnoLinGradient_read(a, AnnoLinGradientRef(ref.offset + 4));
}
AnnoImage Annotated_Image_read(Alloc a, AnnotatedRef ref) {
return AnnoImage_read(a, AnnoImageRef(ref.offset + 4));
}
@@ -208,6 +266,11 @@ void Annotated_Color_write(Alloc a, AnnotatedRef ref, uint flags, AnnoColor s) {
AnnoColor_write(a, AnnoColorRef(ref.offset + 4), s);
}
void Annotated_LinGradient_write(Alloc a, AnnotatedRef ref, uint flags, AnnoLinGradient s) {
write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_LinGradient);
AnnoLinGradient_write(a, AnnoLinGradientRef(ref.offset + 4), s);
}
void Annotated_Image_write(Alloc a, AnnotatedRef ref, uint flags, AnnoImage s) {
write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_Image);
AnnoImage_write(a, AnnoImageRef(ref.offset + 4), s);

View file

@@ -58,6 +58,7 @@ void main() {
AnnotatedTag tag = Annotated_tag(conf.anno_alloc, ref);
switch (tag.tag) {
case Annotated_Image:
case Annotated_LinGradient:
case Annotated_BeginClip:
case Annotated_Color:
if (fill_mode_from_flags(tag.flags) != MODE_NONZERO) {

Binary file not shown.

Binary file not shown.

View file

@@ -57,6 +57,7 @@ void main() {
int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
switch (tag) {
case Annotated_Color:
case Annotated_LinGradient:
case Annotated_Image:
case Annotated_BeginClip:
case Annotated_EndClip:

Binary file not shown.

View file

@@ -24,6 +24,3 @@ build backdrop_lg.spv: glsl backdrop.comp | annotated.h tile.h setup.h
build coarse.spv: glsl coarse.comp | annotated.h bins.h ptcl.h setup.h
build kernel4.spv: glsl kernel4.comp | ptcl.h setup.h
build kernel4_idx.spv: glsl kernel4.comp | ptcl.h setup.h
flags = -DENABLE_IMAGE_INDICES

View file

@@ -223,6 +223,7 @@ void main() {
switch (tag) {
case Annotated_Color:
case Annotated_Image:
case Annotated_LinGradient:
case Annotated_BeginClip:
case Annotated_EndClip:
// We have one "path" for each element, even if the element isn't
@@ -338,6 +339,22 @@ void main() {
Cmd_Color_write(cmd_alloc, cmd_ref, CmdColor(fill.rgba_color));
cmd_ref.offset += 4 + CmdColor_size;
break;
case Annotated_LinGradient:
tile = Tile_read(read_tile_alloc(element_ref_ix, mem_ok), TileRef(sh_tile_base[element_ref_ix]
+ (sh_tile_stride[element_ref_ix] * tile_y + tile_x) * Tile_size));
AnnoLinGradient lin = Annotated_LinGradient_read(conf.anno_alloc, ref);
if (!alloc_cmd(cmd_alloc, cmd_ref, cmd_limit)) {
break;
}
write_fill(cmd_alloc, cmd_ref, tag.flags, tile, fill.linewidth);
CmdLinGrad cmd_lin;
cmd_lin.index = lin.index;
cmd_lin.line_x = lin.line_x;
cmd_lin.line_y = lin.line_y;
cmd_lin.line_c = lin.line_c;
Cmd_LinGrad_write(cmd_alloc, cmd_ref, cmd_lin);
cmd_ref.offset += 4 + CmdLinGrad_size;
break;
case Annotated_Image:
tile = Tile_read(read_tile_alloc(element_ref_ix, mem_ok), TileRef(sh_tile_base[element_ref_ix]
+ (sh_tile_stride[element_ref_ix] * tile_y + tile_x) * Tile_size));

Binary file not shown.

View file

@@ -137,6 +137,7 @@ State map_element(ElementRef ref) {
c.pathseg_count = 1;
break;
case Element_FillColor:
case Element_FillLinGradient:
case Element_FillImage:
case Element_BeginClip:
c.flags = FLAG_RESET_BBOX;
@@ -363,6 +364,31 @@ void main() {
AnnotatedRef out_ref = AnnotatedRef(conf.anno_alloc.offset + (st.path_count - 1) * Annotated_size);
Annotated_Color_write(conf.anno_alloc, out_ref, fill_mode, anno_fill);
break;
case Element_FillLinGradient:
FillLinGradient lin = Element_FillLinGradient_read(this_ref);
AnnoLinGradient anno_lin;
anno_lin.index = lin.index;
vec2 p0 = st.mat.xy * lin.p0.x + st.mat.zw * lin.p0.y + st.translate;
vec2 p1 = st.mat.xy * lin.p1.x + st.mat.zw * lin.p1.y + st.translate;
vec2 dxy = p1 - p0;
float scale = 1.0 / (dxy.x * dxy.x + dxy.y * dxy.y);
float line_x = dxy.x * scale;
float line_y = dxy.y * scale;
anno_lin.line_x = line_x;
anno_lin.line_y = line_y;
anno_lin.line_c = -(p0.x * line_x + p0.y * line_y);
// TODO: consider consolidating bbox calculation
if (is_stroke) {
vec2 lw = get_linewidth(st);
anno_lin.bbox = st.bbox + vec4(-lw, lw);
anno_lin.linewidth = st.linewidth * sqrt(abs(st.mat.x * st.mat.w - st.mat.y * st.mat.z));
} else {
anno_lin.bbox = st.bbox;
anno_lin.linewidth = 0.0;
}
out_ref = AnnotatedRef(conf.anno_alloc.offset + (st.path_count - 1) * Annotated_size);
Annotated_LinGradient_write(conf.anno_alloc, out_ref, fill_mode, anno_lin);
break;
case Element_FillImage:
FillImage fill_img = Element_FillImage_read(this_ref);
AnnoImage anno_img;
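A CPU-side sketch of the line-equation setup in the Element_FillLinGradient case above (line_eq is a hypothetical helper; p0 and p1 are the already-transformed endpoints):

fn line_eq(p0: [f32; 2], p1: [f32; 2]) -> (f32, f32, f32) {
    let (dx, dy) = (p1[0] - p0[0], p1[1] - p0[1]);
    // Dividing the direction by its squared length normalizes the dot
    // product so t runs from 0 to 1 between the endpoints.
    let scale = 1.0 / (dx * dx + dy * dy);
    let (line_x, line_y) = (dx * scale, dy * scale);
    // Offset so the start point evaluates to exactly 0.
    let line_c = -(p0[0] * line_x + p0[1] * line_y);
    (line_x, line_y, line_c)
}
// Check: t(p0) = 0 by construction, and t(p1) = (d . d) / |d|^2 = 1.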

Binary file not shown.

View file

@@ -28,11 +28,9 @@ layout(set = 0, binding = 1) restrict readonly buffer ConfigBuf {
layout(rgba8, set = 0, binding = 2) uniform restrict writeonly image2D image;
#ifdef ENABLE_IMAGE_INDICES
layout(rgba8, set = 0, binding = 3) uniform restrict readonly image2D images[];
#else
layout(rgba8, set = 0, binding = 3) uniform restrict readonly image2D images[1];
#endif
layout(rgba8, set = 0, binding = 3) uniform restrict readonly image2D image_atlas;
layout(rgba8, set = 0, binding = 4) uniform restrict readonly image2D gradients;
#include "ptcl.h"
#include "tile.h"
@@ -75,11 +73,7 @@ mediump vec4[CHUNK] fillImage(uvec2 xy, CmdImage cmd_img) {
for (uint i = 0; i < CHUNK; i++) {
ivec2 uv = ivec2(xy + chunk_offset(i)) + cmd_img.offset;
mediump vec4 fg_rgba;
#ifdef ENABLE_IMAGE_INDICES
fg_rgba = imageLoad(images[cmd_img.index], uv);
#else
fg_rgba = imageLoad(images[0], uv);
#endif
fg_rgba = imageLoad(image_atlas, uv);
fg_rgba.rgb = fromsRGB(fg_rgba.rgb);
rgba[i] = fg_rgba;
}
@@ -98,18 +92,6 @@ void main() {
mediump float blend_alpha_stack[MAX_BLEND_STACK][CHUNK];
for (uint i = 0; i < CHUNK; i++) {
rgba[i] = vec4(0.0);
// TODO: remove this debug image support when the actual image method is plumbed.
#ifdef DEBUG_IMAGES
#ifdef ENABLE_IMAGE_INDICES
if (xy_uint.x < 1024 && xy_uint.y < 1024) {
rgba[i] = imageLoad(images[gl_WorkGroupID.x / 64], ivec2(xy_uint + chunk_offset(i))/4);
}
#else
if (xy_uint.x < 1024 && xy_uint.y < 1024) {
rgb[i] = imageLoad(images[0], ivec2(xy_uint + chunk_offset(i))/4).rgb;
}
#endif
#endif
}
mediump float area[CHUNK];
@@ -197,6 +179,19 @@ void main() {
}
cmd_ref.offset += 4 + CmdColor_size;
break;
case Cmd_LinGrad:
CmdLinGrad lin = Cmd_LinGrad_read(cmd_alloc, cmd_ref);
float d = lin.line_x * float(xy.x) + lin.line_y * float(xy.y) + lin.line_c;
for (uint k = 0; k < CHUNK; k++) {
vec2 chunk_xy = vec2(chunk_offset(k));
float my_d = d + lin.line_x * chunk_xy.x + lin.line_y * chunk_xy.y;
int x = int(round(clamp(my_d, 0.0, 1.0) * float(GRADIENT_WIDTH - 1)));
mediump vec4 fg_rgba = imageLoad(gradients, ivec2(x, int(lin.index)));
fg_rgba.rgb = fromsRGB(fg_rgba.rgb);
rgba[k] = fg_rgba;
}
cmd_ref.offset += 4 + CmdLinGrad_size;
break;
case Cmd_Image:
CmdImage fill_img = Cmd_Image_read(cmd_alloc, cmd_ref);
mediump vec4 img[CHUNK] = fillImage(xy_uint, fill_img);

Binary file not shown.

Binary file not shown.

View file

@@ -14,6 +14,10 @@ struct CmdColorRef {
uint offset;
};
struct CmdLinGradRef {
uint offset;
};
struct CmdImageRef {
uint offset;
};
@@ -62,6 +66,19 @@ CmdColorRef CmdColor_index(CmdColorRef ref, uint index) {
return CmdColorRef(ref.offset + index * CmdColor_size);
}
struct CmdLinGrad {
uint index;
float line_x;
float line_y;
float line_c;
};
#define CmdLinGrad_size 16
CmdLinGradRef CmdLinGrad_index(CmdLinGradRef ref, uint index) {
return CmdLinGradRef(ref.offset + index * CmdLinGrad_size);
}
struct CmdImage {
uint index;
ivec2 offset;
@@ -99,11 +116,12 @@ CmdJumpRef CmdJump_index(CmdJumpRef ref, uint index) {
#define Cmd_Solid 3
#define Cmd_Alpha 4
#define Cmd_Color 5
#define Cmd_Image 6
#define Cmd_BeginClip 7
#define Cmd_EndClip 8
#define Cmd_Jump 9
#define Cmd_size 12
#define Cmd_LinGrad 6
#define Cmd_Image 7
#define Cmd_BeginClip 8
#define Cmd_EndClip 9
#define Cmd_Jump 10
#define Cmd_size 20
CmdRef Cmd_index(CmdRef ref, uint index) {
return CmdRef(ref.offset + index * Cmd_size);
@@ -159,6 +177,28 @@ void CmdColor_write(Alloc a, CmdColorRef ref, CmdColor s) {
write_mem(a, ix + 0, s.rgba_color);
}
CmdLinGrad CmdLinGrad_read(Alloc a, CmdLinGradRef ref) {
uint ix = ref.offset >> 2;
uint raw0 = read_mem(a, ix + 0);
uint raw1 = read_mem(a, ix + 1);
uint raw2 = read_mem(a, ix + 2);
uint raw3 = read_mem(a, ix + 3);
CmdLinGrad s;
s.index = raw0;
s.line_x = uintBitsToFloat(raw1);
s.line_y = uintBitsToFloat(raw2);
s.line_c = uintBitsToFloat(raw3);
return s;
}
void CmdLinGrad_write(Alloc a, CmdLinGradRef ref, CmdLinGrad s) {
uint ix = ref.offset >> 2;
write_mem(a, ix + 0, s.index);
write_mem(a, ix + 1, floatBitsToUint(s.line_x));
write_mem(a, ix + 2, floatBitsToUint(s.line_y));
write_mem(a, ix + 3, floatBitsToUint(s.line_c));
}
CmdImage CmdImage_read(Alloc a, CmdImageRef ref) {
uint ix = ref.offset >> 2;
uint raw0 = read_mem(a, ix + 0);
@@ -222,6 +262,10 @@ CmdColor Cmd_Color_read(Alloc a, CmdRef ref) {
return CmdColor_read(a, CmdColorRef(ref.offset + 4));
}
CmdLinGrad Cmd_LinGrad_read(Alloc a, CmdRef ref) {
return CmdLinGrad_read(a, CmdLinGradRef(ref.offset + 4));
}
CmdImage Cmd_Image_read(Alloc a, CmdRef ref) {
return CmdImage_read(a, CmdImageRef(ref.offset + 4));
}
@@ -258,6 +302,11 @@ void Cmd_Color_write(Alloc a, CmdRef ref, CmdColor s) {
CmdColor_write(a, CmdColorRef(ref.offset + 4), s);
}
void Cmd_LinGrad_write(Alloc a, CmdRef ref, CmdLinGrad s) {
write_mem(a, ref.offset >> 2, Cmd_LinGrad);
CmdLinGrad_write(a, CmdLinGradRef(ref.offset + 4), s);
}
void Cmd_Image_write(Alloc a, CmdRef ref, CmdImage s) {
write_mem(a, ref.offset >> 2, Cmd_Image);
CmdImage_write(a, CmdImageRef(ref.offset + 4), s);

View file

@@ -18,6 +18,10 @@ struct FillColorRef {
uint offset;
};
struct FillLinGradientRef {
uint offset;
};
struct FillImageRef {
uint offset;
};
@@ -88,6 +92,18 @@ FillColorRef FillColor_index(FillColorRef ref, uint index) {
return FillColorRef(ref.offset + index * FillColor_size);
}
struct FillLinGradient {
uint index;
vec2 p0;
vec2 p1;
};
#define FillLinGradient_size 20
FillLinGradientRef FillLinGradient_index(FillLinGradientRef ref, uint index) {
return FillLinGradientRef(ref.offset + index * FillLinGradient_size);
}
struct FillImage {
uint index;
ivec2 offset;
@@ -145,12 +161,13 @@ SetFillModeRef SetFillMode_index(SetFillModeRef ref, uint index) {
#define Element_Quad 2
#define Element_Cubic 3
#define Element_FillColor 4
#define Element_SetLineWidth 5
#define Element_Transform 6
#define Element_BeginClip 7
#define Element_EndClip 8
#define Element_FillImage 9
#define Element_SetFillMode 10
#define Element_FillLinGradient 5
#define Element_FillImage 6
#define Element_SetLineWidth 7
#define Element_Transform 8
#define Element_BeginClip 9
#define Element_EndClip 10
#define Element_SetFillMode 11
#define Element_size 36
ElementRef Element_index(ElementRef ref, uint index) {
@@ -215,6 +232,20 @@ FillColor FillColor_read(FillColorRef ref) {
return s;
}
FillLinGradient FillLinGradient_read(FillLinGradientRef ref) {
uint ix = ref.offset >> 2;
uint raw0 = scene[ix + 0];
uint raw1 = scene[ix + 1];
uint raw2 = scene[ix + 2];
uint raw3 = scene[ix + 3];
uint raw4 = scene[ix + 4];
FillLinGradient s;
s.index = raw0;
s.p0 = vec2(uintBitsToFloat(raw1), uintBitsToFloat(raw2));
s.p1 = vec2(uintBitsToFloat(raw3), uintBitsToFloat(raw4));
return s;
}
FillImage FillImage_read(FillImageRef ref) {
uint ix = ref.offset >> 2;
uint raw0 = scene[ix + 0];
@@ -287,6 +318,14 @@ FillColor Element_FillColor_read(ElementRef ref) {
return FillColor_read(FillColorRef(ref.offset + 4));
}
FillLinGradient Element_FillLinGradient_read(ElementRef ref) {
return FillLinGradient_read(FillLinGradientRef(ref.offset + 4));
}
FillImage Element_FillImage_read(ElementRef ref) {
return FillImage_read(FillImageRef(ref.offset + 4));
}
SetLineWidth Element_SetLineWidth_read(ElementRef ref) {
return SetLineWidth_read(SetLineWidthRef(ref.offset + 4));
}
@@ -303,10 +342,6 @@ Clip Element_EndClip_read(ElementRef ref) {
return Clip_read(ClipRef(ref.offset + 4));
}
FillImage Element_FillImage_read(ElementRef ref) {
return FillImage_read(FillImageRef(ref.offset + 4));
}
SetFillMode Element_SetFillMode_read(ElementRef ref) {
return SetFillMode_read(SetFillModeRef(ref.offset + 4));
}

View file

@@ -28,6 +28,8 @@
#define LG_N_TILE (7 + LG_WG_FACTOR)
#define N_SLICE (N_TILE / 32)
#define GRADIENT_WIDTH 512
struct Config {
uint n_elements; // paths
uint n_pathseg;
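GRADIENT_WIDTH must stay in sync with the CPU-side ramp width (gradient::N_SAMPLES in piet-gpu/src/gradient.rs, also 512). Nothing enforces that today; an illustrative compile-time guard on the Rust side (not in the commit) could be:

// Fails to compile if the ramp width ever drifts from the shader's 512.
const _RAMP_WIDTH_MATCHES: [(); 512] = [(); crate::gradient::N_SAMPLES];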

View file

@@ -40,6 +40,7 @@ void main() {
int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
switch (tag) {
case Annotated_Color:
case Annotated_LinGradient:
case Annotated_Image:
case Annotated_BeginClip:
case Annotated_EndClip:

Binary file not shown.

piet-gpu/src/gradient.rs (new file, 204 lines)
View file

@@ -0,0 +1,204 @@
// Copyright 2021 The piet-gpu authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Also licensed under MIT license, at your choice.
//! Implementation of gradients.
use std::collections::hash_map::{Entry, HashMap};
use piet::{Color, FixedLinearGradient, GradientStop};
#[derive(Clone)]
pub struct BakedGradient {
ramp: Vec<u32>,
}
/// This is basically the same type as scene::FillLinGradient, so we could
/// potentially use that directly.
#[derive(Clone)]
pub struct LinearGradient {
pub(crate) start: [f32; 2],
pub(crate) end: [f32; 2],
pub(crate) ramp_id: u32,
}
#[derive(Default)]
pub struct RampCache {
ramps: Vec<GradientRamp>,
map: HashMap<GradientRamp, usize>,
}
#[derive(Clone, Hash, PartialEq, Eq)]
struct GradientRamp(Vec<u32>);
pub const N_SAMPLES: usize = 512;
// TODO: make this dynamic
pub const N_GRADIENTS: usize = 256;
#[derive(Clone, Copy)]
struct PremulRgba([f64; 4]);
impl PremulRgba {
fn from_color(c: &Color) -> PremulRgba {
let rgba = c.as_rgba();
let a = rgba.3;
// TODO: sRGB nonlinearity? This is complicated.
PremulRgba([rgba.0 * a, rgba.1 * a, rgba.2 * a, a])
}
fn to_u32(&self) -> u32 {
let z = self.0;
let r = (z[0].max(0.0).min(1.0) * 255.0).round() as u32;
let g = (z[1].max(0.0).min(1.0) * 255.0).round() as u32;
let b = (z[2].max(0.0).min(1.0) * 255.0).round() as u32;
let a = (z[3].max(0.0).min(1.0) * 255.0).round() as u32;
r | (g << 8) | (b << 16) | (a << 24)
}
fn lerp(&self, other: PremulRgba, t: f64) -> PremulRgba {
fn l(a: f64, b: f64, t: f64) -> f64 {
a * (1.0 - t) + b * t
}
let a = self.0;
let b = other.0;
PremulRgba([
l(a[0], b[0], t),
l(a[1], b[1], t),
l(a[2], b[2], t),
l(a[3], b[3], t),
])
}
}
impl GradientRamp {
fn from_stops(stops: &[GradientStop]) -> GradientRamp {
let mut last_u = 0.0;
let mut last_c = PremulRgba::from_color(&stops[0].color);
let mut this_u = last_u;
let mut this_c = last_c;
let mut j = 0;
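// Walk the stops in lockstep with the evenly spaced samples: (last_u, last_c)
// and (this_u, this_c) always bracket the current sample position u.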
let v = (0..N_SAMPLES)
.map(|i| {
let u = (i as f64) / (N_SAMPLES - 1) as f64;
while u > this_u {
last_u = this_u;
last_c = this_c;
if let Some(s) = stops.get(j + 1) {
this_u = s.pos as f64;
this_c = PremulRgba::from_color(&s.color);
j += 1;
} else {
break;
}
}
let du = this_u - last_u;
let c = if du < 1e-9 {
this_c
} else {
last_c.lerp(this_c, (u - last_u) / du)
};
c.to_u32()
})
.collect();
GradientRamp(v)
}
/// For debugging/development.
pub(crate) fn dump(&self) {
for val in &self.0 {
println!("{:x}", val);
}
}
}
impl RampCache {
/// Add a gradient ramp to the cache.
///
/// Currently there is no eviction, so if the gradient is animating, there may
/// be resource leaks. In order to support lifetime management, the signature
/// should probably change so it returns a ref-counted handle, so that eviction
/// is deferred until the last handle is dropped.
///
/// This function is pretty expensive, but the result is lightweight.
fn add_ramp(&mut self, ramp: &[GradientStop]) -> usize {
let ramp = GradientRamp::from_stops(ramp);
match self.map.entry(ramp) {
Entry::Occupied(o) => *o.get(),
Entry::Vacant(v) => {
let idx = self.ramps.len();
self.ramps.push(v.key().clone());
v.insert(idx);
idx
}
}
}
pub fn add_linear_gradient(&mut self, lin: &FixedLinearGradient) -> LinearGradient {
let ramp_id = self.add_ramp(&lin.stops);
LinearGradient {
ramp_id: ramp_id as u32,
start: crate::render_ctx::to_f32_2(lin.start),
end: crate::render_ctx::to_f32_2(lin.end),
}
}
/// Dump the contents of a gradient. This is for debugging.
#[allow(unused)]
pub(crate) fn dump_gradient(&self, lin: &LinearGradient) {
println!("id = {}", lin.ramp_id);
self.ramps[lin.ramp_id as usize].dump();
}
/// Get the ramp data.
///
/// This concatenates all the ramps; we'll want a more sophisticated approach to
/// incremental update.
pub fn get_ramp_data(&self) -> Vec<u32> {
let mut result = Vec::with_capacity(N_SAMPLES * self.ramps.len());
for ramp in &self.ramps {
result.extend(&ramp.0);
}
result
}
}
#[cfg(test)]
mod test {
use super::RampCache;
use piet::kurbo::Point;
use piet::{Color, FixedLinearGradient, GradientStop};
#[test]
fn simple_ramp() {
let stops = vec![
GradientStop {
color: Color::WHITE,
pos: 0.0,
},
GradientStop {
color: Color::BLACK,
pos: 1.0,
},
];
let mut cache = RampCache::default();
let lin = FixedLinearGradient {
start: Point::new(0.0, 0.0),
end: Point::new(0.0, 1.0),
stops,
};
let our_lin = cache.add_linear_gradient(&lin);
cache.dump_gradient(&our_lin);
}
}
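A second test that could sit alongside simple_ramp above; a sketch (not in the commit) of the properties the cache provides: deduplication via the HashMap, and premultiplied RGBA packed as r | g << 8 | b << 16 | a << 24.

use super::{RampCache, N_SAMPLES};
use piet::kurbo::Point;
use piet::{Color, FixedLinearGradient, GradientStop};

#[test]
fn ramp_dedup_and_endpoints() {
    let lin = FixedLinearGradient {
        start: Point::new(0.0, 0.0),
        end: Point::new(0.0, 1.0),
        stops: vec![
            GradientStop { color: Color::WHITE, pos: 0.0 },
            GradientStop { color: Color::BLACK, pos: 1.0 },
        ],
    };
    let mut cache = RampCache::default();
    let a = cache.add_linear_gradient(&lin);
    let b = cache.add_linear_gradient(&lin);
    // Identical stop lists hash to the same ramp; no second ramp is baked.
    assert_eq!(a.ramp_id, b.ramp_id);
    let data = cache.get_ramp_data();
    assert_eq!(data.len(), N_SAMPLES); // one 512-texel row
    assert_eq!(data[0], 0xffff_ffff); // premultiplied white
    assert_eq!(data[N_SAMPLES - 1], 0xff00_0000); // opaque black
}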

View file

@@ -1,3 +1,4 @@
mod gradient;
mod pico_svg;
mod render_ctx;
mod text;
@@ -8,8 +9,11 @@ pub use render_ctx::PietGpuRenderContext;
use rand::{Rng, RngCore};
use piet::kurbo::{Affine, BezPath, Circle, Point, Shape, Vec2};
use piet::{Color, ImageFormat, RenderContext, Text, TextAttribute, TextLayoutBuilder};
use piet::kurbo::{BezPath, Circle, Point, Rect, Shape, Vec2};
use piet::{
Color, FixedGradient, FixedLinearGradient, GradientStop, ImageFormat, RenderContext, Text,
TextAttribute, TextLayoutBuilder,
};
use piet_gpu_types::encoder::Encode;
@@ -80,6 +84,7 @@ pub fn render_scene(rc: &mut impl RenderContext) {
//render_cardioid(rc);
render_clip_test(rc);
render_alpha_test(rc);
render_gradient_test(rc);
render_text_test(rc);
//render_tiger(rc);
}
@@ -151,6 +156,28 @@ fn render_alpha_test(rc: &mut impl RenderContext) {
rc.restore();
}
#[allow(unused)]
fn render_gradient_test(rc: &mut impl RenderContext) {
let stops = vec![
GradientStop {
color: Color::rgb8(0, 255, 0),
pos: 0.0,
},
GradientStop {
color: Color::BLACK,
pos: 1.0,
},
];
let lin = FixedLinearGradient {
start: Point::new(0.0, 100.0),
end: Point::new(0.0, 300.0),
stops,
};
let brush = FixedGradient::Linear(lin);
//let brush = Color::rgb8(0, 128, 0);
rc.fill(Rect::new(100.0, 100.0, 300.0, 300.0), &brush);
}
fn diamond(origin: Point) -> impl Shape {
let mut path = BezPath::new();
const SIZE: f64 = 50.0;
@@ -250,68 +277,35 @@ pub struct Renderer {
// Keep a reference to the image so that it is not destroyed.
_bg_image: Image,
gradient_buf: Buffer,
gradients: Image,
}
impl Renderer {
pub unsafe fn new(
session: &Session,
scene: &[u8],
n_paths: usize,
n_pathseg: usize,
n_trans: usize,
) -> Result<Self, Error> {
/// Create a new renderer.
pub unsafe fn new(session: &Session) -> Result<Self, Error> {
let dev = BufferUsage::STORAGE | BufferUsage::COPY_DST;
let host_upload = BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC;
let n_elements = scene.len() / piet_gpu_types::scene::Element::fixed_size();
println!(
"scene: {} elements, {} paths, {} path_segments, {} transforms",
n_elements, n_paths, n_pathseg, n_trans
);
let scene_buf = session.create_buffer_init(&scene[..], dev).unwrap();
// This may be inadequate for very complex scenes (Paris etc.)
// TODO: separate staging buffer (if needed)
let scene_buf = session.create_buffer(1 * 1024 * 1024, host_upload).unwrap();
let state_buf = session.create_buffer(1 * 1024 * 1024, dev)?;
let image_dev = session.create_image2d(WIDTH as u32, HEIGHT as u32)?;
// TODO: constants
const PATH_SIZE: usize = 12;
const BIN_SIZE: usize = 8;
const PATHSEG_SIZE: usize = 52;
const ANNO_SIZE: usize = 32;
const TRANS_SIZE: usize = 24;
let mut alloc = 0;
let tile_base = alloc;
alloc += ((n_paths + 3) & !3) * PATH_SIZE;
let bin_base = alloc;
alloc += ((n_paths + 255) & !255) * BIN_SIZE;
let ptcl_base = alloc;
alloc += WIDTH_IN_TILES * HEIGHT_IN_TILES * PTCL_INITIAL_ALLOC;
let pathseg_base = alloc;
alloc += (n_pathseg * PATHSEG_SIZE + 3) & !3;
let anno_base = alloc;
alloc += (n_paths * ANNO_SIZE + 3) & !3;
let trans_base = alloc;
alloc += (n_trans * TRANS_SIZE + 3) & !3;
let config = &[
n_paths as u32,
n_pathseg as u32,
WIDTH_IN_TILES as u32,
HEIGHT_IN_TILES as u32,
tile_base as u32,
bin_base as u32,
ptcl_base as u32,
pathseg_base as u32,
anno_base as u32,
trans_base as u32,
];
let config_buf = session.create_buffer_init(&config[..], dev).unwrap();
// Note: this must be updated when the config struct size changes.
const CONFIG_BUFFER_SIZE: u64 = 40;
// TODO: separate staging buffer (if needed)
let config_buf = session
.create_buffer(CONFIG_BUFFER_SIZE, host_upload)
.unwrap();
// Perhaps we could avoid the explicit staging buffer by having a buffer creation method
// that takes both initial contents and a size.
let mut memory_buf_host = session.create_buffer(2 * 4, host_upload)?;
let memory_buf_host = session.create_buffer(2 * 4, host_upload)?;
let memory_buf_dev = session.create_buffer(128 * 1024 * 1024, dev)?;
memory_buf_host.write(&[alloc as u32, 0 /* Overflow flag */])?;
let el_code = ShaderCode::Spv(include_bytes!("../shader/elements.spv"));
let el_pipeline = session.create_simple_compute_pipeline(el_code, 4)?;
@@ -353,22 +347,17 @@ impl Renderer {
let bg_image = Self::make_test_bg_image(&session);
let k4_code = if session.gpu_info().has_descriptor_indexing {
ShaderCode::Spv(include_bytes!("../shader/kernel4_idx.spv"))
} else {
println!("doing non-indexed k4");
ShaderCode::Spv(include_bytes!("../shader/kernel4.spv"))
};
// This is an arbitrary limit on the number of textures that can be referenced by
// the fine rasterizer. To set it for real, we probably want to pay attention both
// to the device limit (maxDescriptorSetSampledImages) and to the number of
// images encoded (I believe there's a cost when allocating descriptor pools). If
// it can't be satisfied, then for compatibility we'll probably want to fall back
// to an atlasing approach.
//
// However, we're adding only one texture for now. Avoid a harmless Vulkan validation
// error by using a tight bound.
let max_textures = 1;
const GRADIENT_BUF_SIZE: usize =
crate::gradient::N_GRADIENTS * crate::gradient::N_SAMPLES * 4;
let gradient_buf = session.create_buffer(GRADIENT_BUF_SIZE as u64, host_upload)?;
let gradients = Self::make_gradient_image(&session);
let k4_code = ShaderCode::Spv(include_bytes!("../shader/kernel4.spv"));
// This is a bit of a stand-in for future development. For now, we assume one
// atlas image for all images, and another image for the gradients. In the future,
// on GPUs that support it, we will probably want to go to descriptor indexing in
// order to cut down on allocation and copying for the atlas image.
let max_textures = 2;
let k4_pipeline = session
.pipeline_builder()
.add_buffers(2)
@@ -379,7 +368,7 @@ impl Renderer {
.descriptor_set_builder()
.add_buffers(&[&memory_buf_dev, &config_buf])
.add_images(&[&image_dev])
.add_textures(&[&bg_image])
.add_textures(&[&bg_image, &gradients])
.build(&session, &k4_pipeline)?;
Ok(Renderer {
@@ -403,13 +392,82 @@ impl Renderer {
coarse_ds,
k4_pipeline,
k4_ds,
n_elements,
n_paths,
n_pathseg,
n_elements: 0,
n_paths: 0,
n_pathseg: 0,
_bg_image: bg_image,
gradient_buf,
gradients,
})
}
/// Convert the scene in the render context to GPU resources.
///
/// At present, this requires that any command buffer submission has completed.
/// A future evolution will handle staging of the next frame's scene while the
/// rendering of the current frame is in flight.
pub fn upload_render_ctx(
&mut self,
render_ctx: &mut PietGpuRenderContext,
) -> Result<(), Error> {
let n_paths = render_ctx.path_count();
let n_pathseg = render_ctx.pathseg_count();
let n_trans = render_ctx.trans_count();
self.n_paths = n_paths;
self.n_pathseg = n_pathseg;
// These constants depend on encoding and may need to be updated.
// Perhaps we can plumb these from piet-gpu-derive?
const PATH_SIZE: usize = 12;
const BIN_SIZE: usize = 8;
const PATHSEG_SIZE: usize = 52;
const ANNO_SIZE: usize = 40;
const TRANS_SIZE: usize = 24;
let mut alloc = 0;
let tile_base = alloc;
alloc += ((n_paths + 3) & !3) * PATH_SIZE;
let bin_base = alloc;
alloc += ((n_paths + 255) & !255) * BIN_SIZE;
let ptcl_base = alloc;
alloc += WIDTH_IN_TILES * HEIGHT_IN_TILES * PTCL_INITIAL_ALLOC;
let pathseg_base = alloc;
alloc += (n_pathseg * PATHSEG_SIZE + 3) & !3;
let anno_base = alloc;
alloc += (n_paths * ANNO_SIZE + 3) & !3;
let trans_base = alloc;
alloc += (n_trans * TRANS_SIZE + 3) & !3;
let config = &[
n_paths as u32,
n_pathseg as u32,
WIDTH_IN_TILES as u32,
HEIGHT_IN_TILES as u32,
tile_base as u32,
bin_base as u32,
ptcl_base as u32,
pathseg_base as u32,
anno_base as u32,
trans_base as u32,
];
unsafe {
let scene = render_ctx.get_scene_buf();
self.n_elements = scene.len() / piet_gpu_types::scene::Element::fixed_size();
// TODO: reallocate scene buffer if size is inadequate
assert!(self.scene_buf.size() as usize >= scene.len());
self.scene_buf.write(scene)?;
self.config_buf.write(config)?;
self.memory_buf_host
.write(&[alloc as u32, 0 /* Overflow flag */])?;
// Upload gradient data.
let ramp_data = render_ctx.get_ramp_data();
if !ramp_data.is_empty() {
assert!(self.gradient_buf.size() as usize >= std::mem::size_of_val(&*ramp_data));
self.gradient_buf.write(&ramp_data)?;
}
}
Ok(())
}
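The bit-masking above rounds each region up to a convenient boundary (and ANNO_SIZE is now 40, matching the enlarged Annotated_size in annotated.h). Made explicit, with a worked example (illustrative helpers, not in the commit):

fn align4(x: usize) -> usize {
    (x + 3) & !3 // round up to a u32 boundary
}

fn align256(x: usize) -> usize {
    (x + 255) & !255 // binning operates at 256-path granularity
}

// e.g. with n_paths = 5 and n_pathseg = 7:
//   tile region:    align4(5) * PATH_SIZE    = 8 * 12  = 96 bytes
//   bin region:     align256(5) * BIN_SIZE   = 256 * 8 = 2048 bytes
//   pathseg region: align4(7 * PATHSEG_SIZE) = align4(364) = 364 bytes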
pub unsafe fn record(&self, cmd_buf: &mut CmdBuf, query_pool: &QueryPool) {
cmd_buf.copy_buffer(&self.memory_buf_host, &self.memory_buf_dev);
cmd_buf.clear_buffer(&self.state_buf, None);
@@ -419,6 +477,14 @@ impl Renderer {
ImageLayout::Undefined,
ImageLayout::General,
);
// TODO: make gradient upload optional, only if it's changed
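// The Undefined -> BlitDst transition discards any previous contents (fine
// here, since the copy rewrites the image); the follow-up barrier to General
// then makes the fresh texels visible to shader loads in kernel4.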
cmd_buf.image_barrier(
&self.gradients,
ImageLayout::Undefined,
ImageLayout::BlitDst,
);
cmd_buf.copy_buffer_to_image(&self.gradient_buf, &self.gradients);
cmd_buf.image_barrier(&self.gradients, ImageLayout::BlitDst, ImageLayout::General);
cmd_buf.reset_query_pool(&query_pool);
cmd_buf.write_timestamp(&query_pool, 0);
cmd_buf.dispatch(
@@ -494,9 +560,7 @@ impl Renderer {
if format != ImageFormat::RgbaPremul {
return Err("unsupported image format".into());
}
let host_upload = BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC;
let mut buffer = session.create_buffer(buf.len() as u64, host_upload)?;
buffer.write(buf)?;
let buffer = session.create_buffer_init(&buf, BufferUsage::COPY_SRC)?;
let image = session.create_image2d(width.try_into()?, height.try_into()?)?;
let mut cmd_buf = session.cmd_buf()?;
cmd_buf.begin();
@@ -530,4 +594,12 @@ impl Renderer {
}
Self::make_image(session, WIDTH, HEIGHT, &buf, ImageFormat::RgbaPremul).unwrap()
}
fn make_gradient_image(session: &Session) -> Image {
unsafe {
session
.create_image2d(gradient::N_SAMPLES as u32, gradient::N_GRADIENTS as u32)
.unwrap()
}
}
}

View file

@@ -12,9 +12,11 @@ use crate::MAX_BLEND_STACK;
use piet_gpu_types::encoder::{Encode, Encoder};
use piet_gpu_types::scene::{
Clip, CubicSeg, Element, FillColor, LineSeg, QuadSeg, SetFillMode, SetLineWidth, Transform,
Clip, CubicSeg, Element, FillColor, FillLinGradient, LineSeg, QuadSeg, SetFillMode,
SetLineWidth, Transform,
};
use crate::gradient::{LinearGradient, RampCache};
use crate::text::Font;
pub use crate::text::{PathEncoder, PietGpuText, PietGpuTextLayout, PietGpuTextLayoutBuilder};
@@ -39,12 +41,14 @@ pub struct PietGpuRenderContext {
cur_transform: Affine,
state_stack: Vec<State>,
clip_stack: Vec<ClipElement>,
ramp_cache: RampCache,
}
#[derive(Clone)]
pub enum PietGpuBrush {
Solid(u32),
Gradient,
LinGradient(LinearGradient),
}
#[derive(Default)]
@@ -93,6 +97,7 @@ impl PietGpuRenderContext {
cur_transform: Affine::default(),
state_stack: Vec::new(),
clip_stack: Vec::new(),
ramp_cache: RampCache::default(),
}
}
@@ -113,6 +118,10 @@ impl PietGpuRenderContext {
self.trans_count
}
pub fn get_ramp_data(&self) -> Vec<u32> {
self.ramp_cache.get_ramp_data()
}
pub(crate) fn set_fill_mode(&mut self, fill_mode: FillMode) {
if self.fill_mode != fill_mode {
self.elements.push(Element::SetFillMode(SetFillMode {
@@ -149,8 +158,14 @@ impl RenderContext for PietGpuRenderContext {
PietGpuBrush::Solid(premul.as_rgba_u32())
}
fn gradient(&mut self, _gradient: impl Into<FixedGradient>) -> Result<Self::Brush, Error> {
Ok(Self::Brush::Gradient)
fn gradient(&mut self, gradient: impl Into<FixedGradient>) -> Result<Self::Brush, Error> {
match gradient.into() {
FixedGradient::Linear(lin) => {
let lin = self.ramp_cache.add_linear_gradient(&lin);
Ok(PietGpuBrush::LinGradient(lin))
}
_ => todo!("don't do radial gradients yet"),
}
}
fn clear(&mut self, _color: Color) {}
@@ -164,18 +179,11 @@ impl RenderContext for PietGpuRenderContext {
}
self.set_fill_mode(FillMode::Stroke);
let brush = brush.make_brush(self, || shape.bounding_box()).into_owned();
match brush {
PietGpuBrush::Solid(rgba_color) => {
// Note: the bbox contribution of stroke becomes more complicated with miter joins.
self.accumulate_bbox(|| shape.bounding_box() + Insets::uniform(width * 0.5));
let path = shape.path_elements(TOLERANCE);
self.encode_path(path, false);
let stroke = FillColor { rgba_color };
self.elements.push(Element::FillColor(stroke));
self.path_count += 1;
}
_ => (),
}
// Note: the bbox contribution of stroke becomes more complicated with miter joins.
self.accumulate_bbox(|| shape.bounding_box() + Insets::uniform(width * 0.5));
let path = shape.path_elements(TOLERANCE);
self.encode_path(path, false);
self.encode_brush(&brush);
}
fn stroke_styled(
@@ -189,17 +197,13 @@
fn fill(&mut self, shape: impl Shape, brush: &impl IntoBrush<Self>) {
let brush = brush.make_brush(self, || shape.bounding_box()).into_owned();
if let PietGpuBrush::Solid(rgba_color) = brush {
// Note: we might get a good speedup from using an approximate bounding box.
// Perhaps that should be added to kurbo.
self.accumulate_bbox(|| shape.bounding_box());
let path = shape.path_elements(TOLERANCE);
self.set_fill_mode(FillMode::Nonzero);
self.encode_path(path, true);
let fill = FillColor { rgba_color };
self.elements.push(Element::FillColor(fill));
self.path_count += 1;
}
// Note: we might get a good speedup from using an approximate bounding box.
// Perhaps that should be added to kurbo.
self.accumulate_bbox(|| shape.bounding_box());
let path = shape.path_elements(TOLERANCE);
self.set_fill_mode(FillMode::Nonzero);
self.encode_path(path, true);
self.encode_brush(&brush);
}
fn fill_even_odd(&mut self, _shape: impl Shape, _brush: &impl IntoBrush<Self>) {}
@@ -501,6 +505,27 @@ impl PietGpuRenderContext {
self.elements.push(Element::Transform(transform));
self.trans_count += 1;
}
fn encode_brush(&mut self, brush: &PietGpuBrush) {
match brush {
PietGpuBrush::Solid(rgba_color) => {
let fill = FillColor {
rgba_color: *rgba_color,
};
self.elements.push(Element::FillColor(fill));
self.path_count += 1;
}
PietGpuBrush::LinGradient(lin) => {
let fill_lin = FillLinGradient {
index: lin.ramp_id,
p0: lin.start,
p1: lin.end,
};
self.elements.push(Element::FillLinGradient(fill_lin));
self.path_count += 1;
}
}
}
}
impl IntoBrush<PietGpuRenderContext> for PietGpuBrush {