Merge branch 'main' into failure

Raph Levien 2023-05-18 11:14:22 -07:00
commit f3587b1240
32 changed files with 931 additions and 294 deletions

View file

@ -2,7 +2,6 @@ on:
   push:
     branches:
       - main
-      - dev
   pull_request:
 jobs:
@ -11,17 +10,35 @@ jobs:
     name: cargo fmt
     steps:
       - uses: actions/checkout@v2
+      - uses: dtolnay/rust-toolchain@stable
+      - run: cargo fmt --all --check
+  compiles:
+    runs-on: ubuntu-latest
+    name: Check workspace compile
+    steps:
+      - uses: actions/checkout@v2
+      - uses: dtolnay/rust-toolchain@stable
+      - name: Install native dependencies
+        run: sudo apt-get update; sudo apt-get install --no-install-recommends libasound2-dev libudev-dev
+      - run: cargo check --workspace
+      # --exclude with_bevy # for when bevy has an outdated wgpu version
+      # -Dwarnings # for when we have fixed unused code warnings
-      - name: install stable toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-          profile: minimal
-          components: rustfmt
-          override: true
-      - name: cargo fmt
-        uses: actions-rs/cargo@v1
-        with:
-          command: fmt
-          args: --all -- --check
+  wasm:
+    runs-on: ubuntu-latest
+    name: Ensure with_winit compiles on WASM
+    steps:
+      - uses: actions/checkout@v2
+      - uses: dtolnay/rust-toolchain@stable
+        with:
+          targets: wasm32-unknown-unknown
+      # cargo-run-wasm does not provide a way to determine that it failed programmatically.
+      # Ideally, fix this and use:
+      # - run: cargo run_wasm -- -p with_winit --bin with_winit_bin --build-only
+      - name: Allow using WebGPU in web_sys
+        run: |
+          echo "RUSTFLAGS=--cfg=web_sys_unstable_apis" >> "$GITHUB_ENV"
+      - run: cargo check -p with_winit --target wasm32-unknown-unknown

View file

@ -10,7 +10,8 @@
     "pathtag": "${workspaceFolder}/shader/shared/pathtag.wgsl",
     "ptcl": "${workspaceFolder}/shader/shared/ptcl.wgsl",
     "segment": "${workspaceFolder}/shader/shared/segment.wgsl",
-    "tile": "${workspaceFolder}/shader/shared/tile.wgsl"
+    "tile": "${workspaceFolder}/shader/shared/tile.wgsl",
+    "transform": "${workspaceFolder}/shader/shared/transform.wgsl"
   },
   "wgsl-analyzer.diagnostics.nagaVersion": "main",
   "wgsl-analyzer.preprocessor.shaderDefs": [

View file

@ -3,8 +3,14 @@ name = "vello_encoding"
 version = "0.1.0"
 edition = "2021"
+[features]
+default = ["full"]
+# Enables support for the full pipeline including late-bound
+# resources (gradients, images and glyph runs)
+full = ["fello", "guillotiere"]
 [dependencies]
 bytemuck = { workspace = true }
-fello = { workspace = true }
+fello = { workspace = true, optional = true }
 peniko = { workspace = true }
-guillotiere = "0.6.2"
+guillotiere = { version = "0.6.2", optional = true }

View file

@ -196,6 +196,7 @@ impl<T: Sized> BufferSize<T> {
     }
     /// Returns the number of elements.
+    #[allow(clippy::len_without_is_empty)]
     pub const fn len(self) -> u32 {
         self.len
     }

View file

@ -22,7 +22,7 @@ impl DrawTag {
     pub const LINEAR_GRADIENT: Self = Self(0x114);
     /// Radial gradient fill.
-    pub const RADIAL_GRADIENT: Self = Self(0x2dc);
+    pub const RADIAL_GRADIENT: Self = Self(0x29c);
     /// Image fill.
     pub const IMAGE: Self = Self(0x248);
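A note on the constant change above: the draw tag value itself encodes layout information, and (as I read vello_encoding's DrawTag helpers; treat the exact bit positions as an assumption) the info-buffer size sits in bits 6..10 of the tag. The radial gradient's per-draw info shrinks by one word with the conical encoding later in this diff, which is exactly what 0x2dc -> 0x29c expresses:

    // Illustrative sketch, not part of the commit: decode the info-size field of a
    // draw tag value, assuming the (tag >> 6) & 0xf packing used by DrawTag::info_size.
    fn info_size(tag: u32) -> u32 {
        (tag >> 6) & 0xf
    }

    fn main() {
        assert_eq!(info_size(0x2dc), 11); // old radial gradient layout: 11 info words
        assert_eq!(info_size(0x29c), 10); // new conical layout: 10 info words
        assert_eq!(info_size(0x248), 9);  // image fill, unchanged by this commit
    }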

View file

@ -1,13 +1,16 @@
 // Copyright 2022 The Vello authors
 // SPDX-License-Identifier: Apache-2.0 OR MIT
-use super::{
-    resolve::Patch, DrawColor, DrawImage, DrawLinearGradient, DrawRadialGradient, DrawTag, Glyph,
-    GlyphRun, PathEncoder, PathTag, Transform,
-};
-use fello::NormalizedCoord;
-use peniko::{kurbo::Shape, BlendMode, BrushRef, ColorStop, Extend, GradientKind, Image};
+use super::{DrawColor, DrawTag, PathEncoder, PathTag, Transform};
+use peniko::{kurbo::Shape, BlendMode, BrushRef, Color};
+#[cfg(feature = "full")]
+use {
+    super::{DrawImage, DrawLinearGradient, DrawRadialGradient, Glyph, GlyphRun, Patch},
+    fello::NormalizedCoord,
+    peniko::{ColorStop, Extend, GradientKind, Image},
+};
 /// Encoded data streams for a scene.
 #[derive(Clone, Default)]
@ -20,20 +23,13 @@ pub struct Encoding {
     pub draw_tags: Vec<DrawTag>,
     /// The draw data stream.
     pub draw_data: Vec<u8>,
-    /// Draw data patches for late bound resources.
-    pub patches: Vec<Patch>,
-    /// Color stop collection for gradients.
-    pub color_stops: Vec<ColorStop>,
     /// The transform stream.
     pub transforms: Vec<Transform>,
     /// The line width stream.
     pub linewidths: Vec<f32>,
-    /// Positioned glyph buffer.
-    pub glyphs: Vec<Glyph>,
-    /// Sequences of glyphs.
-    pub glyph_runs: Vec<GlyphRun>,
-    /// Normalized coordinate buffer for variable fonts.
-    pub normalized_coords: Vec<NormalizedCoord>,
+    /// Late bound resource data.
+    #[cfg(feature = "full")]
+    pub resources: Resources,
     /// Number of encoded paths.
     pub n_paths: u32,
     /// Number of encoded path segments.
@ -63,15 +59,12 @@ impl Encoding {
         self.linewidths.clear();
         self.draw_data.clear();
         self.draw_tags.clear();
-        self.glyphs.clear();
-        self.glyph_runs.clear();
-        self.normalized_coords.clear();
         self.n_paths = 0;
         self.n_path_segments = 0;
         self.n_clips = 0;
         self.n_open_clips = 0;
-        self.patches.clear();
-        self.color_stops.clear();
+        #[cfg(feature = "full")]
+        self.resources.reset();
         if !is_fragment {
             self.transforms.push(Transform::IDENTITY);
             self.linewidths.push(-1.0);
@ -80,62 +73,76 @@ impl Encoding {
/// Appends another encoding to this one with an optional transform. /// Appends another encoding to this one with an optional transform.
pub fn append(&mut self, other: &Self, transform: &Option<Transform>) { pub fn append(&mut self, other: &Self, transform: &Option<Transform>) {
let stops_base = self.color_stops.len();
let glyph_runs_base = self.glyph_runs.len();
let glyphs_base = self.glyphs.len();
let coords_base = self.normalized_coords.len();
let offsets = self.stream_offsets();
#[cfg(feature = "full")]
let glyph_runs_base = {
let offsets = self.stream_offsets();
let stops_base = self.resources.color_stops.len();
let glyph_runs_base = self.resources.glyph_runs.len();
let glyphs_base = self.resources.glyphs.len();
let coords_base = self.resources.normalized_coords.len();
self.resources
.glyphs
.extend_from_slice(&other.resources.glyphs);
self.resources
.normalized_coords
.extend_from_slice(&other.resources.normalized_coords);
self.resources
.glyph_runs
.extend(other.resources.glyph_runs.iter().cloned().map(|mut run| {
run.glyphs.start += glyphs_base;
run.normalized_coords.start += coords_base;
run.stream_offsets.path_tags += offsets.path_tags;
run.stream_offsets.path_data += offsets.path_data;
run.stream_offsets.draw_tags += offsets.draw_tags;
run.stream_offsets.draw_data += offsets.draw_data;
run.stream_offsets.transforms += offsets.transforms;
run.stream_offsets.linewidths += offsets.linewidths;
run
}));
self.resources
.patches
.extend(other.resources.patches.iter().map(|patch| match patch {
Patch::Ramp {
draw_data_offset: offset,
stops,
extend,
} => {
let stops = stops.start + stops_base..stops.end + stops_base;
Patch::Ramp {
draw_data_offset: offset + offsets.draw_data,
stops,
extend: *extend,
}
}
Patch::GlyphRun { index } => Patch::GlyphRun {
index: index + glyph_runs_base,
},
Patch::Image {
image,
draw_data_offset,
} => Patch::Image {
image: image.clone(),
draw_data_offset: *draw_data_offset + offsets.draw_data,
},
}));
self.resources
.color_stops
.extend_from_slice(&other.resources.color_stops);
glyph_runs_base
};
self.path_tags.extend_from_slice(&other.path_tags); self.path_tags.extend_from_slice(&other.path_tags);
self.path_data.extend_from_slice(&other.path_data); self.path_data.extend_from_slice(&other.path_data);
self.draw_tags.extend_from_slice(&other.draw_tags); self.draw_tags.extend_from_slice(&other.draw_tags);
self.draw_data.extend_from_slice(&other.draw_data); self.draw_data.extend_from_slice(&other.draw_data);
self.glyphs.extend_from_slice(&other.glyphs);
self.normalized_coords
.extend_from_slice(&other.normalized_coords);
self.glyph_runs
.extend(other.glyph_runs.iter().cloned().map(|mut run| {
run.glyphs.start += glyphs_base;
run.normalized_coords.start += coords_base;
run.stream_offsets.path_tags += offsets.path_tags;
run.stream_offsets.path_data += offsets.path_data;
run.stream_offsets.draw_tags += offsets.draw_tags;
run.stream_offsets.draw_data += offsets.draw_data;
run.stream_offsets.transforms += offsets.transforms;
run.stream_offsets.linewidths += offsets.linewidths;
run
}));
self.n_paths += other.n_paths; self.n_paths += other.n_paths;
self.n_path_segments += other.n_path_segments; self.n_path_segments += other.n_path_segments;
self.n_clips += other.n_clips; self.n_clips += other.n_clips;
self.n_open_clips += other.n_open_clips; self.n_open_clips += other.n_open_clips;
self.patches
.extend(other.patches.iter().map(|patch| match patch {
Patch::Ramp {
draw_data_offset: offset,
stops,
} => {
let stops = stops.start + stops_base..stops.end + stops_base;
Patch::Ramp {
draw_data_offset: offset + offsets.draw_data,
stops,
}
}
Patch::GlyphRun { index } => Patch::GlyphRun {
index: index + glyph_runs_base,
},
Patch::Image {
image,
draw_data_offset,
} => Patch::Image {
image: image.clone(),
draw_data_offset: *draw_data_offset + offsets.draw_data,
},
}));
self.color_stops.extend_from_slice(&other.color_stops);
if let Some(transform) = *transform { if let Some(transform) = *transform {
self.transforms self.transforms
.extend(other.transforms.iter().map(|x| transform * *x)); .extend(other.transforms.iter().map(|x| transform * *x));
for run in &mut self.glyph_runs[glyph_runs_base..] {
#[cfg(feature = "full")]
for run in &mut self.resources.glyph_runs[glyph_runs_base..] {
run.transform = transform * run.transform; run.transform = transform * run.transform;
} }
} else { } else {
@ -199,7 +206,9 @@ impl Encoding {
} }
/// Encodes a brush with an optional alpha modifier. /// Encodes a brush with an optional alpha modifier.
#[allow(unused_variables)]
pub fn encode_brush<'b>(&mut self, brush: impl Into<BrushRef<'b>>, alpha: f32) { pub fn encode_brush<'b>(&mut self, brush: impl Into<BrushRef<'b>>, alpha: f32) {
#[cfg(feature = "full")]
use super::math::point_to_f32; use super::math::point_to_f32;
match brush.into() { match brush.into() {
BrushRef::Solid(color) => { BrushRef::Solid(color) => {
@ -210,6 +219,7 @@ impl Encoding {
}; };
self.encode_color(DrawColor::new(color)); self.encode_color(DrawColor::new(color));
} }
#[cfg(feature = "full")]
BrushRef::Gradient(gradient) => match gradient.kind { BrushRef::Gradient(gradient) => match gradient.kind {
GradientKind::Linear { start, end } => { GradientKind::Linear { start, end } => {
self.encode_linear_gradient( self.encode_linear_gradient(
@ -246,9 +256,13 @@ impl Encoding {
todo!("sweep gradients aren't supported yet!") todo!("sweep gradients aren't supported yet!")
} }
}, },
#[cfg(feature = "full")]
BrushRef::Image(image) => { BrushRef::Image(image) => {
#[cfg(feature = "full")]
self.encode_image(image, alpha); self.encode_image(image, alpha);
} }
#[cfg(not(feature = "full"))]
_ => panic!("brushes other than solid require the 'full' feature to be enabled"),
} }
} }
@ -259,38 +273,56 @@ impl Encoding {
} }
/// Encodes a linear gradient brush. /// Encodes a linear gradient brush.
#[cfg(feature = "full")]
pub fn encode_linear_gradient( pub fn encode_linear_gradient(
&mut self, &mut self,
gradient: DrawLinearGradient, gradient: DrawLinearGradient,
color_stops: impl Iterator<Item = ColorStop>, color_stops: impl Iterator<Item = ColorStop>,
alpha: f32, alpha: f32,
_extend: Extend,
extend: Extend,
) { ) {
self.add_ramp(color_stops, alpha);
self.draw_tags.push(DrawTag::LINEAR_GRADIENT);
self.draw_data
.extend_from_slice(bytemuck::bytes_of(&gradient));
match self.add_ramp(color_stops, alpha, extend) {
RampStops::Empty => self.encode_color(DrawColor::new(Color::TRANSPARENT)),
RampStops::One(color) => self.encode_color(DrawColor::new(color)),
_ => {
self.draw_tags.push(DrawTag::LINEAR_GRADIENT);
self.draw_data
.extend_from_slice(bytemuck::bytes_of(&gradient));
}
}
} }
/// Encodes a radial gradient brush. /// Encodes a radial gradient brush.
#[cfg(feature = "full")]
pub fn encode_radial_gradient( pub fn encode_radial_gradient(
&mut self, &mut self,
gradient: DrawRadialGradient, gradient: DrawRadialGradient,
color_stops: impl Iterator<Item = ColorStop>, color_stops: impl Iterator<Item = ColorStop>,
alpha: f32, alpha: f32,
_extend: Extend,
extend: Extend,
) { ) {
self.add_ramp(color_stops, alpha);
self.draw_tags.push(DrawTag::RADIAL_GRADIENT);
self.draw_data
.extend_from_slice(bytemuck::bytes_of(&gradient));
// Match Skia's epsilon for radii comparison
const SKIA_EPSILON: f32 = 1.0 / (1 << 12) as f32;
if gradient.p0 == gradient.p1 && (gradient.r0 - gradient.r1).abs() < SKIA_EPSILON {
self.encode_color(DrawColor::new(Color::TRANSPARENT));
}
match self.add_ramp(color_stops, alpha, extend) {
RampStops::Empty => self.encode_color(DrawColor::new(Color::TRANSPARENT)),
RampStops::One(color) => self.encode_color(DrawColor::new(color)),
_ => {
self.draw_tags.push(DrawTag::RADIAL_GRADIENT);
self.draw_data
.extend_from_slice(bytemuck::bytes_of(&gradient));
}
}
} }
/// Encodes an image brush. /// Encodes an image brush.
#[cfg(feature = "full")]
pub fn encode_image(&mut self, image: &Image, _alpha: f32) { pub fn encode_image(&mut self, image: &Image, _alpha: f32) {
// TODO: feed the alpha multiplier through the full pipeline for consistency // TODO: feed the alpha multiplier through the full pipeline for consistency
// with other brushes? // with other brushes?
self.patches.push(Patch::Image {
self.resources.patches.push(Patch::Image {
image: image.clone(), image: image.clone(),
draw_data_offset: self.draw_data.len(), draw_data_offset: self.draw_data.len(),
}); });
@ -331,19 +363,72 @@ impl Encoding {
         self.path_tags.swap(len - 1, len - 2);
     }
-    fn add_ramp(&mut self, color_stops: impl Iterator<Item = ColorStop>, alpha: f32) {
+    #[cfg(feature = "full")]
+    fn add_ramp(
+        &mut self,
+        color_stops: impl Iterator<Item = ColorStop>,
+        alpha: f32,
+        extend: Extend,
+    ) -> RampStops {
         let offset = self.draw_data.len();
-        let stops_start = self.color_stops.len();
+        let stops_start = self.resources.color_stops.len();
         if alpha != 1.0 {
-            self.color_stops
+            self.resources
+                .color_stops
                 .extend(color_stops.map(|stop| stop.with_alpha_factor(alpha)));
         } else {
-            self.color_stops.extend(color_stops);
+            self.resources.color_stops.extend(color_stops);
         }
-        self.patches.push(Patch::Ramp {
-            draw_data_offset: offset,
-            stops: stops_start..self.color_stops.len(),
-        });
+        let stops_end = self.resources.color_stops.len();
+        match stops_end - stops_start {
+            0 => RampStops::Empty,
+            1 => RampStops::One(self.resources.color_stops.pop().unwrap().color),
+            _ => {
+                self.resources.patches.push(Patch::Ramp {
+                    draw_data_offset: offset,
+                    stops: stops_start..stops_end,
+                    extend,
+                });
+                RampStops::Many
+            }
+        }
     }
 }
/// Result for adding a sequence of color stops.
enum RampStops {
/// Color stop sequence was empty.
Empty,
/// Contained a single color stop.
One(Color),
/// More than one color stop.
Many,
}
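A side effect of add_ramp returning RampStops is that degenerate gradients never reach the GPU as gradients: zero stops encode as a transparent solid and one stop encodes as a plain solid color, so no Patch::Ramp is pushed. A hypothetical check of that behavior (assuming Encoding::new(), the public resources field, and peniko's ColorStop { offset, color } layout):

    use peniko::{Color, ColorStop, Extend};
    use vello_encoding::{DrawLinearGradient, Encoding};

    fn main() {
        let mut encoding = Encoding::new();
        let gradient = DrawLinearGradient {
            index: 0,
            p0: [0.0, 0.0],
            p1: [100.0, 0.0],
        };
        // A single stop collapses to a solid fill, so no ramp patch is recorded.
        let stops = [ColorStop {
            offset: 0.0,
            color: Color::RED,
        }];
        encoding.encode_linear_gradient(gradient, stops.iter().cloned(), 1.0, Extend::Pad);
        assert!(encoding.resources.patches.is_empty());
    }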
/// Encoded data for late bound resources.
#[cfg(feature = "full")]
#[derive(Clone, Default)]
pub struct Resources {
/// Draw data patches for late bound resources.
pub patches: Vec<Patch>,
/// Color stop collection for gradients.
pub color_stops: Vec<ColorStop>,
/// Positioned glyph buffer.
pub glyphs: Vec<Glyph>,
/// Sequences of glyphs.
pub glyph_runs: Vec<GlyphRun>,
/// Normalized coordinate buffer for variable fonts.
pub normalized_coords: Vec<NormalizedCoord>,
}
#[cfg(feature = "full")]
impl Resources {
fn reset(&mut self) {
self.patches.clear();
self.color_stops.clear();
self.glyphs.clear();
self.glyph_runs.clear();
self.normalized_coords.clear();
} }
} }
@ -365,6 +450,7 @@ pub struct StreamOffsets {
} }
impl StreamOffsets { impl StreamOffsets {
#[cfg(feature = "full")]
pub(crate) fn add(&mut self, other: &Self) { pub(crate) fn add(&mut self, other: &Self) {
self.path_tags += other.path_tags; self.path_tags += other.path_tags;
self.path_data += other.path_data; self.path_data += other.path_data;

View file

@ -8,6 +8,7 @@ use std::collections::{hash_map::Entry, HashMap};
const DEFAULT_ATLAS_SIZE: i32 = 1024; const DEFAULT_ATLAS_SIZE: i32 = 1024;
const MAX_ATLAS_SIZE: i32 = 8192; const MAX_ATLAS_SIZE: i32 = 8192;
#[derive(Default)]
pub struct Images<'a> { pub struct Images<'a> {
pub width: u32, pub width: u32,
pub height: u32, pub height: u32,

View file

@ -8,12 +8,16 @@ mod clip;
mod config; mod config;
mod draw; mod draw;
mod encoding; mod encoding;
#[cfg(feature = "full")]
mod glyph; mod glyph;
#[cfg(feature = "full")]
mod glyph_cache; mod glyph_cache;
#[cfg(feature = "full")]
mod image_cache; mod image_cache;
mod math; mod math;
mod monoid; mod monoid;
mod path; mod path;
#[cfg(feature = "full")]
mod ramp_cache; mod ramp_cache;
mod resolve; mod resolve;
@ -28,11 +32,17 @@ pub use draw::{
DrawRadialGradient, DrawTag, DrawRadialGradient, DrawTag,
}; };
pub use encoding::{Encoding, StreamOffsets}; pub use encoding::{Encoding, StreamOffsets};
pub use glyph::{Glyph, GlyphRun};
pub use math::Transform; pub use math::Transform;
pub use monoid::Monoid; pub use monoid::Monoid;
pub use path::{ pub use path::{
Cubic, Path, PathBbox, PathEncoder, PathMonoid, PathSegment, PathSegmentType, PathTag, Tile, Cubic, Path, PathBbox, PathEncoder, PathMonoid, PathSegment, PathSegmentType, PathTag, Tile,
}; };
pub use ramp_cache::Ramps;
pub use resolve::{resolve_solid_paths_only, Layout};
pub use resolve::{Layout, Patch, Resolver};
#[cfg(feature = "full")]
pub use {
encoding::Resources,
glyph::{Glyph, GlyphRun},
ramp_cache::Ramps,
resolve::{Patch, Resolver},
};

View file

@ -72,6 +72,7 @@ impl Mul for Transform {
} }
} }
#[allow(dead_code)]
pub fn point_to_f32(point: kurbo::Point) -> [f32; 2] { pub fn point_to_f32(point: kurbo::Point) -> [f32; 2] {
[point.x as f32, point.y as f32] [point.x as f32, point.y as f32]
} }

View file

@ -403,6 +403,7 @@ impl<'a> PathEncoder<'a> {
} }
} }
#[cfg(feature = "full")]
impl fello::scale::Pen for PathEncoder<'_> { impl fello::scale::Pen for PathEncoder<'_> {
fn move_to(&mut self, x: f32, y: f32) { fn move_to(&mut self, x: f32, y: f32) {
self.move_to(x, y) self.move_to(x, y)

View file

@ -1,16 +1,19 @@
 // Copyright 2022 The Vello authors
 // SPDX-License-Identifier: Apache-2.0 OR MIT
-use std::ops::Range;
 use bytemuck::{Pod, Zeroable};
-use peniko::Image;
-use super::{
-    glyph_cache::{CachedRange, GlyphCache, GlyphKey},
-    image_cache::{ImageCache, Images},
-    ramp_cache::{RampCache, Ramps},
-    DrawTag, Encoding, PathTag, StreamOffsets, Transform,
-};
+use super::{DrawTag, Encoding, PathTag, StreamOffsets, Transform};
+#[cfg(feature = "full")]
+use {
+    super::{
+        glyph_cache::{CachedRange, GlyphCache, GlyphKey},
+        image_cache::{ImageCache, Images},
+        ramp_cache::{RampCache, Ramps},
+    },
+    peniko::{Extend, Image},
+    std::ops::Range,
+};
 /// Layout of a packed encoding.
@ -100,7 +103,63 @@ impl Layout {
} }
} }
/// Resolves and packs an encoding that contains only paths with solid color
/// fills.
///
/// Panics if the encoding contains any late bound resources (gradients, images
/// or glyph runs).
pub fn resolve_solid_paths_only(encoding: &Encoding, packed: &mut Vec<u8>) -> Layout {
#[cfg(feature = "full")]
assert!(
encoding.resources.patches.is_empty(),
"this resolve function doesn't support late bound resources"
);
let data = packed;
data.clear();
let mut layout = Layout {
n_paths: encoding.n_paths,
n_clips: encoding.n_clips,
..Layout::default()
};
let SceneBufferSizes {
buffer_size,
path_tag_padded,
} = SceneBufferSizes::new(encoding, &StreamOffsets::default());
data.reserve(buffer_size);
// Path tag stream
layout.path_tag_base = size_to_words(data.len());
data.extend_from_slice(bytemuck::cast_slice(&encoding.path_tags));
for _ in 0..encoding.n_open_clips {
data.extend_from_slice(bytemuck::bytes_of(&PathTag::PATH));
}
data.resize(path_tag_padded, 0);
// Path data stream
layout.path_data_base = size_to_words(data.len());
data.extend_from_slice(bytemuck::cast_slice(&encoding.path_data));
// Draw tag stream
layout.draw_tag_base = size_to_words(data.len());
// Bin data follows draw info
layout.bin_data_start = encoding.draw_tags.iter().map(|tag| tag.info_size()).sum();
data.extend_from_slice(bytemuck::cast_slice(&encoding.draw_tags));
for _ in 0..encoding.n_open_clips {
data.extend_from_slice(bytemuck::bytes_of(&DrawTag::END_CLIP));
}
// Draw data stream
layout.draw_data_base = size_to_words(data.len());
data.extend_from_slice(bytemuck::cast_slice(&encoding.draw_data));
// Transform stream
layout.transform_base = size_to_words(data.len());
data.extend_from_slice(bytemuck::cast_slice(&encoding.transforms));
// Linewidth stream
layout.linewidth_base = size_to_words(data.len());
data.extend_from_slice(bytemuck::cast_slice(&encoding.linewidths));
layout.n_draw_objects = layout.n_paths;
assert_eq!(buffer_size, data.len());
layout
}
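For callers that only ever emit solid fills (the situation a non-"full" build targets), packing no longer needs a Resolver at all. A minimal usage sketch of the new entry point, under the assumption that Layout's n_paths field is public as shown elsewhere in this diff:

    use vello_encoding::{resolve_solid_paths_only, Encoding};

    // Illustrative helper: pack a solid-color-only encoding into a scene buffer.
    fn pack_scene(encoding: &Encoding) -> Vec<u8> {
        let mut packed = Vec::new();
        // Panics if the encoding contains gradients, images or glyph runs.
        let layout = resolve_solid_paths_only(encoding, &mut packed);
        assert_eq!(layout.n_paths, encoding.n_paths);
        packed
    }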
/// Resolver for late bound resources. /// Resolver for late bound resources.
#[cfg(feature = "full")]
#[derive(Default)] #[derive(Default)]
pub struct Resolver { pub struct Resolver {
glyph_cache: GlyphCache, glyph_cache: GlyphCache,
@ -112,6 +171,7 @@ pub struct Resolver {
patches: Vec<ResolvedPatch>, patches: Vec<ResolvedPatch>,
} }
#[cfg(feature = "full")]
impl Resolver { impl Resolver {
/// Creates a new resource cache. /// Creates a new resource cache.
pub fn new() -> Self { pub fn new() -> Self {
@ -125,7 +185,12 @@ impl Resolver {
encoding: &Encoding, encoding: &Encoding,
packed: &mut Vec<u8>, packed: &mut Vec<u8>,
) -> (Layout, Ramps<'a>, Images<'a>) { ) -> (Layout, Ramps<'a>, Images<'a>) {
let sizes = self.resolve_patches(encoding);
let resources = &encoding.resources;
if resources.patches.is_empty() {
let layout = resolve_solid_paths_only(encoding, packed);
return (layout, Ramps::default(), Images::default());
}
let patch_sizes = self.resolve_patches(encoding);
self.resolve_pending_images(); self.resolve_pending_images();
let data = packed; let data = packed;
data.clear(); data.clear();
@ -134,20 +199,11 @@ impl Resolver {
n_clips: encoding.n_clips, n_clips: encoding.n_clips,
..Layout::default() ..Layout::default()
}; };
// Compute size of data buffer
let n_path_tags =
encoding.path_tags.len() + sizes.path_tags + encoding.n_open_clips as usize;
let path_tag_padded = align_up(n_path_tags, 4 * crate::config::PATH_REDUCE_WG);
let capacity = path_tag_padded
+ slice_size_in_bytes(&encoding.path_data, sizes.path_data)
+ slice_size_in_bytes(
&encoding.draw_tags,
sizes.draw_tags + encoding.n_open_clips as usize,
)
+ slice_size_in_bytes(&encoding.draw_data, sizes.draw_data)
+ slice_size_in_bytes(&encoding.transforms, sizes.transforms)
+ slice_size_in_bytes(&encoding.linewidths, sizes.linewidths);
data.reserve(capacity);
let SceneBufferSizes {
buffer_size,
path_tag_padded,
} = SceneBufferSizes::new(encoding, &patch_sizes);
data.reserve(buffer_size);
// Path tag stream // Path tag stream
layout.path_tag_base = size_to_words(data.len()); layout.path_tag_base = size_to_words(data.len());
{ {
@ -156,7 +212,7 @@ impl Resolver {
for patch in &self.patches { for patch in &self.patches {
if let ResolvedPatch::GlyphRun { index, glyphs, .. } = patch { if let ResolvedPatch::GlyphRun { index, glyphs, .. } = patch {
layout.n_paths += 1; layout.n_paths += 1;
let stream_offset = encoding.glyph_runs[*index].stream_offsets.path_tags; let stream_offset = resources.glyph_runs[*index].stream_offsets.path_tags;
if pos < stream_offset { if pos < stream_offset {
data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset])); data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset]));
pos = stream_offset; pos = stream_offset;
@ -185,7 +241,9 @@ impl Resolver {
let stream = &encoding.path_data; let stream = &encoding.path_data;
for patch in &self.patches { for patch in &self.patches {
if let ResolvedPatch::GlyphRun { index, glyphs, .. } = patch { if let ResolvedPatch::GlyphRun { index, glyphs, .. } = patch {
let stream_offset = encoding.glyph_runs[*index].stream_offsets.path_data; let stream_offset = encoding.resources.glyph_runs[*index]
.stream_offsets
.path_data;
if pos < stream_offset { if pos < stream_offset {
data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset])); data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset]));
pos = stream_offset; pos = stream_offset;
@ -221,11 +279,13 @@ impl Resolver {
ResolvedPatch::Ramp { ResolvedPatch::Ramp {
draw_data_offset, draw_data_offset,
ramp_id, ramp_id,
extend,
} => { } => {
if pos < *draw_data_offset { if pos < *draw_data_offset {
data.extend_from_slice(&encoding.draw_data[pos..*draw_data_offset]); data.extend_from_slice(&encoding.draw_data[pos..*draw_data_offset]);
} }
data.extend_from_slice(bytemuck::bytes_of(ramp_id));
let index_mode = (ramp_id << 2) | *extend as u32;
data.extend_from_slice(bytemuck::bytes_of(&index_mode));
pos = *draw_data_offset + 4; pos = *draw_data_offset + 4;
} }
ResolvedPatch::GlyphRun { .. } => {} ResolvedPatch::GlyphRun { .. } => {}
@ -267,14 +327,14 @@ impl Resolver {
transform, transform,
} = patch } = patch
{ {
let run = &encoding.glyph_runs[*index]; let run = &resources.glyph_runs[*index];
let stream_offset = encoding.glyph_runs[*index].stream_offsets.transforms; let stream_offset = run.stream_offsets.transforms;
if pos < stream_offset { if pos < stream_offset {
data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset])); data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset]));
pos = stream_offset; pos = stream_offset;
} }
if let Some(glyph_transform) = run.glyph_transform { if let Some(glyph_transform) = run.glyph_transform {
for glyph in &encoding.glyphs[run.glyphs.clone()] { for glyph in &resources.glyphs[run.glyphs.clone()] {
let xform = *transform let xform = *transform
* Transform { * Transform {
matrix: [1.0, 0.0, 0.0, -1.0], matrix: [1.0, 0.0, 0.0, -1.0],
@ -284,7 +344,7 @@ impl Resolver {
data.extend_from_slice(bytemuck::bytes_of(&xform)); data.extend_from_slice(bytemuck::bytes_of(&xform));
} }
} else { } else {
for glyph in &encoding.glyphs[run.glyphs.clone()] { for glyph in &resources.glyphs[run.glyphs.clone()] {
let xform = *transform let xform = *transform
* Transform { * Transform {
matrix: [1.0, 0.0, 0.0, -1.0], matrix: [1.0, 0.0, 0.0, -1.0],
@ -306,7 +366,7 @@ impl Resolver {
let stream = &encoding.linewidths; let stream = &encoding.linewidths;
for patch in &self.patches { for patch in &self.patches {
if let ResolvedPatch::GlyphRun { index, glyphs, .. } = patch { if let ResolvedPatch::GlyphRun { index, glyphs, .. } = patch {
let stream_offset = encoding.glyph_runs[*index].stream_offsets.linewidths; let stream_offset = resources.glyph_runs[*index].stream_offsets.linewidths;
if pos < stream_offset { if pos < stream_offset {
data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset])); data.extend_from_slice(bytemuck::cast_slice(&stream[pos..stream_offset]));
pos = stream_offset; pos = stream_offset;
@ -323,7 +383,7 @@ impl Resolver {
} }
} }
layout.n_draw_objects = layout.n_paths; layout.n_draw_objects = layout.n_paths;
assert_eq!(capacity, data.len()); assert_eq!(buffer_size, data.len());
(layout, self.ramp_cache.ramps(), self.image_cache.images()) (layout, self.ramp_cache.ramps(), self.image_cache.images())
} }
@ -335,21 +395,24 @@ impl Resolver {
self.pending_images.clear(); self.pending_images.clear();
self.patches.clear(); self.patches.clear();
let mut sizes = StreamOffsets::default(); let mut sizes = StreamOffsets::default();
for patch in &encoding.patches {
let resources = &encoding.resources;
for patch in &resources.patches {
match patch { match patch {
Patch::Ramp { Patch::Ramp {
draw_data_offset, draw_data_offset,
stops, stops,
extend,
} => { } => {
let ramp_id = self.ramp_cache.add(&encoding.color_stops[stops.clone()]); let ramp_id = self.ramp_cache.add(&resources.color_stops[stops.clone()]);
self.patches.push(ResolvedPatch::Ramp { self.patches.push(ResolvedPatch::Ramp {
draw_data_offset: *draw_data_offset + sizes.draw_data, draw_data_offset: *draw_data_offset + sizes.draw_data,
ramp_id, ramp_id,
extend: *extend,
}); });
} }
Patch::GlyphRun { index } => { Patch::GlyphRun { index } => {
let mut run_sizes = StreamOffsets::default(); let mut run_sizes = StreamOffsets::default();
let run = &encoding.glyph_runs[*index]; let run = &resources.glyph_runs[*index];
let font_id = run.font.data.id(); let font_id = run.font.data.id();
let font_size_u32 = run.font_size.to_bits(); let font_size_u32 = run.font_size.to_bits();
let Ok(font_file) = fello::raw::FileRef::new(run.font.data.as_ref()) else { continue }; let Ok(font_file) = fello::raw::FileRef::new(run.font.data.as_ref()) else { continue };
@ -360,8 +423,8 @@ impl Resolver {
} }
}; };
let Some(font) = font else { continue }; let Some(font) = font else { continue };
let glyphs = &encoding.glyphs[run.glyphs.clone()]; let glyphs = &resources.glyphs[run.glyphs.clone()];
let coords = &encoding.normalized_coords[run.normalized_coords.clone()]; let coords = &resources.normalized_coords[run.normalized_coords.clone()];
let key = fello::FontKey { let key = fello::FontKey {
data_id: font_id, data_id: font_id,
index: run.font.index, index: run.font.index,
@ -463,8 +526,9 @@ impl Resolver {
} }
} }
#[derive(Clone)]
/// Patch for a late bound resource. /// Patch for a late bound resource.
#[cfg(feature = "full")]
#[derive(Clone)]
pub enum Patch { pub enum Patch {
/// Gradient ramp resource. /// Gradient ramp resource.
Ramp { Ramp {
@ -472,6 +536,8 @@ pub enum Patch {
draw_data_offset: usize, draw_data_offset: usize,
/// Range of the gradient stops in the resource set. /// Range of the gradient stops in the resource set.
stops: Range<usize>, stops: Range<usize>,
/// Extend mode for the gradient.
extend: Extend,
}, },
/// Glyph run resource. /// Glyph run resource.
GlyphRun { GlyphRun {
@ -488,12 +554,14 @@ pub enum Patch {
} }
/// Image to be allocated in the atlas. /// Image to be allocated in the atlas.
#[cfg(feature = "full")]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
struct PendingImage { struct PendingImage {
image: Image, image: Image,
xy: Option<(u32, u32)>, xy: Option<(u32, u32)>,
} }
#[cfg(feature = "full")]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
enum ResolvedPatch { enum ResolvedPatch {
Ramp { Ramp {
@ -501,6 +569,8 @@ enum ResolvedPatch {
draw_data_offset: usize, draw_data_offset: usize,
/// Resolved ramp index. /// Resolved ramp index.
ramp_id: u32, ramp_id: u32,
/// Extend mode for the gradient.
extend: Extend,
}, },
GlyphRun { GlyphRun {
/// Index of the original glyph run in the encoding. /// Index of the original glyph run in the encoding.
@ -518,6 +588,36 @@ enum ResolvedPatch {
}, },
} }
struct SceneBufferSizes {
/// Full size of the scene buffer in bytes.
buffer_size: usize,
/// Padded length of the path tag stream in bytes.
path_tag_padded: usize,
}
impl SceneBufferSizes {
/// Computes common scene buffer sizes for the given encoding and patch
/// stream sizes.
fn new(encoding: &Encoding, patch_sizes: &StreamOffsets) -> Self {
let n_path_tags =
encoding.path_tags.len() + patch_sizes.path_tags + encoding.n_open_clips as usize;
let path_tag_padded = align_up(n_path_tags, 4 * crate::config::PATH_REDUCE_WG);
let buffer_size = path_tag_padded
+ slice_size_in_bytes(&encoding.path_data, patch_sizes.path_data)
+ slice_size_in_bytes(
&encoding.draw_tags,
patch_sizes.draw_tags + encoding.n_open_clips as usize,
)
+ slice_size_in_bytes(&encoding.draw_data, patch_sizes.draw_data)
+ slice_size_in_bytes(&encoding.transforms, patch_sizes.transforms)
+ slice_size_in_bytes(&encoding.linewidths, patch_sizes.linewidths);
Self {
buffer_size,
path_tag_padded,
}
}
}
fn slice_size_in_bytes<T: Sized>(slice: &[T], extra: usize) -> usize { fn slice_size_in_bytes<T: Sized>(slice: &[T], extra: usize) -> usize {
(slice.len() + extra) * std::mem::size_of::<T>() (slice.len() + extra) * std::mem::size_of::<T>()
} }
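The extend mode introduced above does not get its own field in the packed scene; it rides in the low two bits of the ramp word, with the ramp id shifted up by two, and fine.wgsl later in this diff unpacks it as index = index_mode >> 2u and extend_mode = index_mode & 0x3u. A small standalone sketch of that packing (the 0/1/2 values assume peniko's Extend declaration order Pad, Repeat, Reflect):

    // Sketch of the ramp index / extend mode word written for ResolvedPatch::Ramp.
    fn pack_index_mode(ramp_id: u32, extend: u32) -> u32 {
        (ramp_id << 2) | (extend & 0x3)
    }

    fn unpack_index_mode(word: u32) -> (u32, u32) {
        (word >> 2, word & 0x3)
    }

    fn main() {
        let word = pack_index_mode(7, 2); // ramp 7 with Extend::Reflect
        assert_eq!(unpack_index_mode(word), (7, 2));
    }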

View file

@ -8,31 +8,32 @@ mod types;
use std::env; use std::env;
use std::fmt::Write; use std::fmt::Write;
use std::path::Path;
use std::path::{Path, PathBuf};
use compile::ShaderInfo; use compile::ShaderInfo;
fn main() { fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap(); let out_dir = env::var_os("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("shaders.rs"); let dest_path = Path::new(&out_dir).join("shaders.rs");
let mut shaders = compile::ShaderInfo::from_dir("../../shader");
// The shaders are defined under the workspace root and not in this crate so we need to locate
// them somehow. Cargo doesn't define an environment variable that points at the root workspace
// directory. In hermetic build environments that don't support relative paths (such as Bazel)
// we support supplying a `WORKSPACE_MANIFEST_FILE` that is expected to be an absolute path to
// the Cargo.toml file at the workspace root. If that's not present, we use a relative path.
let workspace_dir = env::var("WORKSPACE_MANIFEST_FILE")
.ok()
.and_then(|p| Path::new(&p).parent().map(|p| p.to_owned()))
.unwrap_or(PathBuf::from("../../"));
let shader_dir = Path::new(&workspace_dir).join("shader");
let mut shaders = compile::ShaderInfo::from_dir(&shader_dir);
// Drop the HashMap and sort by name so that we get deterministic order. // Drop the HashMap and sort by name so that we get deterministic order.
let mut shaders = shaders.drain().collect::<Vec<_>>(); let mut shaders = shaders.drain().collect::<Vec<_>>();
shaders.sort_by(|x, y| x.0.cmp(&y.0)); shaders.sort_by(|x, y| x.0.cmp(&y.0));
let mut buf = String::default(); let mut buf = String::default();
write_types(&mut buf, &shaders).unwrap(); write_types(&mut buf, &shaders).unwrap();
if cfg!(feature = "wgsl") { write_shaders(&mut buf, &shaders).unwrap();
write_shaders(&mut buf, "wgsl", &shaders, |info| {
info.source.as_bytes().to_owned()
})
.unwrap();
}
if cfg!(feature = "msl") {
write_shaders(&mut buf, "msl", &shaders, |info| {
compile::msl::translate(info).unwrap().as_bytes().to_owned()
})
.unwrap();
}
std::fs::write(&dest_path, &buf).unwrap(); std::fs::write(&dest_path, &buf).unwrap();
println!("cargo:rerun-if-changed=../shader"); println!("cargo:rerun-if-changed=../shader");
} }
@ -65,11 +66,9 @@ fn write_types(buf: &mut String, shaders: &[(String, ShaderInfo)]) -> Result<(),
fn write_shaders( fn write_shaders(
buf: &mut String, buf: &mut String,
mod_name: &str,
shaders: &[(String, ShaderInfo)], shaders: &[(String, ShaderInfo)],
translate: impl Fn(&ShaderInfo) -> Vec<u8>,
) -> Result<(), std::fmt::Error> { ) -> Result<(), std::fmt::Error> {
writeln!(buf, "pub mod {mod_name} {{")?; writeln!(buf, "mod gen {{")?;
writeln!(buf, " use super::*;")?; writeln!(buf, " use super::*;")?;
writeln!(buf, " use BindType::*;")?; writeln!(buf, " use BindType::*;")?;
writeln!(buf, " pub const SHADERS: Shaders<'static> = Shaders {{")?; writeln!(buf, " pub const SHADERS: Shaders<'static> = Shaders {{")?;
@ -80,14 +79,8 @@ fn write_shaders(
.map(|binding| binding.ty) .map(|binding| binding.ty)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let wg_bufs = &info.workgroup_buffers; let wg_bufs = &info.workgroup_buffers;
let source = translate(info);
writeln!(buf, " {name}: ComputeShader {{")?; writeln!(buf, " {name}: ComputeShader {{")?;
writeln!(buf, " name: Cow::Borrowed({:?}),", name)?; writeln!(buf, " name: Cow::Borrowed({:?}),", name)?;
writeln!(
buf,
" code: Cow::Borrowed(&{:?}),",
source.as_slice()
)?;
writeln!( writeln!(
buf, buf,
" workgroup_size: {:?},", " workgroup_size: {:?},",
@ -99,6 +92,16 @@ fn write_shaders(
" workgroup_buffers: Cow::Borrowed(&{:?}),", " workgroup_buffers: Cow::Borrowed(&{:?}),",
wg_bufs wg_bufs
)?; )?;
if cfg!(feature = "wgsl") {
writeln!(buf, " wgsl: Cow::Borrowed(&{:?}),", info.source)?;
}
if cfg!(feature = "msl") {
writeln!(
buf,
" msl: Cow::Borrowed(&{:?}),",
compile::msl::translate(info).unwrap()
)?;
}
writeln!(buf, " }},")?; writeln!(buf, " }},")?;
} }
writeln!(buf, " }};")?; writeln!(buf, " }};")?;

View file

@ -166,28 +166,29 @@ impl ShaderInfo {
for entry in shader_dir for entry in shader_dir
.read_dir() .read_dir()
.expect("Can read shader import directory") .expect("Can read shader import directory")
.filter_map(move |e| {
e.ok()
.filter(|e| e.path().extension().map(|e| e == "wgsl").unwrap_or(false))
})
{ {
let entry = entry.expect("Can continue reading shader import directory"); let file_name = entry.file_name();
if entry.file_type().unwrap().is_file() { if let Some(name) = file_name.to_str() {
let file_name = entry.file_name(); let suffix = ".wgsl";
if let Some(name) = file_name.to_str() { if let Some(shader_name) = name.strip_suffix(suffix) {
let suffix = ".wgsl"; let contents = fs::read_to_string(shader_dir.join(&file_name))
if let Some(shader_name) = name.strip_suffix(suffix) { .expect("Could read shader {shader_name} contents");
let contents = fs::read_to_string(shader_dir.join(&file_name)) if let Some(permutations) = permutation_map.get(shader_name) {
.expect("Could read shader {shader_name} contents"); for permutation in permutations {
if let Some(permutations) = permutation_map.get(shader_name) { let mut defines = defines.clone();
for permutation in permutations { defines.extend(permutation.defines.iter().cloned());
let mut defines = defines.clone();
defines.extend(permutation.defines.iter().cloned());
let source = preprocess::preprocess(&contents, &defines, &imports);
let shader_info = Self::new(source.clone(), "main").unwrap();
info.insert(permutation.name.clone(), shader_info);
}
} else {
let source = preprocess::preprocess(&contents, &defines, &imports); let source = preprocess::preprocess(&contents, &defines, &imports);
let shader_info = Self::new(source.clone(), "main").unwrap(); let shader_info = Self::new(source.clone(), "main").unwrap();
info.insert(shader_name.to_string(), shader_info); info.insert(permutation.name.clone(), shader_info);
} }
} else {
let source = preprocess::preprocess(&contents, &defines, &imports);
let shader_info = Self::new(source.clone(), "main").unwrap();
info.insert(shader_name.to_string(), shader_info);
} }
} }
} }

View file

@ -14,17 +14,18 @@ pub fn get_imports(shader_dir: &Path) -> HashMap<String, String> {
     for entry in imports_dir
         .read_dir()
         .expect("Can read shader import directory")
+        .filter_map(move |e| {
+            e.ok()
+                .filter(|e| e.path().extension().map(|e| e == "wgsl").unwrap_or(false))
+        })
     {
-        let entry = entry.expect("Can continue reading shader import directory");
-        if entry.file_type().unwrap().is_file() {
-            let file_name = entry.file_name();
-            if let Some(name) = file_name.to_str() {
-                let suffix = ".wgsl";
-                if let Some(import_name) = name.strip_suffix(suffix) {
-                    let contents = fs::read_to_string(imports_dir.join(&file_name))
-                        .expect("Could read shader {import_name} contents");
-                    imports.insert(import_name.to_owned(), contents);
-                }
+        let file_name = entry.file_name();
+        if let Some(name) = file_name.to_str() {
+            let suffix = ".wgsl";
+            if let Some(import_name) = name.strip_suffix(suffix) {
+                let contents = fs::read_to_string(imports_dir.join(&file_name))
+                    .expect("Could read shader {import_name} contents");
+                imports.insert(import_name.to_owned(), contents);
             }
         }
     }

View file

@ -13,10 +13,15 @@ use std::borrow::Cow;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct ComputeShader<'a> { pub struct ComputeShader<'a> {
pub name: Cow<'a, str>, pub name: Cow<'a, str>,
pub code: Cow<'a, [u8]>,
pub workgroup_size: [u32; 3], pub workgroup_size: [u32; 3],
pub bindings: Cow<'a, [BindType]>, pub bindings: Cow<'a, [BindType]>,
pub workgroup_buffers: Cow<'a, [WorkgroupBufferInfo]>, pub workgroup_buffers: Cow<'a, [WorkgroupBufferInfo]>,
#[cfg(feature = "wgsl")]
pub wgsl: Cow<'a, str>,
#[cfg(feature = "msl")]
pub msl: Cow<'a, str>,
} }
pub trait PipelineHost { pub trait PipelineHost {
@ -32,3 +37,5 @@ pub trait PipelineHost {
} }
include!(concat!(env!("OUT_DIR"), "/shaders.rs")); include!(concat!(env!("OUT_DIR"), "/shaders.rs"));
pub use gen::SHADERS;
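Since the translated source now lives on ComputeShader itself (gated by the wgsl/msl features) rather than in per-backend modules, consumers index the single SHADERS table and pick the field they need. A hypothetical usage sketch, assuming this crate is published as vello_shaders, that a shader named fine exists in the generated set, and that the wgsl feature is enabled:

    fn main() {
        // Illustrative only: inspect one entry of the generated shader table.
        let shader = &vello_shaders::SHADERS.fine;
        println!("{} uses workgroup size {:?}", shader.name, shader.workgroup_size);
        // The embedded WGSL source (present when the crate's wgsl feature is on).
        println!("{} bytes of WGSL", shader.wgsl.len());
    }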

View file

@ -44,6 +44,8 @@ pub fn test_scenes() -> SceneSet {
scene!(funky_paths), scene!(funky_paths),
scene!(cardioid_and_friends), scene!(cardioid_and_friends),
scene!(animated_text: animated), scene!(animated_text: animated),
scene!(gradient_extend),
scene!(two_point_radial),
scene!(brush_transform: animated), scene!(brush_transform: animated),
scene!(blend_grid), scene!(blend_grid),
scene!(conflation_artifacts), scene!(conflation_artifacts),
@ -255,6 +257,218 @@ fn brush_transform(sb: &mut SceneBuilder, params: &mut SceneParams) {
); );
} }
fn gradient_extend(sb: &mut SceneBuilder, params: &mut SceneParams) {
fn square(sb: &mut SceneBuilder, is_radial: bool, transform: Affine, extend: Extend) {
let colors = [Color::RED, Color::rgb8(0, 255, 0), Color::BLUE];
let width = 300f64;
let height = 300f64;
let gradient: Brush = if is_radial {
let center = (width * 0.5, height * 0.5);
let radius = (width * 0.25) as f32;
Gradient::new_two_point_radial(center, radius * 0.25, center, radius)
.with_stops(colors)
.with_extend(extend)
.into()
} else {
Gradient::new_linear((width * 0.35, height * 0.5), (width * 0.65, height * 0.5))
.with_stops(colors)
.with_extend(extend)
.into()
};
sb.fill(
Fill::NonZero,
transform,
&gradient,
None,
&Rect::new(0.0, 0.0, width, height),
);
}
let extend_modes = [Extend::Pad, Extend::Repeat, Extend::Reflect];
for x in 0..3 {
let extend = extend_modes[x];
for y in 0..2 {
let is_radial = y & 1 != 0;
let transform = Affine::translate((x as f64 * 350.0 + 50.0, y as f64 * 350.0 + 100.0));
square(sb, is_radial, transform, extend);
}
}
for (i, label) in ["Pad", "Repeat", "Reflect"].iter().enumerate() {
let x = i as f64 * 350.0 + 50.0;
params.text.add(
sb,
None,
32.0,
Some(&Color::WHITE.into()),
Affine::translate((x, 70.0)),
label,
);
}
}
fn two_point_radial(sb: &mut SceneBuilder, _params: &mut SceneParams) {
fn make(
sb: &mut SceneBuilder,
x0: f64,
y0: f64,
r0: f32,
x1: f64,
y1: f64,
r1: f32,
transform: Affine,
extend: Extend,
) {
let colors = [Color::RED, Color::YELLOW, Color::rgb8(6, 85, 186)];
let width = 400f64;
let height = 200f64;
let rect = Rect::new(0.0, 0.0, width, height);
sb.fill(Fill::NonZero, transform, Color::WHITE, None, &rect);
sb.fill(
Fill::NonZero,
transform,
&Gradient::new_two_point_radial((x0, y0), r0, (x1, y1), r1)
.with_stops(colors)
.with_extend(extend),
None,
&Rect::new(0.0, 0.0, width, height),
);
let r0 = r0 as f64 - 1.0;
let r1 = r1 as f64 - 1.0;
let stroke_width = 1.0;
sb.stroke(
&Stroke::new(stroke_width),
transform,
Color::BLACK,
None,
&Ellipse::new((x0, y0), (r0, r0), 0.0),
);
sb.stroke(
&Stroke::new(stroke_width),
transform,
Color::BLACK,
None,
&Ellipse::new((x1, y1), (r1, r1), 0.0),
);
}
// These demonstrate radial gradient patterns similar to the examples shown
// at <https://learn.microsoft.com/en-us/typography/opentype/spec/colr#radial-gradients>
for (i, mode) in [Extend::Pad, Extend::Repeat, Extend::Reflect]
.iter()
.enumerate()
{
let y = 100.0;
let x0 = 140.0;
let x1 = x0 + 140.0;
let r0 = 20.0;
let r1 = 50.0;
make(
sb,
x0,
y,
r0,
x1,
y,
r1,
Affine::translate((i as f64 * 420.0 + 20.0, 20.0)),
*mode,
);
}
for (i, mode) in [Extend::Pad, Extend::Repeat, Extend::Reflect]
.iter()
.enumerate()
{
let y = 100.0;
let x0 = 140.0;
let x1 = x0 + 140.0;
let r0 = 20.0;
let r1 = 50.0;
make(
sb,
x1,
y,
r1,
x0,
y,
r0,
Affine::translate((i as f64 * 420.0 + 20.0, 240.0)),
*mode,
);
}
for (i, mode) in [Extend::Pad, Extend::Repeat, Extend::Reflect]
.iter()
.enumerate()
{
let y = 100.0;
let x0 = 140.0;
let x1 = x0 + 140.0;
let r0 = 50.0;
let r1 = 50.0;
make(
sb,
x0,
y,
r0,
x1,
y,
r1,
Affine::translate((i as f64 * 420.0 + 20.0, 460.0)),
*mode,
);
}
for (i, mode) in [Extend::Pad, Extend::Repeat, Extend::Reflect]
.iter()
.enumerate()
{
let x0 = 140.0;
let y0 = 125.0;
let r0 = 20.0;
let x1 = 190.0;
let y1 = 100.0;
let r1 = 95.0;
make(
sb,
x0,
y0,
r0,
x1,
y1,
r1,
Affine::translate((i as f64 * 420.0 + 20.0, 680.0)),
*mode,
);
}
for (i, mode) in [Extend::Pad, Extend::Repeat, Extend::Reflect]
.iter()
.enumerate()
{
let x0 = 140.0;
let y0 = 125.0;
let r0 = 20.0;
let x1 = 190.0;
let y1 = 100.0;
let r1 = 96.0;
// Shift p0 so the outer edges of both circles touch
let p0 = Point::new(x1, y1)
+ ((Point::new(x0, y0) - Point::new(x1, y1)).normalize() * (r1 - r0));
make(
sb,
p0.x,
p0.y,
r0 as f32,
x1,
y1,
r1 as f32,
Affine::translate((i as f64 * 420.0 + 20.0, 900.0)),
*mode,
);
}
}
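The last group above leans on the comment "Shift p0 so the outer edges of both circles touch": moving p0 along the direction from p1 by r1 - r0 makes the start circle internally tangent to the end circle, i.e. |p0' - p1| + r0 = r1. A quick standalone check of that construction with the same numbers (illustrative, not code from the commit):

    fn main() {
        // Same values as the final two_point_radial row: r0 = 20, r1 = 96.
        let (x0, y0, r0) = (140.0_f64, 125.0_f64, 20.0_f64);
        let (x1, y1, r1) = (190.0_f64, 100.0_f64, 96.0_f64);
        let len = ((x0 - x1).powi(2) + (y0 - y1).powi(2)).sqrt();
        // p0' = p1 + normalize(p0 - p1) * (r1 - r0), as in the scene code above.
        let p0 = (x1 + (x0 - x1) / len * (r1 - r0), y1 + (y0 - y1) / len * (r1 - r0));
        let d = ((p0.0 - x1).powi(2) + (p0.1 - y1).powi(2)).sqrt();
        // Outer edges touch: distance between centers plus r0 equals r1.
        assert!((d + r0 - r1).abs() < 1e-9);
    }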
fn blend_grid(sb: &mut SceneBuilder, _: &mut SceneParams) { fn blend_grid(sb: &mut SceneBuilder, _: &mut SceneParams) {
const BLEND_MODES: &[Mix] = &[ const BLEND_MODES: &[Mix] = &[
Mix::Normal, Mix::Normal,

View file

@ -20,5 +20,6 @@ $ cargo run -p with_winit --release -- [SVG FILES]
- Space resets the position and zoom of the image. - Space resets the position and zoom of the image.
- S toggles the frame statistics layer - S toggles the frame statistics layer
- C resets the min/max frame time tracked by statistics - C resets the min/max frame time tracked by statistics
- D toggles displaying the required number of each kind of dynamically allocated element (default: off)
- V toggles VSync on/off (default: on) - V toggles VSync on/off (default: on)
- Escape exits the program. - Escape exits the program.

View file

@ -27,7 +27,7 @@ use vello::{
util::RenderContext, util::RenderContext,
Renderer, Scene, SceneBuilder, Renderer, Scene, SceneBuilder,
}; };
use vello::{RendererOptions, SceneFragment};
use vello::{BumpAllocators, RendererOptions, SceneFragment};
use winit::{ use winit::{
event_loop::{EventLoop, EventLoopBuilder}, event_loop::{EventLoop, EventLoopBuilder},
@ -100,6 +100,8 @@ fn run(
let mut images = ImageCache::new(); let mut images = ImageCache::new();
let mut stats = stats::Stats::new(); let mut stats = stats::Stats::new();
let mut stats_shown = true; let mut stats_shown = true;
let mut scene_complexity: Option<BumpAllocators> = None;
let mut complexity_shown = false;
let mut vsync_on = true; let mut vsync_on = true;
let mut frame_start_time = Instant::now(); let mut frame_start_time = Instant::now();
let start = Instant::now(); let start = Instant::now();
@ -153,6 +155,9 @@ fn run(
Some(VirtualKeyCode::S) => { Some(VirtualKeyCode::S) => {
stats_shown = !stats_shown; stats_shown = !stats_shown;
} }
Some(VirtualKeyCode::D) => {
complexity_shown = !complexity_shown;
}
Some(VirtualKeyCode::C) => { Some(VirtualKeyCode::C) => {
stats.clear_min_and_max(); stats.clear_min_and_max();
} }
@ -332,6 +337,7 @@ fn run(
width as f64, width as f64,
height as f64, height as f64,
stats.samples(), stats.samples(),
complexity_shown.then_some(scene_complexity).flatten(),
vsync_on, vsync_on,
); );
} }
@ -342,7 +348,7 @@ fn run(
.expect("failed to get surface texture"); .expect("failed to get surface texture");
#[cfg(not(target_arch = "wasm32"))] #[cfg(not(target_arch = "wasm32"))]
{ {
vello::block_on_wgpu(
scene_complexity = vello::block_on_wgpu(
&device_handle.device, &device_handle.device,
renderers[render_state.surface.dev_id] renderers[render_state.surface.dev_id]
.as_mut() .as_mut()

View file

@ -19,7 +19,7 @@ use std::collections::VecDeque;
use vello::{ use vello::{
kurbo::{Affine, PathEl, Rect}, kurbo::{Affine, PathEl, Rect},
peniko::{Brush, Color, Fill, Stroke}, peniko::{Brush, Color, Fill, Stroke},
SceneBuilder,
BumpAllocators, SceneBuilder,
}; };
const SLIDING_WINDOW_SIZE: usize = 100; const SLIDING_WINDOW_SIZE: usize = 100;
@ -40,6 +40,7 @@ impl Snapshot {
viewport_width: f64, viewport_width: f64,
viewport_height: f64, viewport_height: f64,
samples: T, samples: T,
bump: Option<BumpAllocators>,
vsync: bool, vsync: bool,
) where ) where
T: Iterator<Item = &'a u64>, T: Iterator<Item = &'a u64>,
@ -59,13 +60,23 @@ impl Snapshot {
&Rect::new(0., 0., width, height), &Rect::new(0., 0., width, height),
); );
let labels = [
let mut labels = vec![
format!("Frame Time: {:.2} ms", self.frame_time_ms), format!("Frame Time: {:.2} ms", self.frame_time_ms),
format!("Frame Time (min): {:.2} ms", self.frame_time_min_ms), format!("Frame Time (min): {:.2} ms", self.frame_time_min_ms),
format!("Frame Time (max): {:.2} ms", self.frame_time_max_ms), format!("Frame Time (max): {:.2} ms", self.frame_time_max_ms),
format!("VSync: {}", if vsync { "on" } else { "off" }), format!("VSync: {}", if vsync { "on" } else { "off" }),
format!("Resolution: {viewport_width}x{viewport_height}"), format!("Resolution: {viewport_width}x{viewport_height}"),
]; ];
if let Some(bump) = &bump {
if bump.failed >= 1 {
labels.push(format!("Allocation Failed!"));
}
labels.push(format!("binning: {}", bump.binning));
labels.push(format!("ptcl: {}", bump.ptcl));
labels.push(format!("tile: {}", bump.tile));
labels.push(format!("segments: {}", bump.segments));
labels.push(format!("blend: {}", bump.blend));
}
// height / 2 is dedicated to the text labels and the rest is filled by the bar graph. // height / 2 is dedicated to the text labels and the rest is filled by the bar graph.
let text_height = height * 0.5 / (1 + labels.len()) as f64; let text_height = height * 0.5 / (1 + labels.len()) as f64;

View file

@ -376,7 +376,7 @@ fn main(
} }
} }
// DRAWTAG_FILL_RAD_GRADIENT // DRAWTAG_FILL_RAD_GRADIENT
case 0x2dcu: {
case 0x29cu: {
let linewidth = bitcast<f32>(info_bin_data[di]); let linewidth = bitcast<f32>(info_bin_data[di]);
if write_path(tile, linewidth) { if write_path(tile, linewidth) {
let index = scene[dd]; let index = scene[dd];

View file

@ -6,6 +6,7 @@
#import clip #import clip
#import drawtag #import drawtag
#import bbox #import bbox
#import transform
@group(0) @binding(0) @group(0) @binding(0)
var<uniform> config: Config; var<uniform> config: Config;
@ -30,12 +31,6 @@ var<storage, read_write> clip_inp: array<ClipInp>;
let WG_SIZE = 256u; let WG_SIZE = 256u;
// Possibly dedup?
struct Transform {
matrx: vec4<f32>,
translate: vec2<f32>,
}
fn read_transform(transform_base: u32, ix: u32) -> Transform { fn read_transform(transform_base: u32, ix: u32) -> Transform {
let base = transform_base + ix * 6u; let base = transform_base + ix * 6u;
let c0 = bitcast<f32>(scene[base]); let c0 = bitcast<f32>(scene[base]);
@ -110,18 +105,16 @@ fn main(
// let y1 = f32(bbox.y1); // let y1 = f32(bbox.y1);
// let bbox_f = vec4(x0, y0, x1, y1); // let bbox_f = vec4(x0, y0, x1, y1);
let fill_mode = u32(bbox.linewidth >= 0.0); let fill_mode = u32(bbox.linewidth >= 0.0);
var matrx: vec4<f32>;
var transform = Transform();
var translate: vec2<f32>;
var linewidth = bbox.linewidth; var linewidth = bbox.linewidth;
if linewidth >= 0.0 || tag_word == DRAWTAG_FILL_LIN_GRADIENT || tag_word == DRAWTAG_FILL_RAD_GRADIENT || if linewidth >= 0.0 || tag_word == DRAWTAG_FILL_LIN_GRADIENT || tag_word == DRAWTAG_FILL_RAD_GRADIENT ||
tag_word == DRAWTAG_FILL_IMAGE tag_word == DRAWTAG_FILL_IMAGE
{ {
let transform = read_transform(config.transform_base, bbox.trans_ix);
transform = read_transform(config.transform_base, bbox.trans_ix);
matrx = transform.matrx;
translate = transform.translate;
} }
if linewidth >= 0.0 { if linewidth >= 0.0 {
// Note: doesn't deal with anisotropic case // Note: doesn't deal with anisotropic case
let matrx = transform.matrx;
linewidth *= sqrt(abs(matrx.x * matrx.w - matrx.y * matrx.z)); linewidth *= sqrt(abs(matrx.x * matrx.w - matrx.y * matrx.z));
} }
switch tag_word { switch tag_word {
@ -134,8 +127,8 @@ fn main(
info[di] = bitcast<u32>(linewidth); info[di] = bitcast<u32>(linewidth);
var p0 = bitcast<vec2<f32>>(vec2(scene[dd + 1u], scene[dd + 2u])); var p0 = bitcast<vec2<f32>>(vec2(scene[dd + 1u], scene[dd + 2u]));
var p1 = bitcast<vec2<f32>>(vec2(scene[dd + 3u], scene[dd + 4u])); var p1 = bitcast<vec2<f32>>(vec2(scene[dd + 3u], scene[dd + 4u]));
p0 = matrx.xy * p0.x + matrx.zw * p0.y + translate;
p1 = matrx.xy * p1.x + matrx.zw * p1.y + translate;
p0 = transform_apply(transform, p0);
p1 = transform_apply(transform, p1);
let dxy = p1 - p0; let dxy = p1 - p0;
let scale = 1.0 / dot(dxy, dxy); let scale = 1.0 / dot(dxy, dxy);
let line_xy = dxy * scale; let line_xy = dxy * scale;
@ -145,44 +138,99 @@ fn main(
info[di + 3u] = bitcast<u32>(line_c); info[di + 3u] = bitcast<u32>(line_c);
} }
// DRAWTAG_FILL_RAD_GRADIENT // DRAWTAG_FILL_RAD_GRADIENT
case 0x2dcu: {
info[di] = bitcast<u32>(linewidth);
var p0 = bitcast<vec2<f32>>(vec2(scene[dd + 1u], scene[dd + 2u]));
var p1 = bitcast<vec2<f32>>(vec2(scene[dd + 3u], scene[dd + 4u]));
let r0 = bitcast<f32>(scene[dd + 5u]);
let r1 = bitcast<f32>(scene[dd + 6u]);
let inv_det = 1.0 / (matrx.x * matrx.w - matrx.y * matrx.z);
let inv_mat = inv_det * vec4(matrx.w, -matrx.y, -matrx.z, matrx.x);
let inv_tr = mat2x2(inv_mat.xy, inv_mat.zw) * -translate - p0;
let center1 = p1 - p0;
let rr = r1 / (r1 - r0);
let ra_inv = rr / (r1 * r1 - dot(center1, center1));
let c1 = center1 * ra_inv;
let ra = rr * ra_inv;
let roff = rr - 1.0;
info[di + 1u] = bitcast<u32>(inv_mat.x);
info[di + 2u] = bitcast<u32>(inv_mat.y);
info[di + 3u] = bitcast<u32>(inv_mat.z);
info[di + 4u] = bitcast<u32>(inv_mat.w);
info[di + 5u] = bitcast<u32>(inv_tr.x);
info[di + 6u] = bitcast<u32>(inv_tr.y);
info[di + 7u] = bitcast<u32>(c1.x);
info[di + 8u] = bitcast<u32>(c1.y);
info[di + 9u] = bitcast<u32>(ra);
info[di + 10u] = bitcast<u32>(roff);
case 0x29cu: {
// Two-point conical gradient implementation based
// on the algorithm at <https://skia.org/docs/dev/design/conical/>
// This epsilon matches what Skia uses
let GRADIENT_EPSILON = 1.0 / f32(1 << 12u);
info[di] = bitcast<u32>(linewidth);
var p0 = bitcast<vec2<f32>>(vec2(scene[dd + 1u], scene[dd + 2u]));
var p1 = bitcast<vec2<f32>>(vec2(scene[dd + 3u], scene[dd + 4u]));
var r0 = bitcast<f32>(scene[dd + 5u]);
var r1 = bitcast<f32>(scene[dd + 6u]);
let user_to_gradient = transform_inverse(transform);
// Output variables
var xform = Transform();
var focal_x = 0.0;
var radius = 0.0;
var kind = 0u;
var flags = 0u;
if abs(r0 - r1) <= GRADIENT_EPSILON {
// When the radii are the same, emit a strip gradient
kind = RAD_GRAD_KIND_STRIP;
let scaled = r0 / distance(p0, p1);
xform = transform_mul(
two_point_to_unit_line(p0, p1),
user_to_gradient
);
radius = scaled * scaled;
} else {
// Assume a two point conical gradient unless the centers
// are equal.
kind = RAD_GRAD_KIND_CONE;
if all(p0 == p1) {
kind = RAD_GRAD_KIND_CIRCULAR;
// Nudge p0 a bit to avoid denormals.
p0 += GRADIENT_EPSILON;
}
if r1 == 0.0 {
// If r1 == 0.0, swap the points and radii
flags |= RAD_GRAD_SWAPPED;
let tmp_p = p0;
p0 = p1;
p1 = tmp_p;
let tmp_r = r0;
r0 = r1;
r1 = tmp_r;
}
focal_x = r0 / (r0 - r1);
let cf = (1.0 - focal_x) * p0 + focal_x * p1;
radius = r1 / (distance(cf, p1));
let user_to_unit_line = transform_mul(
two_point_to_unit_line(cf, p1),
user_to_gradient
);
var user_to_scaled = user_to_unit_line;
// When r == 1.0, focal point is on circle
if abs(radius - 1.0) <= GRADIENT_EPSILON {
kind = RAD_GRAD_KIND_FOCAL_ON_CIRCLE;
let scale = 0.5 * abs(1.0 - focal_x);
user_to_scaled = transform_mul(
Transform(vec4(scale, 0.0, 0.0, scale), vec2(0.0)),
user_to_unit_line
);
} else {
let a = radius * radius - 1.0;
let scale_ratio = abs(1.0 - focal_x) / a;
let scale_x = radius * scale_ratio;
let scale_y = sqrt(abs(a)) * scale_ratio;
user_to_scaled = transform_mul(
Transform(vec4(scale_x, 0.0, 0.0, scale_y), vec2(0.0)),
user_to_unit_line
);
}
xform = user_to_scaled;
}
info[di + 1u] = bitcast<u32>(xform.matrx.x);
info[di + 2u] = bitcast<u32>(xform.matrx.y);
info[di + 3u] = bitcast<u32>(xform.matrx.z);
info[di + 4u] = bitcast<u32>(xform.matrx.w);
info[di + 5u] = bitcast<u32>(xform.translate.x);
info[di + 6u] = bitcast<u32>(xform.translate.y);
info[di + 7u] = bitcast<u32>(focal_x);
info[di + 8u] = bitcast<u32>(radius);
info[di + 9u] = bitcast<u32>((flags << 3u) | kind);
} }
// DRAWTAG_FILL_IMAGE // DRAWTAG_FILL_IMAGE
case 0x248u: { case 0x248u: {
info[di] = bitcast<u32>(linewidth); info[di] = bitcast<u32>(linewidth);
let inv_det = 1.0 / (matrx.x * matrx.w - matrx.y * matrx.z); let inv = transform_inverse(transform);
let inv_mat = inv_det * vec4(matrx.w, -matrx.y, -matrx.z, matrx.x); info[di + 1u] = bitcast<u32>(inv.matrx.x);
let inv_tr = mat2x2(inv_mat.xy, inv_mat.zw) * -translate; info[di + 2u] = bitcast<u32>(inv.matrx.y);
info[di + 1u] = bitcast<u32>(inv_mat.x); info[di + 3u] = bitcast<u32>(inv.matrx.z);
info[di + 2u] = bitcast<u32>(inv_mat.y); info[di + 4u] = bitcast<u32>(inv.matrx.w);
info[di + 3u] = bitcast<u32>(inv_mat.z); info[di + 5u] = bitcast<u32>(inv.translate.x);
info[di + 4u] = bitcast<u32>(inv_mat.w); info[di + 6u] = bitcast<u32>(inv.translate.y);
info[di + 5u] = bitcast<u32>(inv_tr.x);
info[di + 6u] = bitcast<u32>(inv_tr.y);
info[di + 7u] = scene[dd]; info[di + 7u] = scene[dd];
info[di + 8u] = scene[dd + 1u]; info[di + 8u] = scene[dd + 1u];
} }
@@ -197,3 +245,17 @@ fn main(
clip_inp[m.clip_ix] = ClipInp(ix, i32(path_ix)); clip_inp[m.clip_ix] = ClipInp(ix, i32(path_ix));
} }
} }
fn two_point_to_unit_line(p0: vec2<f32>, p1: vec2<f32>) -> Transform {
let tmp1 = from_poly2(p0, p1);
let inv = transform_inverse(tmp1);
let tmp2 = from_poly2(vec2(0.0), vec2(1.0, 0.0));
return transform_mul(tmp2, inv);
}
fn from_poly2(p0: vec2<f32>, p1: vec2<f32>) -> Transform {
return Transform(
vec4(p1.y - p0.y, p0.x - p1.x, p1.x - p0.x, p1.y - p0.y),
vec2(p0.x, p0.y)
);
}
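The two helpers above build the transform that sends p0 to the origin and p1 to (1, 0) by composing from_poly2 with an inverse. As a cross-check, a hedged direct construction of the same mapping in Rust, using the shader's column-major [m00, m10, m01, m11] layout (illustrative only, not code from this change):

// Translate by -p0, rotate the segment onto the x axis and scale by 1 / |p1 - p0|.
// This should agree with transform_mul(from_poly2((0,0), (1,0)), inverse(from_poly2(p0, p1))).
fn unit_line_transform(p0: (f32, f32), p1: (f32, f32)) -> ([f32; 4], [f32; 2]) {
    let (dx, dy) = (p1.0 - p0.0, p1.1 - p0.1);
    let inv_len2 = 1.0 / (dx * dx + dy * dy);
    // Column-major [m00, m10, m01, m11], matching Transform.matrx.
    let matrx = [dx * inv_len2, -dy * inv_len2, dy * inv_len2, dx * inv_len2];
    let translate = [
        -(matrx[0] * p0.0 + matrx[2] * p0.1),
        -(matrx[1] * p0.0 + matrx[3] * p0.1),
    ];
    (matrx, translate)
}

For p0 = (2, 0), p1 = (4, 0) this yields matrx = [0.5, 0, 0, 0.5] and translate = [-1, 0], so p0 lands on (0, 0) and p1 on (1, 0).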

@@ -61,16 +61,20 @@ fn read_color(cmd_ix: u32) -> CmdColor {
} }
fn read_lin_grad(cmd_ix: u32) -> CmdLinGrad { fn read_lin_grad(cmd_ix: u32) -> CmdLinGrad {
let index = ptcl[cmd_ix + 1u]; let index_mode = ptcl[cmd_ix + 1u];
let index = index_mode >> 2u;
let extend_mode = index_mode & 0x3u;
let info_offset = ptcl[cmd_ix + 2u]; let info_offset = ptcl[cmd_ix + 2u];
let line_x = bitcast<f32>(info[info_offset]); let line_x = bitcast<f32>(info[info_offset]);
let line_y = bitcast<f32>(info[info_offset + 1u]); let line_y = bitcast<f32>(info[info_offset + 1u]);
let line_c = bitcast<f32>(info[info_offset + 2u]); let line_c = bitcast<f32>(info[info_offset + 2u]);
return CmdLinGrad(index, line_x, line_y, line_c); return CmdLinGrad(index, extend_mode, line_x, line_y, line_c);
} }
fn read_rad_grad(cmd_ix: u32) -> CmdRadGrad { fn read_rad_grad(cmd_ix: u32) -> CmdRadGrad {
let index = ptcl[cmd_ix + 1u]; let index_mode = ptcl[cmd_ix + 1u];
let index = index_mode >> 2u;
let extend_mode = index_mode & 0x3u;
let info_offset = ptcl[cmd_ix + 2u]; let info_offset = ptcl[cmd_ix + 2u];
let m0 = bitcast<f32>(info[info_offset]); let m0 = bitcast<f32>(info[info_offset]);
let m1 = bitcast<f32>(info[info_offset + 1u]); let m1 = bitcast<f32>(info[info_offset + 1u]);
@@ -78,10 +82,12 @@ fn read_rad_grad(cmd_ix: u32) -> CmdRadGrad {
let m3 = bitcast<f32>(info[info_offset + 3u]); let m3 = bitcast<f32>(info[info_offset + 3u]);
let matrx = vec4(m0, m1, m2, m3); let matrx = vec4(m0, m1, m2, m3);
let xlat = vec2(bitcast<f32>(info[info_offset + 4u]), bitcast<f32>(info[info_offset + 5u])); let xlat = vec2(bitcast<f32>(info[info_offset + 4u]), bitcast<f32>(info[info_offset + 5u]));
let c1 = vec2(bitcast<f32>(info[info_offset + 6u]), bitcast<f32>(info[info_offset + 7u])); let focal_x = bitcast<f32>(info[info_offset + 6u]);
let ra = bitcast<f32>(info[info_offset + 8u]); let radius = bitcast<f32>(info[info_offset + 7u]);
let roff = bitcast<f32>(info[info_offset + 9u]); let flags_kind = info[info_offset + 8u];
return CmdRadGrad(index, matrx, xlat, c1, ra, roff); let flags = flags_kind >> 3u;
let kind = flags_kind & 0x7u;
return CmdRadGrad(index, extend_mode, matrx, xlat, focal_x, radius, kind, flags);
} }
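read_lin_grad and read_rad_grad now split a single packed word into the gradient ramp index and the extend mode. A small sketch of that convention in Rust (the encode side is assumed, not shown in this diff):

// The low two bits carry the extend mode (pad/repeat/reflect); the rest is
// the index into the gradient ramp texture.
fn pack_index_mode(index: u32, extend_mode: u32) -> u32 {
    debug_assert!(extend_mode < 4);
    (index << 2) | extend_mode
}

fn unpack_index_mode(index_mode: u32) -> (u32, u32) {
    (index_mode >> 2, index_mode & 0x3)
}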
fn read_image(cmd_ix: u32) -> CmdImage { fn read_image(cmd_ix: u32) -> CmdImage {
@@ -108,6 +114,26 @@ fn read_end_clip(cmd_ix: u32) -> CmdEndClip {
return CmdEndClip(blend, alpha); return CmdEndClip(blend, alpha);
} }
fn extend_mode(t: f32, mode: u32) -> f32 {
let EXTEND_PAD = 0u;
let EXTEND_REPEAT = 1u;
let EXTEND_REFLECT = 2u;
switch mode {
// EXTEND_PAD
case 0u: {
return clamp(t, 0.0, 1.0);
}
// EXTEND_REPEAT
case 1u: {
return fract(t);
}
// EXTEND_REFLECT
default: {
return abs(t - 2.0 * round(0.5 * t));
}
}
}
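For reference, a Rust rendering of the extend_mode helper above, with example values per mode (not code from this change):

// Map the raw gradient parameter t into [0, 1] according to the extend mode.
fn extend_mode(t: f32, mode: u32) -> f32 {
    match mode {
        // EXTEND_PAD: clamp to the end stops.
        0 => t.clamp(0.0, 1.0),
        // EXTEND_REPEAT: keep the fractional part.
        1 => t - t.floor(),
        // EXTEND_REFLECT: triangle wave with period 2.
        _ => (t - 2.0 * (0.5 * t).round()).abs(),
    }
}

// For t = 1.25: pad gives 1.0, repeat gives 0.25, reflect gives 0.75.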
#else #else
@group(0) @binding(3) @group(0) @binding(3)
@@ -262,7 +288,7 @@ fn main(
let d = lin.line_x * xy.x + lin.line_y * xy.y + lin.line_c; let d = lin.line_x * xy.x + lin.line_y * xy.y + lin.line_c;
for (var i = 0u; i < PIXELS_PER_THREAD; i += 1u) { for (var i = 0u; i < PIXELS_PER_THREAD; i += 1u) {
let my_d = d + lin.line_x * f32(i); let my_d = d + lin.line_x * f32(i);
let x = i32(round(clamp(my_d, 0.0, 1.0) * f32(GRADIENT_WIDTH - 1))); let x = i32(round(extend_mode(my_d, lin.extend_mode) * f32(GRADIENT_WIDTH - 1)));
let fg_rgba = textureLoad(gradients, vec2(x, i32(lin.index)), 0); let fg_rgba = textureLoad(gradients, vec2(x, i32(lin.index)), 0);
let fg_i = fg_rgba * area[i]; let fg_i = fg_rgba * area[i];
rgba[i] = rgba[i] * (1.0 - fg_i.a) + fg_i; rgba[i] = rgba[i] * (1.0 - fg_i.a) + fg_i;
@@ -272,17 +298,46 @@ fn main(
// CMD_RAD_GRAD // CMD_RAD_GRAD
case 7u: { case 7u: {
let rad = read_rad_grad(cmd_ix); let rad = read_rad_grad(cmd_ix);
let focal_x = rad.focal_x;
let radius = rad.radius;
let is_strip = rad.kind == RAD_GRAD_KIND_STRIP;
let is_circular = rad.kind == RAD_GRAD_KIND_CIRCULAR;
let is_focal_on_circle = rad.kind == RAD_GRAD_KIND_FOCAL_ON_CIRCLE;
let is_swapped = (rad.flags & RAD_GRAD_SWAPPED) != 0u;
let r1_recip = select(1.0 / radius, 0.0, is_circular);
let less_scale = select(1.0, -1.0, is_swapped || (1.0 - focal_x) < 0.0);
let t_sign = sign(1.0 - focal_x);
for (var i = 0u; i < PIXELS_PER_THREAD; i += 1u) { for (var i = 0u; i < PIXELS_PER_THREAD; i += 1u) {
let my_xy = vec2(xy.x + f32(i), xy.y); let my_xy = vec2(xy.x + f32(i), xy.y);
// TODO: can hoist y, but for now stick to the GLSL version let local_xy = rad.matrx.xy * my_xy.x + rad.matrx.zw * my_xy.y + rad.xlat;
let xy_xformed = rad.matrx.xy * my_xy.x + rad.matrx.zw * my_xy.y + rad.xlat; let x = local_xy.x;
let ba = dot(xy_xformed, rad.c1); let y = local_xy.y;
let ca = rad.ra * dot(xy_xformed, xy_xformed); let xx = x * x;
let t = sqrt(ba * ba + ca) - ba - rad.roff; let yy = y * y;
let x = i32(round(clamp(t, 0.0, 1.0) * f32(GRADIENT_WIDTH - 1))); var t = 0.0;
let fg_rgba = textureLoad(gradients, vec2(x, i32(rad.index)), 0); var is_valid = true;
let fg_i = fg_rgba * area[i]; if is_strip {
rgba[i] = rgba[i] * (1.0 - fg_i.a) + fg_i; let a = radius - yy;
t = sqrt(a) + x;
is_valid = a >= 0.0;
} else if is_focal_on_circle {
t = (xx + yy) / x;
is_valid = t >= 0.0 && x != 0.0;
} else if radius > 1.0 {
t = sqrt(xx + yy) - x * r1_recip;
} else { // radius < 1.0
let a = xx - yy;
t = less_scale * sqrt(a) - x * r1_recip;
is_valid = a >= 0.0 && t >= 0.0;
}
if is_valid {
t = extend_mode(focal_x + t_sign * t, rad.extend_mode);
t = select(t, 1.0 - t, is_swapped);
let x = i32(round(t * f32(GRADIENT_WIDTH - 1)));
let fg_rgba = textureLoad(gradients, vec2(x, i32(rad.index)), 0);
let fg_i = fg_rgba * area[i];
rgba[i] = rgba[i] * (1.0 - fg_i.a) + fg_i;
}
} }
cmd_ix += 3u; cmd_ix += 3u;
} }
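The branches above are the per-pixel evaluation of the focal form set up in draw_leaf. A hedged Rust sketch of the same control flow (extend-mode handling and ramp lookup omitted; names are illustrative, not shared code):

#[derive(Clone, Copy, PartialEq)]
enum Kind { Strip, Circular, FocalOnCircle, Cone }

// (x, y) is the pixel position already mapped through the gradient transform.
// Returns None where the gradient is undefined (outside the strip or cone).
fn radial_t(x: f32, y: f32, kind: Kind, swapped: bool, focal_x: f32, radius: f32) -> Option<f32> {
    let (xx, yy) = (x * x, y * y);
    let r1_recip = if kind == Kind::Circular { 0.0 } else { 1.0 / radius };
    let (t, valid) = match kind {
        Kind::Strip => {
            let a = radius - yy;
            (a.sqrt() + x, a >= 0.0)
        }
        Kind::FocalOnCircle => {
            let t = (xx + yy) / x;
            (t, t >= 0.0 && x != 0.0)
        }
        _ if radius > 1.0 => ((xx + yy).sqrt() - x * r1_recip, true),
        _ => {
            // radius < 1.0: only the region inside the cone is valid.
            let less_scale = if swapped || (1.0 - focal_x) < 0.0 { -1.0 } else { 1.0 };
            let a = xx - yy;
            let t = less_scale * a.sqrt() - x * r1_recip;
            (t, a >= 0.0 && t >= 0.0)
        }
    };
    if !valid {
        return None;
    }
    // The shader then applies extend_mode to this value and, for swapped
    // gradients, maps t to 1 - t before sampling the gradient ramp.
    Some(focal_x + (1.0 - focal_x).signum() * t)
}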

@@ -14,6 +14,7 @@
#import config #import config
#import pathtag #import pathtag
#import cubic #import cubic
#import transform
@group(0) @binding(0) @group(0) @binding(0)
var<uniform> config: Config; var<uniform> config: Config;
@@ -36,7 +37,6 @@ struct AtomicPathBbox {
@group(0) @binding(3) @group(0) @binding(3)
var<storage, read_write> path_bboxes: array<AtomicPathBbox>; var<storage, read_write> path_bboxes: array<AtomicPathBbox>;
@group(0) @binding(4) @group(0) @binding(4)
var<storage, read_write> cubics: array<Cubic>; var<storage, read_write> cubics: array<Cubic>;
@@ -85,11 +85,6 @@ fn read_i16_point(ix: u32) -> vec2<f32> {
return vec2(x, y); return vec2(x, y);
} }
struct Transform {
matrx: vec4<f32>,
translate: vec2<f32>,
}
fn read_transform(transform_base: u32, ix: u32) -> Transform { fn read_transform(transform_base: u32, ix: u32) -> Transform {
let base = transform_base + ix * 6u; let base = transform_base + ix * 6u;
let c0 = bitcast<f32>(scene[base]); let c0 = bitcast<f32>(scene[base]);
@@ -103,10 +98,6 @@ fn read_transform(transform_base: u32, ix: u32) -> Transform {
return Transform(matrx, translate); return Transform(matrx, translate);
} }
fn transform_apply(transform: Transform, p: vec2<f32>) -> vec2<f32> {
return transform.matrx.xy * p.x + transform.matrx.zw * p.y + transform.translate;
}
fn round_down(x: f32) -> i32 { fn round_down(x: f32) -> i32 {
return i32(floor(x)); return i32(floor(x));
} }

@@ -306,7 +306,8 @@ fn blend_compose(
let as_fa = as_ * fa; let as_fa = as_ * fa;
let ab_fb = ab * fb; let ab_fb = ab * fb;
let co = as_fa * cs + ab_fb * cb; let co = as_fa * cs + ab_fb * cb;
return vec4(co, as_fa + ab_fb); // Modes like COMPOSE_PLUS can generate alpha > 1.0, so clamp.
return vec4(co, min(as_fa + ab_fb, 1.0));
} }
// Apply color mixing and composition. Both input and output colors are // Apply color mixing and composition. Both input and output colors are
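The clamp matters for compose modes whose source and destination factors are both 1. A tiny numeric check for that situation (COMPOSE_PLUS uses fa = fb = 1; illustrative only):

// Two layers with alphas 0.8 and 0.6 under PLUS would report alpha 1.4
// without the clamp; min(..., 1.0) keeps the result a valid premultiplied alpha.
fn plus_alpha(as_: f32, ab: f32) -> f32 {
    (as_ * 1.0 + ab * 1.0).min(1.0)
}
// plus_alpha(0.8, 0.6) == 1.0 (would be 1.4 before clamping)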

@@ -49,3 +49,16 @@ let N_TILE_Y = 16u;
let N_TILE = 256u; let N_TILE = 256u;
let BLEND_STACK_SPLIT = 4u; let BLEND_STACK_SPLIT = 4u;
// The following are computed in draw_leaf from the generic gradient parameters
// encoded in the scene, and stored in the gradient's info struct, for
// consumption during fine rasterization.
// Radial gradient kinds
let RAD_GRAD_KIND_CIRCULAR = 1u;
let RAD_GRAD_KIND_STRIP = 2u;
let RAD_GRAD_KIND_FOCAL_ON_CIRCLE = 3u;
let RAD_GRAD_KIND_CONE = 4u;
// Radial gradient flags
let RAD_GRAD_SWAPPED = 1u;

@@ -18,7 +18,7 @@ struct DrawMonoid {
let DRAWTAG_NOP = 0u; let DRAWTAG_NOP = 0u;
let DRAWTAG_FILL_COLOR = 0x44u; let DRAWTAG_FILL_COLOR = 0x44u;
let DRAWTAG_FILL_LIN_GRADIENT = 0x114u; let DRAWTAG_FILL_LIN_GRADIENT = 0x114u;
let DRAWTAG_FILL_RAD_GRADIENT = 0x2dcu; let DRAWTAG_FILL_RAD_GRADIENT = 0x29cu;
let DRAWTAG_FILL_IMAGE = 0x248u; let DRAWTAG_FILL_IMAGE = 0x248u;
let DRAWTAG_BEGIN_CLIP = 0x9u; let DRAWTAG_BEGIN_CLIP = 0x9u;
let DRAWTAG_END_CLIP = 0x21u; let DRAWTAG_END_CLIP = 0x21u;

@@ -44,6 +44,7 @@ struct CmdColor {
struct CmdLinGrad { struct CmdLinGrad {
index: u32, index: u32,
extend_mode: u32,
line_x: f32, line_x: f32,
line_y: f32, line_y: f32,
line_c: f32, line_c: f32,
@@ -51,11 +52,13 @@ struct CmdLinGrad {
struct CmdRadGrad { struct CmdRadGrad {
index: u32, index: u32,
extend_mode: u32,
matrx: vec4<f32>, matrx: vec4<f32>,
xlat: vec2<f32>, xlat: vec2<f32>,
c1: vec2<f32>, focal_x: f32,
ra: f32, radius: f32,
roff: f32, kind: u32,
flags: u32,
} }
struct CmdImage { struct CmdImage {

@@ -0,0 +1,26 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
// Helpers for working with transforms.
struct Transform {
matrx: vec4<f32>,
translate: vec2<f32>,
}
fn transform_apply(transform: Transform, p: vec2<f32>) -> vec2<f32> {
return transform.matrx.xy * p.x + transform.matrx.zw * p.y + transform.translate;
}
fn transform_inverse(transform: Transform) -> Transform {
let inv_det = 1.0 / (transform.matrx.x * transform.matrx.w - transform.matrx.y * transform.matrx.z);
let inv_mat = inv_det * vec4(transform.matrx.w, -transform.matrx.y, -transform.matrx.z, transform.matrx.x);
let inv_tr = mat2x2(inv_mat.xy, inv_mat.zw) * -transform.translate;
return Transform(inv_mat, inv_tr);
}
fn transform_mul(a: Transform, b: Transform) -> Transform {
return Transform(
a.matrx.xyxy * b.matrx.xxzz + a.matrx.zwzw * b.matrx.yyww,
a.matrx.xy * b.translate.x + a.matrx.zw * b.translate.y + a.translate
);
}
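For readers less used to WGSL swizzles, a plain-Rust rendering of the two helpers above (the array layout mirrors Transform.matrx; this is a sketch, not shared code):

// matrx is column-major: [m00, m10, m01, m11]; apply(p) = M * p + translate.
#[derive(Clone, Copy)]
struct Transform { matrx: [f32; 4], translate: [f32; 2] }

fn inverse(t: &Transform) -> Transform {
    let m = t.matrx;
    let inv_det = 1.0 / (m[0] * m[3] - m[1] * m[2]);
    let im = [m[3] * inv_det, -m[1] * inv_det, -m[2] * inv_det, m[0] * inv_det];
    // The inverse translation is -(M^-1 * translate), the mat2x2 product in the WGSL.
    let tr = [
        -(im[0] * t.translate[0] + im[2] * t.translate[1]),
        -(im[1] * t.translate[0] + im[3] * t.translate[1]),
    ];
    Transform { matrx: im, translate: tr }
}

// mul(a, b) applies b first, then a; the swizzle expression above is this
// 2x2 matrix product written component-wise.
fn mul(a: &Transform, b: &Transform) -> Transform {
    let (am, bm) = (a.matrx, b.matrx);
    Transform {
        matrx: [
            am[0] * bm[0] + am[2] * bm[1],
            am[1] * bm[0] + am[3] * bm[1],
            am[0] * bm[2] + am[2] * bm[3],
            am[1] * bm[2] + am[3] * bm[3],
        ],
        translate: [
            am[0] * b.translate[0] + am[2] * b.translate[1] + a.translate[0],
            am[1] * b.translate[0] + am[3] * b.translate[1] + a.translate[1],
        ],
    }
}

// For any invertible t, mul(&inverse(&t), &t) should be (numerically) the identity.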

@@ -29,6 +29,7 @@ use {
vello_encoding::Encoding, vello_encoding::Encoding,
}; };
pub use fello;
pub use vello_encoding::Glyph; pub use vello_encoding::Glyph;
/// General context for creating scene fragments for glyph outlines. /// General context for creating scene fragments for glyph outlines.

@@ -37,6 +37,8 @@ pub use util::block_on_wgpu;
use engine::{Engine, ExternalResource, Recording}; use engine::{Engine, ExternalResource, Recording};
use shaders::FullShaders; use shaders::FullShaders;
/// Temporary export, used in with_winit for stats
pub use vello_encoding::BumpAllocators;
use wgpu::{Device, Queue, SurfaceTexture, TextureFormat, TextureView}; use wgpu::{Device, Queue, SurfaceTexture, TextureFormat, TextureView};
/// Catch-all error type. /// Catch-all error type.
@@ -196,6 +198,12 @@ impl Renderer {
/// The texture is assumed to be of the specified dimensions and have been created with /// The texture is assumed to be of the specified dimensions and have been created with
/// the [wgpu::TextureFormat::Rgba8Unorm] format and the [wgpu::TextureUsages::STORAGE_BINDING] /// the [wgpu::TextureFormat::Rgba8Unorm] format and the [wgpu::TextureUsages::STORAGE_BINDING]
/// flag set. /// flag set.
///
/// The return value is the `BumpAllocators` state from this rendering, which is currently
/// used for debug output.
///
/// This return type is not stable, and will likely change once a more principled way to
/// access the relevant statistics is implemented.
pub async fn render_to_texture_async( pub async fn render_to_texture_async(
&mut self, &mut self,
device: &Device, device: &Device,
@@ -203,13 +211,15 @@ impl Renderer {
scene: &Scene, scene: &Scene,
texture: &TextureView, texture: &TextureView,
params: &RenderParams, params: &RenderParams,
) -> Result<()> { ) -> Result<Option<BumpAllocators>> {
let mut render = Render::new(); let mut render = Render::new();
let encoding = scene.data(); let encoding = scene.data();
let recording = render.render_encoding_coarse(encoding, &self.shaders, params, true); let recording = render.render_encoding_coarse(encoding, &self.shaders, params, true);
let target = render.out_image(); let target = render.out_image();
let bump_buf = render.bump_buf(); let bump_buf = render.bump_buf();
self.engine.run_recording(device, queue, &recording, &[])?; self.engine.run_recording(device, queue, &recording, &[])?;
let mut bump: Option<BumpAllocators> = None;
if let Some(bump_buf) = self.engine.get_download(bump_buf) { if let Some(bump_buf) = self.engine.get_download(bump_buf) {
let buf_slice = bump_buf.slice(..); let buf_slice = bump_buf.slice(..);
let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel(); let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
@@ -219,8 +229,8 @@ impl Renderer {
} else { } else {
return Err("channel was closed".into()); return Err("channel was closed".into());
} }
let _mapped = buf_slice.get_mapped_range(); let mapped = buf_slice.get_mapped_range();
// println!("{:?}", bytemuck::cast_slice::<_, u32>(&mapped)); bump = Some(bytemuck::pod_read_unaligned(&*mapped));
} }
// TODO: apply logic to determine whether we need to rerun coarse, and also // TODO: apply logic to determine whether we need to rerun coarse, and also
// allocate the blend stack as needed. // allocate the blend stack as needed.
@@ -231,7 +241,7 @@ impl Renderer {
let external_resources = [ExternalResource::Image(target, texture)]; let external_resources = [ExternalResource::Image(target, texture)];
self.engine self.engine
.run_recording(device, queue, &recording, &external_resources)?; .run_recording(device, queue, &recording, &external_resources)?;
Ok(()) Ok(bump)
} }
/// See [Self::render_to_surface] /// See [Self::render_to_surface]
@@ -242,7 +252,7 @@ impl Renderer {
scene: &Scene, scene: &Scene,
surface: &SurfaceTexture, surface: &SurfaceTexture,
params: &RenderParams, params: &RenderParams,
) -> Result<()> { ) -> Result<Option<BumpAllocators>> {
let width = params.width; let width = params.width;
let height = params.height; let height = params.height;
let mut target = self let mut target = self
@@ -254,7 +264,8 @@ impl Renderer {
if target.width != width || target.height != height { if target.width != width || target.height != height {
target = TargetTexture::new(device, width, height); target = TargetTexture::new(device, width, height);
} }
self.render_to_texture_async(device, queue, scene, &target.view, params) let bump = self
.render_to_texture_async(device, queue, scene, &target.view, params)
.await?; .await?;
let blit = self let blit = self
.blit .blit
@@ -292,7 +303,7 @@ impl Renderer {
} }
queue.submit(Some(encoder.finish())); queue.submit(Some(encoder.finish()));
self.target = Some(target); self.target = Some(target);
Ok(()) Ok(bump)
} }
} }
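A hedged sketch of how a caller might consume the new return value of render_to_texture_async; it assumes a crate-level Result alias, that BumpAllocators implements Debug, and that the surrounding wgpu setup already exists (none of these are shown in this diff):

async fn debug_render(
    renderer: &mut vello::Renderer,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    scene: &vello::Scene,
    texture: &wgpu::TextureView,
    params: &vello::RenderParams,
) -> vello::Result<()> {
    let bump = renderer
        .render_to_texture_async(device, queue, scene, texture, params)
        .await?;
    if let Some(bump) = bump {
        // The bump allocator counters are currently only useful for debug
        // output, per the doc comment above.
        println!("bump allocators: {:?}", bump);
    }
    Ok(())
}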

@@ -205,8 +205,8 @@ impl<'a> DrawGlyphs<'a> {
/// Creates a new builder for encoding a glyph run for the specified /// Creates a new builder for encoding a glyph run for the specified
/// encoding with the given font. /// encoding with the given font.
pub fn new(encoding: &'a mut Encoding, font: &Font) -> Self { pub fn new(encoding: &'a mut Encoding, font: &Font) -> Self {
let coords_start = encoding.normalized_coords.len(); let coords_start = encoding.resources.normalized_coords.len();
let glyphs_start = encoding.glyphs.len(); let glyphs_start = encoding.resources.glyphs.len();
let stream_offsets = encoding.stream_offsets(); let stream_offsets = encoding.stream_offsets();
Self { Self {
encoding, encoding,
@@ -264,10 +264,14 @@ impl<'a> DrawGlyphs<'a> {
/// Sets the normalized design space coordinates for a variable font instance. /// Sets the normalized design space coordinates for a variable font instance.
pub fn normalized_coords(mut self, coords: &[NormalizedCoord]) -> Self { pub fn normalized_coords(mut self, coords: &[NormalizedCoord]) -> Self {
self.encoding self.encoding
.resources
.normalized_coords .normalized_coords
.truncate(self.run.normalized_coords.start); .truncate(self.run.normalized_coords.start);
self.encoding.normalized_coords.extend_from_slice(coords); self.encoding
self.run.normalized_coords.end = self.encoding.normalized_coords.len(); .resources
.normalized_coords
.extend_from_slice(coords);
self.run.normalized_coords.end = self.encoding.resources.normalized_coords.len();
self self
} }
@@ -292,18 +296,19 @@ impl<'a> DrawGlyphs<'a> {
/// ///
/// The `style` parameter accepts either `Fill` or `&Stroke` types. /// The `style` parameter accepts either `Fill` or `&Stroke` types.
pub fn draw(mut self, style: impl Into<StyleRef<'a>>, glyphs: impl Iterator<Item = Glyph>) { pub fn draw(mut self, style: impl Into<StyleRef<'a>>, glyphs: impl Iterator<Item = Glyph>) {
let resources = &mut self.encoding.resources;
self.run.style = style.into().to_owned(); self.run.style = style.into().to_owned();
self.encoding.glyphs.extend(glyphs); resources.glyphs.extend(glyphs);
self.run.glyphs.end = self.encoding.glyphs.len(); self.run.glyphs.end = resources.glyphs.len();
if self.run.glyphs.is_empty() { if self.run.glyphs.is_empty() {
self.encoding resources
.normalized_coords .normalized_coords
.truncate(self.run.normalized_coords.start); .truncate(self.run.normalized_coords.start);
return; return;
} }
let index = self.encoding.glyph_runs.len(); let index = resources.glyph_runs.len();
self.encoding.glyph_runs.push(self.run); resources.glyph_runs.push(self.run);
self.encoding.patches.push(Patch::GlyphRun { index }); resources.patches.push(Patch::GlyphRun { index });
self.encoding.encode_brush(self.brush, self.brush_alpha); self.encoding.encode_brush(self.brush, self.brush_alpha);
} }
} }

@@ -318,4 +318,5 @@ const SHARED_SHADERS: &[(&str, &str)] = &[
shared_shader!("ptcl"), shared_shader!("ptcl"),
shared_shader!("segment"), shared_shader!("segment"),
shared_shader!("tile"), shared_shader!("tile"),
shared_shader!("transform"),
]; ];