vello/piet-gpu/shader/coarse.comp
Raph Levien 7f4a6523a8 Filter sparse tiles
Have a more-parallel read of the tile structures based on bbox coverage,
and only set the bit when the tile isn't empty.

This is a speedup, but there is some duplicated work and it is possible
to improve it further.
2020-06-03 17:55:42 -07:00

// The coarse rasterizer stage of the pipeline.
#version 450
#extension GL_GOOGLE_include_directive : enable
#include "setup.h"
layout(local_size_x = N_TILE, local_size_y = 1) in;
layout(set = 0, binding = 0) buffer AnnotatedBuf {
    uint[] annotated;
};

layout(set = 0, binding = 1) buffer BinsBuf {
    uint[] bins;
};

layout(set = 0, binding = 2) buffer TileBuf {
    uint[] tile;
};

layout(set = 0, binding = 3) buffer AllocBuf {
    uint n_elements;
    uint alloc;
};

layout(set = 0, binding = 4) buffer PtclBuf {
    uint[] ptcl;
};
#include "annotated.h"
#include "bins.h"
#include "tile.h"
#include "ptcl.h"
#define LG_N_PART_READ 8
#define N_PART_READ (1 << LG_N_PART_READ)
shared uint sh_elements[N_TILE];
shared float sh_right_edge[N_TILE];
// Number of elements in each partition, later overwritten by the prefix sum of those counts.
shared uint sh_part_count[N_PART_READ];
shared uint sh_part_elements[N_PART_READ];
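// One bit per (element, tile) pair: bit (e & 31) of sh_bitmaps[e / 32][t] is
// set when element e of the current batch covers tile t of this bin.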
shared uint sh_bitmaps[N_SLICE][N_TILE];
shared uint sh_tile_count[N_TILE];
// The width of the tile rect for the element, intersected with this bin
shared uint sh_tile_width[N_TILE];
shared uint sh_tile_x0[N_TILE];
shared uint sh_tile_y0[N_TILE];
// scale factors useful for converting coordinates to tiles
#define SX (1.0 / float(TILE_WIDTH_PX))
#define SY (1.0 / float(TILE_HEIGHT_PX))
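// For example, assuming the common 16x16 tile size (the actual TILE_WIDTH_PX
// and TILE_HEIGHT_PX come from setup.h), a pixel x coordinate of 40 maps to
// tile column floor(40 * SX) = 2.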
// Perhaps cmd_limit should be a global? This is a style question.
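// When the current ptcl chunk is nearly full, grab a fresh chunk from the
// global allocator and link to it with a Jump command. The limit leaves room
// for two commands, so the Jump (or the trailing End) always fits.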
void alloc_cmd(inout CmdRef cmd_ref, inout uint cmd_limit) {
    if (cmd_ref.offset > cmd_limit) {
        uint new_cmd = atomicAdd(alloc, PTCL_INITIAL_ALLOC);
        CmdJump jump = CmdJump(new_cmd);
        Cmd_Jump_write(cmd_ref, jump);
        cmd_ref = CmdRef(new_cmd);
        cmd_limit = new_cmd + PTCL_INITIAL_ALLOC - 2 * Cmd_size;
    }
}
void main() {
    // Could use either linear or 2d layouts for both dispatch and
    // invocations within the workgroup. We'll use variables to abstract.
    uint bin_ix = N_TILE_X * gl_WorkGroupID.y + gl_WorkGroupID.x;
    uint partition_ix = 0;
    uint n_partitions = (n_elements + N_TILE - 1) / N_TILE;
    // Top left coordinates of this bin.
    vec2 xy0 = vec2(N_TILE_X * TILE_WIDTH_PX * gl_WorkGroupID.x, N_TILE_Y * TILE_HEIGHT_PX * gl_WorkGroupID.y);
    uint th_ix = gl_LocalInvocationID.x;
    uint tile_x = N_TILE_X * gl_WorkGroupID.x + gl_LocalInvocationID.x % N_TILE_X;
    uint tile_y = N_TILE_Y * gl_WorkGroupID.y + gl_LocalInvocationID.x / N_TILE_X;
    uint this_tile_ix = tile_y * WIDTH_IN_TILES + tile_x;
    CmdRef cmd_ref = CmdRef(this_tile_ix * PTCL_INITIAL_ALLOC);
    uint cmd_limit = cmd_ref.offset + PTCL_INITIAL_ALLOC - 2 * Cmd_size;
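    // Each tile owns a PTCL_INITIAL_ALLOC-byte chunk at a fixed offset in the
    // ptcl buffer; overflow chains additional chunks via Jump commands
    // (see alloc_cmd above).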
    // I'm sure we can figure out how to do this with at least one fewer register...
    // Items up to rd_ix have been read from sh_elements
    uint rd_ix = 0;
    // Items up to wr_ix have been written into sh_elements
    uint wr_ix = 0;
    // Items between part_start_ix and ready_ix are ready to be transferred from sh_part_elements
    uint part_start_ix = 0;
    uint ready_ix = 0;
    int backdrop = 0;
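    // Each iteration of this loop stages up to N_TILE elements of the bin into
    // shared memory, rasterizes their coverage into the per-tile bitmaps, and
    // then walks the bitmaps to emit commands for this thread's tile.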
    while (true) {
        for (uint i = 0; i < N_SLICE; i++) {
            sh_bitmaps[i][th_ix] = 0;
        }

        // parallel read of input partitions
        do {
            if (ready_ix == wr_ix && partition_ix < n_partitions) {
                part_start_ix = ready_ix;
                uint count = 0;
                if (th_ix < N_PART_READ && partition_ix + th_ix < n_partitions) {
                    uint in_ix = ((partition_ix + th_ix) * N_TILE + bin_ix) * 2;
                    count = bins[in_ix];
                    sh_part_elements[th_ix] = bins[in_ix + 1];
                }
                // prefix sum of counts
                for (uint i = 0; i < LG_N_PART_READ; i++) {
                    if (th_ix < N_PART_READ) {
                        sh_part_count[th_ix] = count;
                    }
                    barrier();
                    if (th_ix < N_PART_READ) {
                        if (th_ix >= (1 << i)) {
                            count += sh_part_count[th_ix - (1 << i)];
                        }
                    }
                    barrier();
                }
                if (th_ix < N_PART_READ) {
                    sh_part_count[th_ix] = part_start_ix + count;
                }
                barrier();
                ready_ix = sh_part_count[N_PART_READ - 1];
                partition_ix += N_PART_READ;
            }
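            // sh_part_count now holds the inclusive prefix sum of the
            // per-partition counts (offset by part_start_ix), and ready_ix is
            // the running total of elements made available so far.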
            // use binary search to find element to read
            uint ix = rd_ix + th_ix;
            if (ix >= wr_ix && ix < ready_ix) {
                uint part_ix = 0;
                for (uint i = 0; i < LG_N_PART_READ; i++) {
                    uint probe = part_ix + ((N_PART_READ / 2) >> i);
                    if (ix >= sh_part_count[probe - 1]) {
                        part_ix = probe;
                    }
                }
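                // Invariant: sh_part_count[part_ix - 1] <= ix < sh_part_count[part_ix],
                // treating sh_part_count[-1] as part_start_ix, so part_ix is the
                // partition holding element ix.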
                ix -= part_ix > 0 ? sh_part_count[part_ix - 1] : part_start_ix;
                BinInstanceRef inst_ref = BinInstanceRef(sh_part_elements[part_ix]);
                BinInstance inst = BinInstance_read(BinInstance_index(inst_ref, ix));
                sh_elements[th_ix] = inst.element_ix;
                sh_right_edge[th_ix] = inst.right_edge;
            }
            barrier();
            wr_ix = min(rd_ix + N_TILE, ready_ix);
        } while (wr_ix - rd_ix < N_TILE && (wr_ix < ready_ix || partition_ix < n_partitions));
        // We've done the merge and filled the buffer.

        // Read one element, compute coverage.
        uint tag = Annotated_Nop;
        AnnotatedRef ref;
        float right_edge = 0.0;
        if (th_ix + rd_ix < wr_ix) {
            uint element_ix = sh_elements[th_ix];
            right_edge = sh_right_edge[th_ix];
            ref = AnnotatedRef(element_ix * Annotated_size);
            tag = Annotated_tag(ref);
        }
        // Bounding box of element in pixel coordinates.
        float xmin, xmax, ymin, ymax;
        switch (tag) {
        case Annotated_Fill:
        case Annotated_Stroke:
            // Note: we take advantage of the fact that fills and strokes
            // have compatible layout.
            AnnoFill fill = Annotated_Fill_read(ref);
            xmin = fill.bbox.x;
            xmax = fill.bbox.z;
            ymin = fill.bbox.y;
            ymax = fill.bbox.w;
            break;
        default:
            ymin = 0;
            ymax = 0;
            break;
        }
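        // Elements with no geometry get an empty y extent, so the clamped tile
        // rect below is empty and they generate no coverage.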
        // Draw the coverage area into the bitmasks. This uses an algorithm
        // that computes the coverage of a span for a given scanline.

        // Compute bounding box in tiles and clip to this bin.
        int x0 = int(floor((xmin - xy0.x) * SX));
        int x1 = int(ceil((xmax - xy0.x) * SX));
        int y0 = int(floor((ymin - xy0.y) * SY));
        int y1 = int(ceil((ymax - xy0.y) * SY));
        x0 = clamp(x0, 0, N_TILE_X);
        x1 = clamp(x1, x0, N_TILE_X);
        y0 = clamp(y0, 0, N_TILE_Y);
        y1 = clamp(y1, y0, N_TILE_Y);
        uint tile_count = uint((x1 - x0) * (y1 - y0));
        sh_tile_width[th_ix] = uint(x1 - x0);
        sh_tile_x0[th_ix] = uint(x0);
        sh_tile_y0[th_ix] = uint(y0);

        // Prefix sum of sh_tile_count
        sh_tile_count[th_ix] = tile_count;
        for (uint i = 0; i < LG_N_TILE; i++) {
            barrier();
            if (th_ix >= (1 << i)) {
                tile_count += sh_tile_count[th_ix - (1 << i)];
            }
            barrier();
            sh_tile_count[th_ix] = tile_count;
        }
        barrier();
        uint total_tile_count = sh_tile_count[N_TILE - 1];
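        // Distribute the (element, tile) pairs evenly over the workgroup's
        // threads; each thread binary-searches the prefix sum to recover which
        // element, and which tile within that element's rect, it owns.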
        for (uint ix = th_ix; ix < total_tile_count; ix += N_TILE) {
            // Binary search to find element
            uint el_ix = 0;
            for (uint i = 0; i < LG_N_TILE; i++) {
                uint probe = el_ix + ((N_TILE / 2) >> i);
                if (ix >= sh_tile_count[probe - 1]) {
                    el_ix = probe;
                }
            }
            uint seq_ix = ix - (el_ix > 0 ? sh_tile_count[el_ix - 1] : 0);
            uint width = sh_tile_width[el_ix];
            uint x = sh_tile_x0[el_ix] + seq_ix % width;
            uint y = sh_tile_y0[el_ix] + seq_ix / width;
            uint tile_x = x + gl_WorkGroupID.x * N_TILE_X;
            uint tile_y = y + gl_WorkGroupID.y * N_TILE_Y;
            uint element_ix = sh_elements[el_ix];
            Path path = Path_read(PathRef(element_ix * Path_size));
            if (tile_x >= path.bbox.x && tile_x < path.bbox.z && tile_y >= path.bbox.y && tile_y < path.bbox.w) {
                uint stride = path.bbox.z - path.bbox.x;
                uint tile_subix = (tile_y - path.bbox.y) * stride + tile_x - path.bbox.x;
                Tile tile = Tile_read(Tile_index(path.tiles, tile_subix));
                if (tile.tile.offset != 0) {
                    uint el_slice = el_ix / 32;
                    uint el_mask = 1 << (el_ix & 31);
                    atomicOr(sh_bitmaps[el_slice][y * N_TILE_X + x], el_mask);
                }
            }
        }
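        // A bit is set only when the tile's segment list is nonempty, so fully
        // empty (sparse) tiles are filtered out before command generation.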
        barrier();

        // We've computed coverage and other info for each element in the input, now for
        // the output stage. We'll do segments first using a more parallel algorithm.
        /*
        uint seg_count = 0;
        for (uint i = 0; i < N_SLICE; i++) {
            seg_count += bitCount(sh_bitmaps[i][th_ix] & sh_is_segment[i]);
        }
        sh_seg_count[th_ix] = seg_count;
        // Prefix sum of sh_seg_count
        for (uint i = 0; i < LG_N_TILE; i++) {
            barrier();
            if (th_ix >= (1 << i)) {
                seg_count += sh_seg_count[th_ix - (1 << i)];
            }
            barrier();
            sh_seg_count[th_ix] = seg_count;
        }
        if (th_ix == N_TILE - 1) {
            sh_seg_alloc = atomicAdd(alloc, seg_count * Segment_size);
        }
        barrier();
        uint total_seg_count = sh_seg_count[N_TILE - 1];
        uint seg_alloc = sh_seg_alloc;

        // Output buffer is allocated as segments for each tile laid end-to-end.
        for (uint ix = th_ix; ix < total_seg_count; ix += N_TILE) {
            // Find the work item; this thread is now not bound to an element or tile.
            // First find the tile (by binary search)
            uint tile_ix = 0;
            for (uint i = 0; i < LG_N_TILE; i++) {
                uint probe = tile_ix + ((N_TILE / 2) >> i);
                if (ix >= sh_seg_count[probe - 1]) {
                    tile_ix = probe;
                }
            }
            // Now, sh_seg_count[tile_ix - 1] <= ix < sh_seg_count[tile_ix]
            // (considering sh_seg_count[-1] == 0).

            // Index of segment within tile's segments
            uint seq_ix = ix;
            // Maybe consider a sentinel value to avoid the conditional?
            if (tile_ix > 0) {
                seq_ix -= sh_seg_count[tile_ix - 1];
            }
            // Find the segment. This is done by linear scan through the bitmaps of the
            // tile, accelerated by bit counting. Binary search might help, maybe not.
            uint slice_ix = 0;
            uint seq_bits;
            while (true) {
                seq_bits = sh_bitmaps[slice_ix][tile_ix] & sh_is_segment[slice_ix];
                uint this_count = bitCount(seq_bits);
                if (this_count > seq_ix) {
                    break;
                }
                seq_ix -= this_count;
                slice_ix++;
            }
            // Now find position of nth bit set (n = seq_ix) in seq_bits; binary search
            uint bit_ix = 0;
            for (int i = 0; i < 5; i++) {
                uint probe = bit_ix + (16 >> i);
                if (seq_ix >= bitCount(seq_bits & ((1 << probe) - 1))) {
                    bit_ix = probe;
                }
            }
            uint out_offset = seg_alloc + Segment_size * ix + SegChunk_size;
            uint rd_el_ix = slice_ix * 32 + bit_ix;
            uint element_ix = sh_elements[rd_el_ix];
            ref = AnnotatedRef(element_ix * Annotated_size);
            AnnoFillLineSeg line = Annotated_FillLine_read(ref);
            float y_edge = 0.0;
            // This is basically the same logic as piet-metal, but should be made numerically robust.
            if (Annotated_tag(ref) == Annotated_FillLine) {
                vec2 tile_xy = xy0 + vec2((tile_ix % N_TILE_X) * TILE_WIDTH_PX, (tile_ix / N_TILE_X) * TILE_HEIGHT_PX);
                y_edge = mix(line.p0.y, line.p1.y, (tile_xy.x - line.p0.x) / (line.p1.x - line.p0.x));
                if (min(line.p0.x, line.p1.x) < tile_xy.x && y_edge >= tile_xy.y && y_edge < tile_xy.y + TILE_HEIGHT_PX) {
                    if (line.p0.x > line.p1.x) {
                        line.p1 = vec2(tile_xy.x, y_edge);
                    } else {
                        line.p0 = vec2(tile_xy.x, y_edge);
                    }
                } else {
                    y_edge = 1e9;
                }
            }
            Segment seg = Segment(line.p0, line.p1, y_edge);
            Segment_write(SegmentRef(seg_alloc + Segment_size * ix), seg);
        }
        */
        // Output non-segment elements for this tile. The thread does a sequential walk
        // through the non-segment elements, and for segments, count and backdrop are
        // aggregated using bit counting.
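        // findLSB visits set bits in increasing element index, preserving the
        // input (painting) order of elements within the tile.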
        uint slice_ix = 0;
        uint bitmap = sh_bitmaps[0][th_ix];
        while (true) {
            if (bitmap == 0) {
                slice_ix++;
                if (slice_ix == N_SLICE) {
                    break;
                }
                bitmap = sh_bitmaps[slice_ix][th_ix];
                if (bitmap == 0) {
                    continue;
                }
            }
            uint element_ref_ix = slice_ix * 32 + findLSB(bitmap);
            uint element_ix = sh_elements[element_ref_ix];

            // Clear LSB
            bitmap &= bitmap - 1;

            // At this point, we read the element again from global memory.
            // If that turns out to be expensive, maybe we can pack it into
            // shared memory (or perhaps just the tag).
            ref = AnnotatedRef(element_ix * Annotated_size);
            tag = Annotated_tag(ref);
            switch (tag) {
            /*
            case Annotated_Fill:
                if (last_chunk_n > 0 || seg_count > 0) {
                    SegChunkRef chunk_ref = SegChunkRef(0);
                    if (seg_count > 0) {
                        chunk_ref = alloc_seg_chunk();
                        SegChunk chunk;
                        chunk.n = seg_count;
                        chunk.next = SegChunkRef(0);
                        uint seg_offset = seg_alloc + seg_start * Segment_size;
                        chunk.segs = SegmentRef(seg_offset);
                        SegChunk_write(chunk_ref, chunk);
                    }
                    if (last_chunk_n > 0) {
                        SegChunk chunk;
                        chunk.n = last_chunk_n;
                        chunk.next = chunk_ref;
                        chunk.segs = last_chunk_segs;
                        SegChunk_write(last_chunk_ref, chunk);
                    } else {
                        first_seg_chunk = chunk_ref;
                    }
                    AnnoFill fill = Annotated_Fill_read(ref);
                    CmdFill cmd_fill;
                    cmd_fill.seg_ref = first_seg_chunk;
                    cmd_fill.backdrop = backdrop;
                    cmd_fill.rgba_color = fill.rgba_color;
                    alloc_cmd(cmd_ref, cmd_limit);
                    Cmd_Fill_write(cmd_ref, cmd_fill);
                    cmd_ref.offset += Cmd_size;
                    last_chunk_n = 0;
                } else if (backdrop != 0) {
                    AnnoFill fill = Annotated_Fill_read(ref);
                    alloc_cmd(cmd_ref, cmd_limit);
                    Cmd_Solid_write(cmd_ref, CmdSolid(fill.rgba_color));
                    cmd_ref.offset += Cmd_size;
                }
                seg_start += seg_count;
                seg_count = 0;
                backdrop = 0;
                break;
            */
            case Annotated_Stroke:
                // Because the only elements we're processing right now are
                // paths, we can just use the element index as the path index.
                // In future, when we're doing a bunch of stuff, the path index
                // should probably be stored in the annotated element.
                uint path_ix = element_ix;
                Path path = Path_read(PathRef(path_ix * Path_size));
                // It may be we have a strong guarantee this will always be `true`, but
                // I prefer not to take chances.
                if (tile_x >= path.bbox.x && tile_x < path.bbox.z && tile_y >= path.bbox.y && tile_y < path.bbox.w) {
                    uint stride = path.bbox.z - path.bbox.x;
                    uint tile_subix = (tile_y - path.bbox.y) * stride + tile_x - path.bbox.x;
                    Tile tile = Tile_read(Tile_index(path.tiles, tile_subix));
                    if (tile.tile.offset != 0) {
                        AnnoStroke stroke = Annotated_Stroke_read(ref);
                        CmdStroke cmd_stroke;
                        cmd_stroke.tile_ref = tile.tile.offset;
                        cmd_stroke.half_width = 0.5 * stroke.linewidth;
                        cmd_stroke.rgba_color = stroke.rgba_color;
                        alloc_cmd(cmd_ref, cmd_limit);
                        Cmd_Stroke_write(cmd_ref, cmd_stroke);
                        cmd_ref.offset += Cmd_size;
                    }
                }
                break;
            }
        }
        barrier();
        rd_ix += N_TILE;
        if (rd_ix >= ready_ix && partition_ix >= n_partitions) break;
    }
    Cmd_End_write(cmd_ref);
}