vello/piet-gpu/shader/binning.comp
Raph Levien a616b4d010 Rework right_edge computation in elements
Trying to fit it into the fancy monad doesn't really work, so use a
more straightforward approach to compute it from the aggregate.

Also add yEdge logic (basically copying piet-metal). With a fix to
ELEMENT_BINNING_RATIO (which I had simply gotten wrong), the example
renders almost correctly, with small bounding box artifacts.
2020-05-21 10:00:56 -07:00


// The binning stage of the pipeline.

#version 450
#extension GL_GOOGLE_include_directive : enable

#include "setup.h"

layout(local_size_x = N_TILE, local_size_y = 1) in;
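
// Each workgroup of N_TILE invocations bins one partition of N_TILE elements
// per loop iteration: every invocation reads one element, and (assuming
// N_TILE == N_TILE_X * N_TILE_Y) also owns the output list of one bin.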

layout(set = 0, binding = 0) buffer AnnotatedBuf {
    uint[] annotated;
};

// This is for scanning forward for right_edge data.
layout(set = 0, binding = 1) buffer StateBuf {
    uint[] state;
};

layout(set = 0, binding = 2) buffer AllocBuf {
    uint n_elements;
    // Will be incremented atomically to claim tiles
    uint tile_ix;
    uint alloc;
};

layout(set = 0, binding = 3) buffer BinsBuf {
    uint[] bins;
};
#include "annotated.h"
#include "state.h"
#include "bins.h"
// scale factors useful for converting coordinates to bins
#define SX (1.0 / float(N_TILE_X * TILE_WIDTH_PX))
#define SY (1.0 / float(N_TILE_Y * TILE_HEIGHT_PX))
#define TSY (1.0 / float(TILE_HEIGHT_PX))
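// For example (values depend on setup.h; assuming 16x16 px tiles in a
// 16 x 16 bin grid per workgroup): SX = SY = 1/256, so a point at x = 300 px
// maps to bin column floor(300 * SX) = 1, and TSY = 1/16 converts y to tile
// rows for the crosses_edge test below.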
// GLSL has no infinity constant; the constant expression 1.0 / 0.0 yields
// +inf. uintBitsToFloat(0x7f800000) is an alternative spelling.
#define INFINITY (1.0 / 0.0)
// Note: cudaraster has N_TILE + 1 to cut down on bank conflicts.
shared uint bitmaps[N_SLICE][N_TILE];
shared uint count[N_SLICE][N_TILE];
shared uint sh_my_tile;
shared uint sh_chunk_start[N_TILE];
shared uint sh_chunk_end[N_TILE];
shared uint sh_chunk_jump[N_TILE];
shared float sh_right_edge[N_TILE];
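
// Roles of the shared arrays above: bitmaps holds per-bin coverage masks
// (one bit per element in the partition), count holds inclusive prefix
// counts across slices, and the sh_chunk_* arrays publish each bin's output
// cursor for the instance-writing loop at the end of main.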
#define StateBuf_stride (8 + 2 * State_size)
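
// Each partition's record in the state buffer is 8 bytes of flags plus two
// State structs (aggregate and prefix); StateBuf_stride must match the layout
// written by the element processing pass. The index returned below points at
// the word where that pass records the partition's right_edge bits.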
uint state_right_edge_index(uint partition_ix) {
    return 2 + partition_ix * (StateBuf_stride / 4);
}

void main() {
    BinChunkRef chunk_ref = BinChunkRef((gl_LocalInvocationID.x * N_WG + gl_WorkGroupID.x) * BIN_INITIAL_ALLOC);
    uint wr_limit = chunk_ref.offset + BIN_INITIAL_ALLOC;
    uint chunk_n = 0;
    uint my_n_elements = n_elements;
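    // Persistent-thread style main loop: each workgroup claims the next
    // partition of N_TILE elements by atomically bumping tile_ix, until all
    // elements are consumed.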
    while (true) {
        if (gl_LocalInvocationID.x == 0) {
            sh_my_tile = atomicAdd(tile_ix, 1);
        }
        barrier();
        uint my_tile = sh_my_tile;
        if (my_tile * N_TILE >= my_n_elements) {
            break;
        }
        for (uint i = 0; i < N_SLICE; i++) {
            bitmaps[i][gl_LocalInvocationID.x] = 0;
        }
        barrier();

        // Read inputs and determine coverage of bins
        uint element_ix = my_tile * N_TILE + gl_LocalInvocationID.x;
        AnnotatedRef ref = AnnotatedRef(element_ix * Annotated_size);
        uint tag = Annotated_Nop;
        if (element_ix < my_n_elements) {
            tag = Annotated_tag(ref);
        }
        int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
        float my_right_edge = INFINITY;
        bool crosses_edge = false;
        switch (tag) {
        case Annotated_FillLine:
        case Annotated_StrokeLine:
            AnnoStrokeLineSeg line = Annotated_StrokeLine_read(ref);
            x0 = int(floor((min(line.p0.x, line.p1.x) - line.stroke.x) * SX));
            y0 = int(floor((min(line.p0.y, line.p1.y) - line.stroke.y) * SY));
            x1 = int(ceil((max(line.p0.x, line.p1.x) + line.stroke.x) * SX));
            y1 = int(ceil((max(line.p0.y, line.p1.y) + line.stroke.y) * SY));
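            // yEdge logic: a fill segment that crosses a horizontal tile
            // boundary affects winding in tiles to its right, so it must be
            // binned out to the right edge of its fill rather than just its
            // own bbox (see the crosses_edge fixup below).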
            crosses_edge = tag == Annotated_FillLine && ceil(line.p0.y * TSY) != ceil(line.p1.y * TSY);
            break;
        case Annotated_Fill:
        case Annotated_Stroke:
            // Note: we take advantage of the fact that fills and strokes
            // have compatible layout.
            AnnoFill fill = Annotated_Fill_read(ref);
            x0 = int(floor(fill.bbox.x * SX));
            y0 = int(floor(fill.bbox.y * SY));
            x1 = int(ceil(fill.bbox.z * SX));
            y1 = int(ceil(fill.bbox.w * SY));
            // It probably makes more sense to track x1, to avoid having to redo
            // the rounding to tile coords.
            my_right_edge = fill.bbox.z;
            break;
        }

        // If the last element in this partition is a fill edge, then we need to do a
        // look-forward to find the right edge of its corresponding fill. That data is
        // recorded in aggregates computed in the element processing pass.
        if (gl_LocalInvocationID.x == N_TILE - 1 && tag == Annotated_FillLine) {
            uint aggregate_ix = (my_tile + 1) * ELEMENT_BINNING_RATIO;
            // This is sequential but the expectation is that the amount of
            // look-forward is small (performance may degrade in the case
            // of massively complex paths).
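            // Records holding +inf belong to partitions that recorded no
            // finite right_edge; the scan stops at the first finite value.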
            do {
                my_right_edge = uintBitsToFloat(state[state_right_edge_index(aggregate_ix)]);
                aggregate_ix++;
            } while (isinf(my_right_edge));
        }

        // Now propagate right_edge backward, from fill to segment.
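        // This is a Hillis-Steele style backward scan: in round i, an
        // invocation still holding +inf takes the value 2^i slots to its
        // right, so after LG_N_TILE rounds each segment sees the right_edge
        // of the first fill at or after it in the partition.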
        for (uint i = 0; i < LG_N_TILE; i++) {
            // Note: we could try to cut down on write bandwidth here if the value hasn't
            // changed, but not sure it's worth the complexity to track.
            sh_right_edge[gl_LocalInvocationID.x] = my_right_edge;
            barrier();
            if (gl_LocalInvocationID.x + (1 << i) < N_TILE && isinf(my_right_edge)) {
                my_right_edge = sh_right_edge[gl_LocalInvocationID.x + (1 << i)];
            }
            barrier();
        }
        if (crosses_edge) {
            x1 = int(ceil(my_right_edge * SX));
        }

        // At this point, we run an iterator over the coverage area,
        // trying to keep divergence low.
        // Right now, it's just a bbox, but we'll get finer with
        // segments.
        x0 = clamp(x0, 0, N_TILE_X);
        x1 = clamp(x1, x0, N_TILE_X);
        y0 = clamp(y0, 0, N_TILE_Y);
        y1 = clamp(y1, y0, N_TILE_Y);
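        // A zero-width x range means no bins are covered; collapsing the y
        // range as well makes the coverage loop below exit immediately.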
        if (x0 == x1) y1 = y0;
        int x = x0, y = y0;
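        // Coverage bitmaps are N_SLICE 32-bit words per bin; this element's
        // bit is (local id mod 32) within word (local id / 32).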
        uint my_slice = gl_LocalInvocationID.x / 32;
        uint my_mask = 1 << (gl_LocalInvocationID.x & 31);
        while (y < y1) {
            atomicOr(bitmaps[my_slice][y * N_TILE_X + x], my_mask);
            x++;
            if (x == x1) {
                x = x0;
                y++;
            }
        }
        barrier();

        // Allocate output segments.
        uint element_count = 0;
        for (uint i = 0; i < N_SLICE; i++) {
            element_count += bitCount(bitmaps[i][gl_LocalInvocationID.x]);
            count[i][gl_LocalInvocationID.x] = element_count;
        }
        // element_count is number of elements covering bin for this invocation.
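        // count[i] is the inclusive prefix over slices of set bits for this
        // invocation's bin (each invocation owns bin gl_LocalInvocationID.x).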
        if (element_count != 0) {
            uint chunk_end;
            uint chunk_new_start;
            // Refactor to reduce code duplication?
            if (chunk_n > 0) {
                uint next_chunk = chunk_ref.offset + BinChunk_size + chunk_n * BinInstance_size;
                if (next_chunk + BinChunk_size + min(24, element_count * BinInstance_size) > wr_limit) {
                    uint alloc_amount = max(BIN_ALLOC, BinChunk_size + element_count * BinInstance_size);
                    // could try to reduce fragmentation if BIN_ALLOC is only a bit above needed
                    next_chunk = atomicAdd(alloc, alloc_amount);
                    wr_limit = next_chunk + alloc_amount;
                }
                BinChunk_write(chunk_ref, BinChunk(chunk_n, BinChunkRef(next_chunk)));
                chunk_ref = BinChunkRef(next_chunk);
            }
            BinInstanceRef instance_ref = BinInstanceRef(chunk_ref.offset + BinChunk_size);
            if (instance_ref.offset + element_count * BinInstance_size > wr_limit) {
                chunk_end = wr_limit;
                chunk_n = (wr_limit - instance_ref.offset) / BinInstance_size;
                uint alloc_amount = max(BIN_ALLOC, BinChunk_size + (element_count - chunk_n) * BinInstance_size);
                chunk_new_start = atomicAdd(alloc, alloc_amount);
                wr_limit = chunk_new_start + alloc_amount;
                BinChunk_write(chunk_ref, BinChunk(chunk_n, BinChunkRef(chunk_new_start)));
                chunk_ref = BinChunkRef(chunk_new_start);
                chunk_new_start += BinChunk_size;
                chunk_n = element_count - chunk_n;
            } else {
                chunk_end = ~0;
                chunk_new_start = ~0;
                chunk_n = element_count;
            }
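            // Publish this bin's output cursor: writes landing at or past
            // chunk_end must add chunk_jump (wrapping uint arithmetic) to be
            // redirected into the newly allocated chunk.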
            sh_chunk_start[gl_LocalInvocationID.x] = instance_ref.offset;
            sh_chunk_end[gl_LocalInvocationID.x] = chunk_end;
            sh_chunk_jump[gl_LocalInvocationID.x] = chunk_new_start - chunk_end;
        }
        barrier();

        // Use a similar strategy to the Laine & Karras paper: loop over the
        // bbox of bins touched by this element.
        x = x0;
        y = y0;
        while (y < y1) {
            uint bin_ix = y * N_TILE_X + x;
            uint out_mask = bitmaps[my_slice][bin_ix];
            if ((out_mask & my_mask) != 0) {
                // Rank of this element among those covering the bin: bitmap
                // bits below my_mask, plus the preceding slices' counts.
                uint idx = bitCount(out_mask & (my_mask - 1));
                if (my_slice > 0) {
                    idx += count[my_slice - 1][bin_ix];
                }
                uint out_offset = sh_chunk_start[bin_ix] + idx * BinInstance_size;
                if (out_offset >= sh_chunk_end[bin_ix]) {
                    out_offset += sh_chunk_jump[bin_ix];
                }
                BinInstance_write(BinInstanceRef(out_offset), BinInstance(element_ix, my_right_edge));
            }
            x++;
            if (x == x1) {
                x = x0;
                y++;
            }
        }
    }
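
    // Terminate this invocation's bin list; a next reference of 0 marks the
    // end of the chunk list.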
    BinChunk_write(chunk_ref, BinChunk(chunk_n, BinChunkRef(0)));
}