|
|
|
// The coarse rasterizer stage of the pipeline.
|
|
|
|
|
|
|
|
#version 450
|
|
|
|
#extension GL_GOOGLE_include_directive : enable
|
|
|
|
|
|
|
|
#include "setup.h"
|
|
|
|
|
|
|
|
layout(local_size_x = N_TILE, local_size_y = 1) in;
|
|
|
|
|
|
|
|
// Input: annotated scene elements, read through the generated accessors
// in annotated.h (Annotated_tag, Annotated_Fill_read, ...).
layout(set = 0, binding = 0) buffer AnnotatedBuf {
    uint[] annotated;
};

// Input: per-bin chunk lists of element instances, read through the
// generated accessors in bins.h (BinChunk_read, BinInstance_read, ...).
layout(set = 0, binding = 1) buffer BinsBuf {
    uint[] bins;
};

// Global bump-allocator cursor for ptcl chunks; advanced with atomicAdd
// in alloc_cmd when a tile's command list overflows its initial chunk.
layout(set = 0, binding = 2) buffer AllocBuf {
    uint alloc;
};

// Output: per-tile command lists (ptcl), written through the generated
// writers in ptcl.h (Cmd_Solid_write, Cmd_Jump_write, ...).
layout(set = 0, binding = 3) buffer PtclBuf {
    uint[] ptcl;
};
|
|
|
|
|
|
|
|
#include "annotated.h"
|
|
|
|
#include "bins.h"
|
|
|
|
#include "ptcl.h"
|
|
|
|
|
|
|
|
#define N_RINGBUF 512
|
|
|
|
|
|
|
|
// Ring buffer of element indices merged from the per-workgroup bin lists.
// Cursors (wr_ix/rd_ix in main) increase monotonically; indices are taken
// mod N_RINGBUF.
shared uint sh_elements[N_RINGBUF];
// Offset of the current chunk in each producer workgroup's bin list.
shared uint sh_chunk[N_WG];
// Offset of the next chunk in each list (0 when there is none).
shared uint sh_chunk_next[N_WG];
// Number of instances in each current chunk.
shared uint sh_chunk_n[N_WG];
// atomicMin scratch used to select the chunk with the least first element.
shared uint sh_min_buf;
// Some of these are kept in shared memory to ease register
// pressure, but it could go either way.
// First element index of each list's current chunk; ~0 once exhausted.
shared uint sh_first_el[N_WG];
// Size of the chunk selected in the current merge round (0 = none left).
shared uint sh_selected_n;
// Offset of the selected chunk's first BinInstance.
shared uint sh_elements_ref;

// Coverage bitmaps: one bit per thread (= per batch element), one word
// column per tile, sliced into N_SLICE 32-bit words.
shared uint sh_bitmaps[N_SLICE][N_TILE];
|
|
|
|
|
|
|
|
// Scale factors for converting pixel coordinates to tile coordinates.
#define SX (1.0 / float(TILE_WIDTH_PX))
#define SY (1.0 / float(TILE_HEIGHT_PX))
|
|
|
|
|
|
|
|
// Perhaps cmd_limit should be a global? This is a style question.
|
|
|
|
// Ensure there is room to write at least one more command at cmd_ref.
// When the current ptcl chunk is exhausted, grab a fresh chunk from the
// global bump allocator (AllocBuf.alloc) and link it in with a Jump
// command. cmd_limit is updated so the last two Cmd_size slots of the new
// chunk remain reserved for the next Jump.
void alloc_cmd(inout CmdRef cmd_ref, inout uint cmd_limit) {
    if (cmd_ref.offset <= cmd_limit) {
        return;
    }
    uint new_cmd = atomicAdd(alloc, PTCL_INITIAL_ALLOC);
    Cmd_Jump_write(cmd_ref, CmdJump(new_cmd));
    cmd_ref = CmdRef(new_cmd);
    cmd_limit = new_cmd + PTCL_INITIAL_ALLOC - 2 * Cmd_size;
}
|
|
|
|
|
|
|
|
void main() {
    // Each workgroup owns one bin of N_TILE_X x N_TILE_Y tiles, with one
    // thread per tile. The outer loop alternates between merging bin output
    // into a shared ring buffer and rasterizing a batch of elements.

    // Could use either linear or 2d layouts for both dispatch and
    // invocations within the workgroup. We'll use variables to abstract.
    // NOTE(review): N_TILE_X is used here as the dispatch width in
    // workgroups — presumably the grid is N_TILE_X bins wide; confirm
    // against the dispatch code.
    uint bin_ix = N_TILE_X * gl_WorkGroupID.y + gl_WorkGroupID.x;
    // Top left coordinates of this bin.
    vec2 xy0 = vec2(N_TILE_X * TILE_WIDTH_PX * gl_WorkGroupID.x, N_TILE_Y * TILE_HEIGHT_PX * gl_WorkGroupID.y);
    uint th_ix = gl_LocalInvocationID.x;
    // Global coordinates of the tile owned by this thread, and the offset
    // of its command list in the ptcl buffer.
    uint tile_x = N_TILE_X * gl_WorkGroupID.x + gl_LocalInvocationID.x % N_TILE_X;
    uint tile_y = N_TILE_Y * gl_WorkGroupID.y + gl_LocalInvocationID.x / N_TILE_X;
    uint tile_ix = tile_y * WIDTH_IN_TILES + tile_x;
    CmdRef cmd_ref = CmdRef(tile_ix * PTCL_INITIAL_ALLOC);
    // Reserve room for a Jump plus one more command; see alloc_cmd.
    uint cmd_limit = cmd_ref.offset + PTCL_INITIAL_ALLOC - 2 * Cmd_size;

    // Ring buffer write/read cursors; monotonically increasing, indices
    // taken mod N_RINGBUF.
    uint wr_ix = 0;
    uint rd_ix = 0;
    uint first_el;  // NOTE(review): unused in this function
    // Threads 0..N_WG-1 each track the bin chunk list written by one
    // producer workgroup, caching its head state in shared memory.
    if (th_ix < N_WG) {
        uint start_chunk = (bin_ix * N_WG + th_ix) * BIN_INITIAL_ALLOC;
        sh_chunk[th_ix] = start_chunk;
        BinChunk chunk = BinChunk_read(BinChunkRef(start_chunk));
        sh_chunk_n[th_ix] = chunk.n;
        sh_chunk_next[th_ix] = chunk.next.offset;
        // ~0 marks an empty list; the min-selection below then skips it.
        sh_first_el[th_ix] = chunk.n > 0 ?
            BinInstance_read(BinInstanceRef(start_chunk + BinChunk_size)).element_ix : ~0;
    }
    uint probe = 0; // for debugging
    do {
        // Clear the coverage bitmaps for this batch.
        for (uint i = 0; i < N_SLICE; i++) {
            sh_bitmaps[i][th_ix] = 0;
        }

        // Merge phase: pull whole chunks into the ring buffer, always
        // taking the list whose first element index is least, until more
        // than N_TILE unread elements are buffered or all lists are drained.
        // NOTE(review): this presumably assumes each chunk list is sorted
        // by element index — confirm against the binning stage.
        while (wr_ix - rd_ix <= N_TILE) {
            // Choose segment with least element.
            uint my_min;
            if (th_ix < N_WG) {
                if (th_ix == 0) {
                    sh_selected_n = 0;
                    // ~1 is a sentinel strictly below the "exhausted"
                    // marker ~0, so fully drained lists never match
                    // sh_min_buf and sh_selected_n stays 0 when everything
                    // is consumed. NOTE(review): a real element_ix of ~1
                    // would collide with this sentinel — confirm impossible.
                    sh_min_buf = ~1;
                }
            }
            barrier();
            // Tempting to do this with subgroups, but atomic should be good enough.
            // NOTE(review): this read is not guarded by th_ix < N_WG; if
            // N_TILE > N_WG it indexes past sh_first_el — confirm intended.
            my_min = sh_first_el[th_ix];
            if (th_ix < N_WG) {
                atomicMin(sh_min_buf, my_min);
            }
            barrier();
            if (th_ix < N_WG) {
                // The winning thread publishes its chunk and advances its
                // list to the next chunk.
                if (sh_first_el[th_ix] == sh_min_buf) {
                    sh_elements_ref = sh_chunk[th_ix] + BinChunk_size;
                    uint selected_n = sh_chunk_n[th_ix];
                    sh_selected_n = selected_n;
                    uint next_chunk = sh_chunk_next[th_ix];
                    if (next_chunk == 0) {
                        // End of this list; retire it from the merge.
                        sh_first_el[th_ix] = ~0;
                    } else {
                        sh_chunk[th_ix] = next_chunk;
                        BinChunk chunk = BinChunk_read(BinChunkRef(next_chunk));
                        sh_chunk_n[th_ix] = chunk.n;
                        sh_chunk_next[th_ix] = chunk.next.offset;
                        sh_first_el[th_ix] = BinInstance_read(
                            BinInstanceRef(next_chunk + BinChunk_size)).element_ix;
                    }
                }
            }
            barrier();
            uint chunk_n = sh_selected_n;
            if (chunk_n == 0) {
                // All chunks consumed
                break;
            }
            // Copy the selected chunk's instances into the ring buffer,
            // one element per thread.
            BinInstanceRef inst_ref = BinInstanceRef(sh_elements_ref);
            if (th_ix < chunk_n) {
                uint el = BinInstance_read(BinInstance_index(inst_ref, th_ix)).element_ix;
                sh_elements[(wr_ix + th_ix) % N_RINGBUF] = el;
            }
            wr_ix += chunk_n;
        }

        // We've done the merge and filled the buffer.

        // Read one element, compute coverage.
        uint tag = Annotated_Nop;
        AnnotatedRef ref;
        // Threads past the number of buffered elements keep tag = Nop and
        // fall through the switch below with an empty bbox.
        if (th_ix + rd_ix < wr_ix) {
            uint element_ix = sh_elements[(rd_ix + th_ix) % N_RINGBUF];
            ref = AnnotatedRef(element_ix * Annotated_size);
            tag = Annotated_tag(ref);
        }

        // Bounding box of this thread's element, in tile coordinates
        // relative to the bin origin (xy0).
        int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
        switch (tag) {
        case Annotated_Line:
            AnnoLineSeg line = Annotated_Line_read(ref);
            // Expand the segment's bbox by the stroke half-width stored in
            // line.stroke, then convert to tile units (floor/ceil so the
            // box covers all touched tiles).
            x0 = int(floor((min(line.p0.x, line.p1.x) - line.stroke.x - xy0.x) * SX));
            y0 = int(floor((min(line.p0.y, line.p1.y) - line.stroke.y - xy0.y) * SY));
            x1 = int(ceil((max(line.p0.x, line.p1.x) + line.stroke.x - xy0.x) * SX));
            y1 = int(ceil((max(line.p0.y, line.p1.y) + line.stroke.y - xy0.y) * SY));
            break;
        case Annotated_Fill:
        case Annotated_Stroke:
            // Note: we take advantage of the fact that fills and strokes
            // have compatible layout.
            AnnoFill fill = Annotated_Fill_read(ref);
            x0 = int(floor((fill.bbox.x - xy0.x) * SX));
            y0 = int(floor((fill.bbox.y - xy0.y) * SY));
            x1 = int(ceil((fill.bbox.z - xy0.x) * SX));
            y1 = int(ceil((fill.bbox.w - xy0.y) * SY));
            break;
        }
        // At this point, we run an iterator over the coverage area,
        // trying to keep divergence low.
        // Right now, it's just a bbox, but we'll get finer with
        // segments.
        x0 = clamp(x0, 0, N_TILE_X);
        x1 = clamp(x1, x0, N_TILE_X);
        y0 = clamp(y0, 0, N_TILE_Y);
        y1 = clamp(y1, y0, N_TILE_Y);
        // This loop draws a rectangle to the coverage bitmasks. For
        // line segments, draw more precisely.
        // Empty width: collapse the height too so the loop is a no-op.
        if (x0 == x1) y1 = y0;
        int x = x0, y = y0;
        // Bit position of this thread (= this batch element) in the bitmap.
        uint my_slice = th_ix / 32;
        uint my_mask = 1 << (th_ix & 31);
        while (y < y1) {
            atomicOr(sh_bitmaps[my_slice][y * N_TILE_X + x], my_mask);
            x++;
            if (x == x1) {
                x = x0;
                y++;
            }
        }
        barrier();

        // Output elements for this tile, based on bitmaps.
        // Each thread scans its own tile's bitmap; set bits identify the
        // batch elements covering the tile, visited in increasing order
        // via findLSB.
        uint slice_ix = 0;
        uint bitmap = sh_bitmaps[0][th_ix];
        while (true) {
            if (bitmap == 0) {
                slice_ix++;
                if (slice_ix == N_SLICE) {
                    break;
                }
                bitmap = sh_bitmaps[slice_ix][th_ix];
                if (bitmap == 0) {
                    continue;
                }
            }
            uint element_ref_ix = slice_ix * 32 + findLSB(bitmap);
            uint element_ix = sh_elements[(rd_ix + element_ref_ix) % N_RINGBUF];

            // At this point, we read the element again from global memory.
            // If that turns out to be expensive, maybe we can pack it into
            // shared memory (or perhaps just the tag).
            ref = AnnotatedRef(element_ix * Annotated_size);
            tag = Annotated_tag(ref);

            switch (tag) {
            case Annotated_Fill:
            case Annotated_Stroke:
                // Note: we take advantage of the fact that fills and strokes
                // have compatible layout.
                AnnoFill fill = Annotated_Fill_read(ref);
                alloc_cmd(cmd_ref, cmd_limit);
                Cmd_Solid_write(cmd_ref, CmdSolid(fill.rgba_color));
                // NOTE(review): cmd_ref is not advanced here; unless
                // Cmd_Solid_write advances it, successive commands
                // overwrite each other — confirm against ptcl.h.
                break;
            }

            // clear LSB
            bitmap &= bitmap - 1;
        }

        rd_ix += N_TILE;
        // NOTE(review): unconditional break — only the first batch of up
        // to N_TILE elements is processed per workgroup; presumably a
        // development placeholder to be removed when the loop is finished.
        break;
    } while (wr_ix > rd_ix);
}
|