// The binning stage of the pipeline.
//
// One invocation per element within a partition of N_TILE elements;
// each workgroup repeatedly claims a partition and records which bins
// (coarse tiles) each element's bounding box touches.

#version 450

#extension GL_GOOGLE_include_directive : enable

#include "setup.h"

layout(local_size_x = N_TILE, local_size_y = 1) in;
// Input: annotated elements produced by the element processing pass.
layout(set = 0, binding = 0) buffer AnnotatedBuf {
    uint[] annotated;
};

// This is for scanning forward for right_edge data.
layout(set = 0, binding = 1) buffer StateBuf {
    uint[] state;
};

layout(set = 0, binding = 2) buffer AllocBuf {
    // Total number of elements to bin.
    uint n_elements;
    // Will be incremented atomically to claim tiles
    uint tile_ix;
    // Bump-allocation cursor for bin chunk storage; incremented atomically
    // when a chunk outgrows its current allocation.
    uint alloc;
};

// Output: per-bin linked lists of chunks of bin instances.
layout(set = 0, binding = 3) buffer BinsBuf {
    uint[] bins;
};

// Generated accessors for the buffer layouts above.
#include "annotated.h"
#include "state.h"
#include "bins.h"
// scale factors useful for converting coordinates to bins
#define SX (1.0 / float(N_TILE_X * TILE_WIDTH_PX))
#define SY (1.0 / float(N_TILE_Y * TILE_HEIGHT_PX))

// Reciprocal of tile height in pixels; used to detect fill lines that
// cross a tile row boundary.
#define TSY (1.0 / float(TILE_HEIGHT_PX))

// Constant not available in GLSL. Also consider uintBitsToFloat(0x7f800000)
#define INFINITY (1.0 / 0.0)

// Coverage bitmaps: one bit per element in the partition, per bin,
// sliced into N_SLICE 32-bit words.
// Note: cudaraster has N_TILE + 1 to cut down on bank conflicts.
shared uint bitmaps[N_SLICE][N_TILE];
// Inclusive running bit counts per slice, per bin (prefix over slices).
shared uint count[N_SLICE][N_TILE];
// Partition index claimed by this workgroup (written by invocation 0).
shared uint sh_my_tile;
// Per-bin output bookkeeping: start offset of the instance area, the
// write limit of the current chunk, and the jump to apply when an
// output offset spills past that limit into the continuation chunk.
shared uint sh_chunk_start[N_TILE];
shared uint sh_chunk_end[N_TILE];
shared uint sh_chunk_jump[N_TILE];

// Staging area for propagating right_edge values backward in the partition.
shared float sh_right_edge[N_TILE];

// Stride in bytes of one partition's record in the state buffer
// (presumably an 8-byte header plus two State records — see state.h).
#define StateBuf_stride (8 + 2 * State_size)
// Index (in uints) into the state buffer of the right_edge aggregate
// for the given partition.
uint state_right_edge_index(uint partition_ix) {
    uint stride_in_words = StateBuf_stride / 4;
    return partition_ix * stride_in_words + 2;
}
// Persistent-workgroup binning: loop claiming partitions of N_TILE
// elements until all elements are consumed. Each invocation owns one
// bin's output chunk list (indexed by local invocation and workgroup).
void main() {
    // Initial chunk for this invocation's bin, carved out of the
    // statically preallocated region of BIN_INITIAL_ALLOC bytes each.
    BinChunkRef chunk_ref = BinChunkRef((gl_LocalInvocationID.x * N_WG + gl_WorkGroupID.x) * BIN_INITIAL_ALLOC);
    // End of the space currently allocated to this bin's chunk.
    uint wr_limit = chunk_ref.offset + BIN_INITIAL_ALLOC;
    // Number of instances written into the current chunk so far.
    uint chunk_n = 0;
    uint my_n_elements = n_elements;
    while (true) {
        // Invocation 0 claims the next partition for the whole workgroup.
        if (gl_LocalInvocationID.x == 0) {
            sh_my_tile = atomicAdd(tile_ix, 1);
        }
        barrier();
        uint my_tile = sh_my_tile;
        // Uniform exit: my_tile is the same for all invocations, so the
        // break does not diverge around the barriers above.
        if (my_tile * N_TILE >= my_n_elements) {
            break;
        }

        // Clear coverage bitmaps for this partition.
        for (uint i = 0; i < N_SLICE; i++) {
            bitmaps[i][gl_LocalInvocationID.x] = 0;
        }
        barrier();

        // Read inputs and determine coverage of bins
        uint element_ix = my_tile * N_TILE + gl_LocalInvocationID.x;
        AnnotatedRef ref = AnnotatedRef(element_ix * Annotated_size);
        // Out-of-range invocations process a Nop (covers no bins).
        uint tag = Annotated_Nop;
        if (element_ix < my_n_elements) {
            tag = Annotated_tag(ref);
        }
        // Bin-space bounding box [x0, x1) x [y0, y1) of this element.
        int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
        // Right edge of the enclosing fill; INFINITY means "not known yet"
        // and is resolved by the propagation below.
        float my_right_edge = INFINITY;
        bool crosses_edge = false;
        switch (tag) {
        case Annotated_FillLine:
        case Annotated_StrokeLine:
            // Fill and stroke lines share a layout; stroke half-width
            // (zero for fills, presumably) pads the bbox.
            AnnoStrokeLineSeg line = Annotated_StrokeLine_read(ref);
            x0 = int(floor((min(line.p0.x, line.p1.x) - line.stroke.x) * SX));
            y0 = int(floor((min(line.p0.y, line.p1.y) - line.stroke.y) * SY));
            x1 = int(ceil((max(line.p0.x, line.p1.x) + line.stroke.x) * SX));
            y1 = int(ceil((max(line.p0.y, line.p1.y) + line.stroke.y) * SY));
            // A fill line crossing a tile row boundary must be binned out
            // to the fill's right edge (backdrop propagation).
            crosses_edge = tag == Annotated_FillLine && ceil(line.p0.y * TSY) != ceil(line.p1.y * TSY);
            break;
        case Annotated_Fill:
        case Annotated_Stroke:
            // Note: we take advantage of the fact that fills and strokes
            // have compatible layout.
            AnnoFill fill = Annotated_Fill_read(ref);
            x0 = int(floor(fill.bbox.x * SX));
            y0 = int(floor(fill.bbox.y * SY));
            x1 = int(ceil(fill.bbox.z * SX));
            y1 = int(ceil(fill.bbox.w * SY));
            // It probably makes more sense to track x1, to avoid having to redo
            // the rounding to tile coords.
            my_right_edge = fill.bbox.z;
            break;
        }

        // If the last element in this partition is a fill edge, then we need to do a
        // look-forward to find the right edge of its corresponding fill. That data is
        // recorded in aggregates computed in the element processing pass.
        if (gl_LocalInvocationID.x == N_TILE - 1 && tag == Annotated_FillLine) {
            uint aggregate_ix = (my_tile + 1) * ELEMENT_BINNING_RATIO;
            // This is sequential but the expectation is that the amount of
            // look-forward is small (performance may degrade in the case
            // of massively complex paths).
            do {
                my_right_edge = uintBitsToFloat(state[state_right_edge_index(aggregate_ix)]);
                aggregate_ix++;
            } while (isinf(my_right_edge));
        }

        // Now propagate right_edge backward, from fill to segment.
        // Log-step pass: each invocation still holding INFINITY adopts the
        // value from 2^i positions ahead, so after LG_N_TILE rounds every
        // element sees the right edge of the nearest following fill.
        for (uint i = 0; i < LG_N_TILE; i++) {
            // Note: we could try to cut down on write bandwidth here if the value hasn't
            // changed, but not sure it's worth the complexity to track.
            sh_right_edge[gl_LocalInvocationID.x] = my_right_edge;
            barrier();
            if (gl_LocalInvocationID.x + (1 << i) < N_TILE && isinf(my_right_edge)) {
                my_right_edge = sh_right_edge[gl_LocalInvocationID.x + (1 << i)];
            }
            barrier();
        }
        // Widen a row-crossing fill line's bbox out to the fill's right edge.
        if (crosses_edge) {
            x1 = int(ceil(my_right_edge * SX));
        }

        // At this point, we run an iterator over the coverage area,
        // trying to keep divergence low.
        // Right now, it's just a bbox, but we'll get finer with
        // segments.
        x0 = clamp(x0, 0, N_TILE_X);
        x1 = clamp(x1, x0, N_TILE_X);
        y0 = clamp(y0, 0, N_TILE_Y);
        y1 = clamp(y1, y0, N_TILE_Y);
        // Empty width means no coverage; collapse height so the loops
        // below terminate immediately.
        if (x0 == x1) y1 = y0;
        int x = x0, y = y0;
        // Each invocation owns one bit: slice index and bit within the
        // 32-bit word of that slice.
        uint my_slice = gl_LocalInvocationID.x / 32;
        uint my_mask = 1 << (gl_LocalInvocationID.x & 31);
        // Set this element's bit in every bin its bbox covers.
        while (y < y1) {
            atomicOr(bitmaps[my_slice][y * N_TILE_X + x], my_mask);
            x++;
            if (x == x1) {
                x = x0;
                y++;
            }
        }

        barrier();
        // Allocate output segments.
        // Here each invocation switches roles: it now handles the bin with
        // its own index, counting how many elements landed in that bin.
        uint element_count = 0;
        for (uint i = 0; i < N_SLICE; i++) {
            element_count += bitCount(bitmaps[i][gl_LocalInvocationID.x]);
            count[i][gl_LocalInvocationID.x] = element_count;
        }
        // element_count is number of elements covering bin for this invocation.
        if (element_count != 0) {
            uint chunk_end;
            uint chunk_new_start;
            // Refactor to reduce code duplication?
            if (chunk_n > 0) {
                // Close out the current chunk and start the next one
                // immediately after it (or in freshly allocated space if
                // the remainder is too small).
                uint next_chunk = chunk_ref.offset + BinChunk_size + chunk_n * BinInstance_size;
                // NOTE(review): 24 looks like a headroom heuristic for the
                // "worth continuing in place" test — confirm its derivation.
                if (next_chunk + BinChunk_size + min(24, element_count * BinInstance_size) > wr_limit) {
                    uint alloc_amount = max(BIN_ALLOC, BinChunk_size + element_count * BinInstance_size);
                    // could try to reduce fragmentation if BIN_ALLOC is only a bit above needed
                    next_chunk = atomicAdd(alloc, alloc_amount);
                    wr_limit = next_chunk + alloc_amount;
                }
                BinChunk_write(chunk_ref, BinChunk(chunk_n, BinChunkRef(next_chunk)));
                chunk_ref = BinChunkRef(next_chunk);
            }
            BinInstanceRef instance_ref = BinInstanceRef(chunk_ref.offset + BinChunk_size);
            if (instance_ref.offset + element_count * BinInstance_size > wr_limit) {
                // Not all instances fit: split across the current chunk and
                // a newly allocated continuation chunk.
                chunk_end = wr_limit;
                chunk_n = (wr_limit - instance_ref.offset) / BinInstance_size;
                uint alloc_amount = max(BIN_ALLOC, BinChunk_size + (element_count - chunk_n) * BinInstance_size);
                chunk_new_start = atomicAdd(alloc, alloc_amount);
                wr_limit = chunk_new_start + alloc_amount;
                BinChunk_write(chunk_ref, BinChunk(chunk_n, BinChunkRef(chunk_new_start)));
                chunk_ref = BinChunkRef(chunk_new_start);
                chunk_new_start += BinChunk_size;
                chunk_n = element_count - chunk_n;
            } else {
                // Everything fits: ~0 sentinels mean "no jump ever applies"
                // in the scatter phase below.
                chunk_end = ~0;
                chunk_new_start = ~0;
                chunk_n = element_count;
            }
            sh_chunk_start[gl_LocalInvocationID.x] = instance_ref.offset;
            sh_chunk_end[gl_LocalInvocationID.x] = chunk_end;
            sh_chunk_jump[gl_LocalInvocationID.x] = chunk_new_start - chunk_end;
        }

        barrier();
        // Use similar strategy as Laine & Karras paper; loop over bbox of bins
        // touched by this element
        x = x0;
        y = y0;
        while (y < y1) {
            uint bin_ix = y * N_TILE_X + x;
            uint out_mask = bitmaps[my_slice][bin_ix];
            if ((out_mask & my_mask) != 0) {
                // Rank of this element among the bin's elements: set bits
                // below ours in this slice, plus totals of earlier slices.
                uint idx = bitCount(out_mask & (my_mask - 1));
                if (my_slice > 0) {
                    idx += count[my_slice - 1][bin_ix];
                }
                uint out_offset = sh_chunk_start[bin_ix] + idx * BinInstance_size;
                // Offsets past the chunk's limit jump into the continuation
                // chunk allocated above.
                if (out_offset >= sh_chunk_end[bin_ix]) {
                    out_offset += sh_chunk_jump[bin_ix];
                }
                BinInstance_write(BinInstanceRef(out_offset), BinInstance(element_ix, my_right_edge));
            }
            x++;
            if (x == x1) {
                x = x0;
                y++;
            }
        }
    }
    // Terminate this bin's list: write the final chunk with a next ref of 0.
    BinChunk_write(chunk_ref, BinChunk(chunk_n, BinChunkRef(0)));
}