// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense

// The binning stage of the pipeline.
//
// Each workgroup processes N_TILE paths.
// Each thread processes one path and calculates an N_TILE_X x N_TILE_Y coverage mask
// based on the path bounding box to bin the paths.
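//
// For each bin of this partition, the output is an element count, a chunk offset,
// and a chunk of BinInstance entries identifying the covering elements, all written
// into the global memory buffer (see the writes near the end of main below).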

#version 450
#extension GL_GOOGLE_include_directive : enable

#include "setup.h"
#include "mem.h"

layout(local_size_x = N_TILE, local_size_y = 1) in;

layout(set = 0, binding = 1) readonly buffer ConfigBuf {
    Config conf;
};

#include "annotated.h"
#include "bins.h"

// scale factors useful for converting coordinates to bins
#define SX (1.0 / float(N_TILE_X * TILE_WIDTH_PX))
#define SY (1.0 / float(N_TILE_Y * TILE_HEIGHT_PX))
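// For example, with N_TILE_X = 16 and TILE_WIDTH_PX = 16 (typical values; the
// actual constants come from setup.h and may differ), SX = 1/256, so a pixel x
// coordinate maps to bin column floor(x * SX) = floor(x / 256).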

// Constant not available in GLSL. Also consider uintBitsToFloat(0x7f800000)
#define INFINITY (1.0 / 0.0)

// Note: cudaraster has N_TILE + 1 to cut down on bank conflicts.
// Bitmaps are sliced (256 bits into 8 (N_SLICE) 32-bit submaps).
shared uint bitmaps[N_SLICE][N_TILE];
shared uint count[N_SLICE][N_TILE];
shared uint sh_chunk_start[N_TILE];
shared bool sh_alloc_failed;
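// Layout of the shared arrays: bit j of bitmaps[s][b] is set when element 32 * s + j
// of this partition touches bin b. count[s][b] accumulates per-slice prefix sums of
// set bits for bin b, and sh_chunk_start[b] holds the output offset allocated for bin b.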

void main() {
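    // Bail out early if an earlier stage reported an out-of-memory condition;
    // mem_overflow is presumably provided by mem.h.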
    if (mem_overflow) {
        return;
    }

    uint my_n_elements = conf.n_elements;
    uint my_partition = gl_WorkGroupID.x;

    for (uint i = 0; i < N_SLICE; i++) {
        bitmaps[i][gl_LocalInvocationID.x] = 0;
    }
    if (gl_LocalInvocationID.x == 0) {
        sh_alloc_failed = false;
    }
    barrier();
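    // The barrier makes the cleared bitmaps and the reset failure flag visible to
    // every invocation before coverage bits are ORed in below.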

    // Read inputs and determine coverage of bins
    uint element_ix = my_partition * N_TILE + gl_LocalInvocationID.x;
    AnnotatedRef ref = AnnotatedRef(conf.anno_base + element_ix * Annotated_size);
    uint tag = Annotated_Nop;
    if (element_ix < my_n_elements) {
        tag = Annotated_tag(ref);
    }
    int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
    switch (tag) {
    case Annotated_Fill:
    case Annotated_Stroke:
    case Annotated_BeginClip:
    case Annotated_EndClip:
        // Note: we take advantage of the fact that these drawing elements
        // have the bbox at the same place in their layout.
        AnnoFill fill = Annotated_Fill_read(ref);
        x0 = int(floor(fill.bbox.x * SX));
        y0 = int(floor(fill.bbox.y * SY));
        x1 = int(ceil(fill.bbox.z * SX));
        y1 = int(ceil(fill.bbox.w * SY));
        break;
    }
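    // floor/ceil turn the pixel-space bbox into a half-open bin range
    // [x0, x1) x [y0, y1); elements with a Nop tag keep the empty default range.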

    // At this point, we run an iterator over the coverage area,
    // trying to keep divergence low.
    // Right now, it's just a bbox, but we'll get finer with
    // segments.
    x0 = clamp(x0, 0, N_TILE_X);
    x1 = clamp(x1, x0, N_TILE_X);
    y0 = clamp(y0, 0, N_TILE_Y);
    y1 = clamp(y1, y0, N_TILE_Y);
    if (x0 == x1) y1 = y0;
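    // If the clamped range is empty in x, force it to be empty in y as well so the
    // iteration loops below do no work at all.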
    int x = x0, y = y0;
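    // Each invocation owns one bit in each bin's sliced bitmap: slice index is
    // thread / 32 and the bit within the slice is thread % 32.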
    uint my_slice = gl_LocalInvocationID.x / 32;
    uint my_mask = 1 << (gl_LocalInvocationID.x & 31);
    while (y < y1) {
        atomicOr(bitmaps[my_slice][y * N_TILE_X + x], my_mask);
        x++;
        if (x == x1) {
            x = x0;
            y++;
        }
    }

    barrier();
    // Allocate output segments.
    uint element_count = 0;
    for (uint i = 0; i < N_SLICE; i++) {
        element_count += bitCount(bitmaps[i][gl_LocalInvocationID.x]);
        count[i][gl_LocalInvocationID.x] = element_count;
    }
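    // count[i][bin] now holds an inclusive prefix sum over slices of the number of
    // elements covering bin gl_LocalInvocationID.x; it is consulted again below when
    // computing each element's index within the bin's chunk.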
    // element_count is the number of elements covering the bin for this invocation.
    Alloc chunk_alloc = Alloc(0, false);
    if (element_count != 0) {
        // TODO: aggregate atomic adds (subgroup is probably fastest)
        chunk_alloc = malloc(element_count * BinInstance_size);
        sh_chunk_start[gl_LocalInvocationID.x] = chunk_alloc.offset;
        if (chunk_alloc.failed) {
            sh_alloc_failed = true;
        }
    }
    // Note: it might be more efficient for reading to do this in the
    // other order (each bin is a contiguous sequence of partitions)
    uint out_ix = (conf.bin_base >> 2) + (my_partition * N_TILE + gl_LocalInvocationID.x) * 2;
    memory[out_ix] = element_count;
    memory[out_ix + 1] = chunk_alloc.offset;
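    // The per-bin header is two u32 words (element count, chunk offset); bin_base is
    // presumably a byte offset, so >> 2 converts it to an index into the u32 memory[] array.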

    barrier();
    if (sh_alloc_failed) {
        return;
    }
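    // The barrier guarantees the failure flag is visible to the whole workgroup, so
    // either every invocation proceeds to write its bin instances below or none do.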

    // Use a similar strategy as in the Laine & Karras paper; loop over the bbox of
    // bins touched by this element.
    x = x0;
    y = y0;
    while (y < y1) {
        uint bin_ix = y * N_TILE_X + x;
        uint out_mask = bitmaps[my_slice][bin_ix];
        if ((out_mask & my_mask) != 0) {
            uint idx = bitCount(out_mask & (my_mask - 1));
            if (my_slice > 0) {
                idx += count[my_slice - 1][bin_ix];
            }
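            // idx is the rank of this element among the elements covering the bin:
            // set bits below my_mask in this slice plus the totals of lower slices.
            // This yields a dense, element-order-preserving index into the bin's chunk.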
            uint out_offset = sh_chunk_start[bin_ix] + idx * BinInstance_size;
            BinInstance_write(BinInstanceRef(out_offset), BinInstance(element_ix));
        }
        x++;
        if (x == x1) {
            x = x0;
            y++;
        }
    }
}