// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
// The binning stage of the pipeline.
//
// Each workgroup processes N_TILE paths.
// Each thread processes one path and calculates an N_TILE_X x N_TILE_Y coverage mask
// based on the path bounding box to bin the paths.
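// The output is a per-bin list of BinInstance elements, consumed by later stages.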
#version 450
#extension GL_GOOGLE_include_directive : enable
#include "mem.h"
#include "setup.h"
layout(local_size_x = N_TILE, local_size_y = 1) in;
layout(set = 0, binding = 1) readonly buffer ConfigBuf {
Config conf;
};
#include "annotated.h"
#include "bins.h"
#include "drawtag.h"
// scale factors useful for converting coordinates to bins
#define SX (1.0 / float(N_TILE_X * TILE_WIDTH_PX))
#define SY (1.0 / float(N_TILE_Y * TILE_HEIGHT_PX))
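// Example, assuming the usual setup.h values (16x16 px tiles, 16x16 tiles per bin,
// N_TILE = 256): each bin then covers a 256x256 px region, so multiplying a pixel
// coordinate by SX or SY yields its bin coordinate.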
// GLSL has no built-in infinity constant. Also consider uintBitsToFloat(0x7f800000)
#define INFINITY (1.0 / 0.0)
// Note: cudaraster has N_TILE + 1 to cut down on bank conflicts.
// Bitmaps are sliced: each bin's 256-bit coverage bitmap is split into N_SLICE (8) 32-bit submaps
shared uint bitmaps[N_SLICE][N_TILE];
shared uint count[N_SLICE][N_TILE];
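// count[i][bin] accumulates the set-bit totals of slices 0..i for each bin; these
// prefix sums are used below to assign each element a slot in the bin's output.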
shared Alloc sh_chunk_alloc[N_TILE];
shared bool sh_alloc_failed;
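// sh_chunk_alloc[i] is the BinInstance chunk allocated for bin i; sh_alloc_failed is
// set if any allocation in this workgroup fails.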
DrawMonoid load_draw_monoid(uint element_ix) {
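// The draw monoid is stored as two consecutive words per element: path_ix, then clip_ix.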
uint base = (conf.drawmonoid_alloc.offset >> 2) + 2 * element_ix;
uint path_ix = memory[base];
uint clip_ix = memory[base + 1];
return DrawMonoid(path_ix, clip_ix);
}
// Load bounding box computed by clip processing
vec4 load_clip_bbox(uint clip_ix) {
uint base = (conf.clip_bbox_alloc.offset >> 2) + 4 * clip_ix;
float x0 = uintBitsToFloat(memory[base]);
float y0 = uintBitsToFloat(memory[base + 1]);
float x1 = uintBitsToFloat(memory[base + 2]);
float y1 = uintBitsToFloat(memory[base + 3]);
vec4 bbox = vec4(x0, y0, x1, y1);
return bbox;
}
vec4 bbox_intersect(vec4 a, vec4 b) {
return vec4(max(a.xy, b.xy), min(a.zw, b.zw));
}
// Load the path's bbox from the bbox memory (as written by the pathseg stage).
vec4 load_path_bbox(uint path_ix) {
uint base = (conf.bbox_alloc.offset >> 2) + 6 * path_ix;
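// Bbox records have a stride of 6 words; the first four are the coordinates, stored
// with a +32768 bias (presumably so negative values fit in unsigned words).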
float bbox_l = float(memory[base]) - 32768.0;
float bbox_t = float(memory[base + 1]) - 32768.0;
float bbox_r = float(memory[base + 2]) - 32768.0;
float bbox_b = float(memory[base + 3]) - 32768.0;
vec4 bbox = vec4(bbox_l, bbox_t, bbox_r, bbox_b);
return bbox;
}
void store_path_bbox(AnnotatedRef ref, vec4 bbox) {
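// The bbox occupies words 1..4 of the annotated element; word 0 (the tag) is left untouched.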
uint ix = ref.offset >> 2;
memory[ix + 1] = floatBitsToUint(bbox.x);
memory[ix + 2] = floatBitsToUint(bbox.y);
memory[ix + 3] = floatBitsToUint(bbox.z);
memory[ix + 4] = floatBitsToUint(bbox.w);
}
void main() {
uint my_n_elements = conf.n_elements;
uint my_partition = gl_WorkGroupID.x;
for (uint i = 0; i < N_SLICE; i++) {
bitmaps[i][gl_LocalInvocationID.x] = 0;
}
if (gl_LocalInvocationID.x == 0) {
sh_alloc_failed = false;
}
barrier();
// Read inputs and determine coverage of bins
uint element_ix = my_partition * N_TILE + gl_LocalInvocationID.x;
AnnotatedRef ref = AnnotatedRef(conf.anno_alloc.offset + element_ix * Annotated_size);
uint tag = Annotated_Nop;
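// Elements past the end of the input keep the Nop tag and therefore produce no coverage.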
if (element_ix < my_n_elements) {
tag = Annotated_tag(conf.anno_alloc, ref).tag;
}
int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
switch (tag) {
case Annotated_Color:
case Annotated_LinGradient:
case Annotated_Image:
case Annotated_BeginClip:
case Annotated_EndClip:
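// All drawable element types are binned the same way: by the path bbox intersected
// with the clip bbox.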
DrawMonoid draw_monoid = load_draw_monoid(element_ix);
uint path_ix = draw_monoid.path_ix;
vec4 clip_bbox = vec4(-1e9, -1e9, 1e9, 1e9);
uint clip_ix = draw_monoid.clip_ix;
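// clip_ix == 0 means no enclosing clip (the default bbox above is effectively
// unbounded); otherwise the clip bbox is stored at index clip_ix - 1.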
if (clip_ix > 0) {
clip_bbox = load_clip_bbox(clip_ix - 1);
}
// For clip elements, clip_bbox is the bbox of the clip path, intersected
// with enclosing clips.
// For other elements, it is the bbox of the enclosing clips.
vec4 path_bbox = load_path_bbox(path_ix);
vec4 bbox = bbox_intersect(path_bbox, clip_bbox);
// Avoid a negative-size bbox (is this necessary?)
bbox.zw = max(bbox.xy, bbox.zw);
// Store clip-intersected bbox for tile_alloc.
store_path_bbox(ref, bbox);
x0 = int(floor(bbox.x * SX));
y0 = int(floor(bbox.y * SY));
x1 = int(ceil(bbox.z * SX));
y1 = int(ceil(bbox.w * SY));
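// (x0, y0)..(x1, y1) is now the range of bins covered by the bbox, with exclusive
// upper bounds.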
break;
}
// At this point, we run an iterator over the coverage area,
// trying to keep divergence low.
// Right now, it's just a bbox, but we'll get finer with
// segments.
uint width_in_bins = (conf.width_in_tiles + N_TILE_X - 1) / N_TILE_X;
uint height_in_bins = (conf.height_in_tiles + N_TILE_Y - 1) / N_TILE_Y;
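// The viewport is width_in_bins x height_in_bins bins; clamp the coverage range to
// that grid.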
x0 = clamp(x0, 0, int(width_in_bins));
x1 = clamp(x1, x0, int(width_in_bins));
y0 = clamp(y0, 0, int(height_in_bins));
y1 = clamp(y1, y0, int(height_in_bins));
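// If the x range is empty, the traversal below would never advance y, so collapse
// the y range to make the loop a no-op.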
if (x0 == x1)
y1 = y0;
int x = x0, y = y0;
uint my_slice = gl_LocalInvocationID.x / 32;
uint my_mask = 1u << (gl_LocalInvocationID.x & 31);
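// Each thread owns one bit of the per-bin bitmaps: submap my_slice, bit my_mask.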
while (y < y1) {
atomicOr(bitmaps[my_slice][y * width_in_bins + x], my_mask);
x++;
if (x == x1) {
x = x0;
y++;
}
}
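// The shared bitmaps now record, for every bin, which threads' elements touch it.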
barrier();
// Allocate output chunks (BinInstance lists, one per non-empty bin).
uint element_count = 0;
for (uint i = 0; i < N_SLICE; i++) {
element_count += bitCount(bitmaps[i][gl_LocalInvocationID.x]);
count[i][gl_LocalInvocationID.x] = element_count;
}
// element_count is the number of elements covering this invocation's bin.
Alloc chunk_alloc = new_alloc(0, 0, true);
if (element_count != 0) {
// TODO: aggregate atomic adds (subgroup is probably fastest)
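// Allocate one BinInstance slot for each element covering this bin.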
MallocResult chunk = malloc(element_count * BinInstance_size);
chunk_alloc = chunk.alloc;
sh_chunk_alloc[gl_LocalInvocationID.x] = chunk_alloc;
if (chunk.failed) {
sh_alloc_failed = true;
}
}
// Note: it might be more efficient for reading if this were done in the
// other order (so that each bin is a contiguous sequence of partitions).
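// Each (partition, bin) pair gets a two-word record in bin_alloc: the element count
// and the offset of its BinInstance chunk.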
uint out_ix = (conf.bin_alloc.offset >> 2) + (my_partition * N_TILE + gl_LocalInvocationID.x) * 2;
write_mem(conf.bin_alloc, out_ix, element_count);
write_mem(conf.bin_alloc, out_ix + 1, chunk_alloc.offset);
barrier();
if (sh_alloc_failed || mem_error != NO_ERROR) {
return;
}
// Use a similar strategy to the Laine & Karras paper; loop over the bbox of bins
// touched by this element
x = x0;
y = y0;
while (y < y1) {
uint bin_ix = y * width_in_bins + x;
uint out_mask = bitmaps[my_slice][bin_ix];
if ((out_mask & my_mask) != 0) {
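// This thread's slot in the bin's chunk is the number of lower-numbered covering
// threads: set bits below my_mask in this slice, plus the totals of earlier slices.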
uint idx = bitCount(out_mask & (my_mask - 1));
if (my_slice > 0) {
idx += count[my_slice - 1][bin_ix];
}
Alloc out_alloc = sh_chunk_alloc[bin_ix];
uint out_offset = out_alloc.offset + idx * BinInstance_size;
BinInstance_write(out_alloc, BinInstanceRef(out_offset), BinInstance(element_ix));
}
x++;
if (x == x1) {
x = x0;
y++;
}
}
}