Merge one segment at a time

No parallelism yet, but seems to improve performance.

parent 894ef156e1
commit 121f29fef6
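The commit restructures how coarse rasterization consumes binning output in piet-gpu: instead of each coarse workgroup merging N_WG per-workgroup linked chunk lists with an atomic-min selection, binning now writes a dense table indexed by (partition, bin), and coarse walks one partition at a time. A minimal sketch of the new indexing, assuming N_TILE = 256 (consistent with the `(n_elements + 255) / 256` dispatch near the end of the diff):

```rust
const N_TILE: usize = 256; // elements per partition / bins per row; assumed value

/// Index of a (partition, bin) cell in the bins buffer; each cell holds
/// two u32s: element count, then the chunk's start offset.
fn bin_cell(partition_ix: usize, bin_ix: usize) -> usize {
    (partition_ix * N_TILE + bin_ix) * 2
}

fn main() {
    let ix = bin_cell(3, 17);
    println!(
        "partition 3, bin 17 -> count at bins[{}], chunk start at bins[{}]",
        ix,
        ix + 1
    );
}
```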
@@ -1016,7 +1016,7 @@ unsafe fn choose_compute_device(
     devices: &[vk::PhysicalDevice],
     surface: Option<&VkSurface>,
 ) -> Option<(vk::PhysicalDevice, u32)> {
-    for pdevice in &devices[1..] {
+    for pdevice in devices {
         let props = instance.get_physical_device_queue_family_properties(*pdevice);
         for (ix, info) in props.iter().enumerate() {
             // Check for surface presentation support
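The hunk above (in the Vulkan backend, judging by the `choose_compute_device` signature) also fixes a device-selection bug: `&devices[1..]` skipped the first physical device, so a single-GPU machine would never find a compute device. A standalone sketch of the difference:

```rust
fn main() {
    let devices = ["integrated GPU", "discrete GPU"];

    // Old form: `&devices[1..]` silently skips devices[0], so a one-GPU
    // machine would iterate over nothing at all.
    let old_candidates: Vec<_> = devices[1..].iter().collect();
    assert_eq!(old_candidates.len(), devices.len() - 1);

    // New form: every device is a candidate.
    for (ix, d) in devices.iter().enumerate() {
        println!("candidate {}: {}", ix, d);
    }
}
```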
@@ -181,10 +181,12 @@ fn main() -> Result<(), Error> {
         println!("Coarse kernel time: {:.3}ms", (ts[2] - ts[1]) * 1e3);
         println!("Render kernel time: {:.3}ms", (ts[3] - ts[2]) * 1e3);

+        /*
         let mut data: Vec<u32> = Default::default();
-        device.read_buffer(&renderer.bin_buf, &mut data).unwrap();
+        device.read_buffer(&renderer.ptcl_buf, &mut data).unwrap();
         piet_gpu::dump_k1_data(&data);
         //trace_ptcl(&data);
+        */

         let mut img_data: Vec<u8> = Default::default();
         // Note: because png can use a `&[u8]` slice, we could avoid an extra copy
@@ -43,7 +43,6 @@ layout(set = 0, binding = 3) buffer BinsBuf {
 // Note: cudaraster has N_TILE + 1 to cut down on bank conflicts.
 shared uint bitmaps[N_SLICE][N_TILE];
 shared uint count[N_SLICE][N_TILE];
-shared uint sh_my_tile;
 shared uint sh_chunk_start[N_TILE];

 shared float sh_right_edge[N_TILE];
@@ -57,15 +56,7 @@ uint state_right_edge_index(uint partition_ix) {
 void main() {
     uint chunk_n = 0;
     uint my_n_elements = n_elements;
-    while (true) {
-        if (gl_LocalInvocationID.x == 0) {
-            sh_my_tile = atomicAdd(tile_ix, 1);
-        }
-        barrier();
-        uint my_tile = sh_my_tile;
-        if (my_tile * N_TILE >= my_n_elements) {
-            break;
-        }
-
+    uint my_partition = gl_WorkGroupID.x;
+
         for (uint i = 0; i < N_SLICE; i++) {
             bitmaps[i][gl_LocalInvocationID.x] = 0;
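Binning drops its persistent-threads pattern here: workgroups no longer pull the next partition from an atomic `tile_ix` counter until the input is exhausted; each workgroup now handles exactly the partition named by `gl_WorkGroupID.x`. The host must therefore launch one workgroup per partition, which is what the dispatch change in the Rust hunk near the end of the diff does. A sketch of that count, assuming N_TILE = 256 elements per partition:

```rust
/// One workgroup per partition of N_TILE elements; mirrors the new
/// `(((self.n_elements + 255) / 256) as u32, 1, 1)` dispatch below.
fn bin_workgroup_count(n_elements: usize) -> u32 {
    const N_TILE: usize = 256; // assumed workgroup/partition size
    ((n_elements + N_TILE - 1) / N_TILE) as u32 // round up
}

fn main() {
    assert_eq!(bin_workgroup_count(1), 1);
    assert_eq!(bin_workgroup_count(256), 1);
    assert_eq!(bin_workgroup_count(257), 2);
}
```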
@@ -73,7 +64,7 @@ void main() {
         barrier();

         // Read inputs and determine coverage of bins
-        uint element_ix = my_tile * N_TILE + gl_LocalInvocationID.x;
+        uint element_ix = my_partition * N_TILE + gl_LocalInvocationID.x;
         AnnotatedRef ref = AnnotatedRef(element_ix * Annotated_size);
         uint tag = Annotated_Nop;
         if (element_ix < my_n_elements) {
@@ -111,7 +102,7 @@ void main() {
         // look-forward to find the right edge of its corresponding fill. That data is
         // recorded in aggregates computed in the element processing pass.
         if (gl_LocalInvocationID.x == N_TILE - 1 && tag == Annotated_FillLine) {
-            uint aggregate_ix = (my_tile + 1) * ELEMENT_BINNING_RATIO;
+            uint aggregate_ix = (my_partition + 1) * ELEMENT_BINNING_RATIO;
             // This is sequential but the expectation is that the amount of
             // look-forward is small (performance may degrade in the case
             // of massively complex paths).
@@ -171,7 +162,9 @@ void main() {
             chunk_start = atomicAdd(alloc, element_count * BinInstance_size);
             sh_chunk_start[gl_LocalInvocationID.x] = chunk_start;
         }
-        uint out_ix = (my_tile * N_TILE + gl_LocalInvocationID.x) * 2;
+        // Note: it might be more efficient for reading to do this in the
+        // other order (each bin is a contiguous sequence of partitions)
+        uint out_ix = (my_partition * N_TILE + gl_LocalInvocationID.x) * 2;
         bins[out_ix] = element_count;
         bins[out_ix + 1] = chunk_start;

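After this hunk the bins buffer is a dense partition-major table: two u32s per (partition, bin) cell, element count then chunk start. The new comment hints that the transposed, bin-major layout might read better, since coarse scans all partitions of a single bin sequentially. A sketch of both layouts (the bin-major variant is hypothetical, not what the shader writes):

```rust
const N_TILE: usize = 256; // bins per partition; assumed value

// What binning now writes: all bins of one partition are contiguous.
fn partition_major(partition_ix: usize, bin_ix: usize) -> usize {
    (partition_ix * N_TILE + bin_ix) * 2
}

// Hypothetical alternative from the comment: all partitions of one bin
// are contiguous, so coarse's sequential walk touches adjacent memory.
fn bin_major(partition_ix: usize, bin_ix: usize, n_partitions: usize) -> usize {
    (bin_ix * n_partitions + partition_ix) * 2
}

fn main() {
    let n_partitions = 64;
    // Coarse reading bin 5 across partitions 0..3:
    let pm: Vec<usize> = (0..3).map(|p| partition_major(p, 5)).collect();
    let bm: Vec<usize> = (0..3).map(|p| bin_major(p, 5, n_partitions)).collect();
    println!("partition-major cell indices: {:?}", pm); // strided far apart
    println!("bin-major cell indices:       {:?}", bm); // adjacent
}
```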
@@ -197,5 +190,4 @@ void main() {
             y++;
         }
     }
-    }
 }

Binary file not shown.
@@ -16,6 +16,7 @@ layout(set = 0, binding = 1) buffer BinsBuf {
 };

 layout(set = 0, binding = 2) buffer AllocBuf {
+    uint n_elements;
     uint alloc;
 };

@@ -31,15 +32,6 @@ layout(set = 0, binding = 3) buffer PtclBuf {

 shared uint sh_elements[N_RINGBUF];
 shared float sh_right_edge[N_RINGBUF];
-shared uint sh_chunk[N_WG];
-shared uint sh_chunk_next[N_WG];
-shared uint sh_chunk_n[N_WG];
-shared uint sh_min_buf;
-// Some of these are kept in shared memory to ease register
-// pressure, but it could go either way.
-shared uint sh_first_el[N_WG];
-shared uint sh_selected_n;
-shared uint sh_elements_ref;

 shared uint sh_bitmaps[N_SLICE][N_TILE];
 shared uint sh_backdrop[N_SLICE][N_TILE];
@@ -96,14 +88,16 @@ void main() {
     // Could use either linear or 2d layouts for both dispatch and
     // invocations within the workgroup. We'll use variables to abstract.
     uint bin_ix = N_TILE_X * gl_WorkGroupID.y + gl_WorkGroupID.x;
+    uint partition_ix = 0;
+    uint my_n_elements = n_elements;
     // Top left coordinates of this bin.
     vec2 xy0 = vec2(N_TILE_X * TILE_WIDTH_PX * gl_WorkGroupID.x, N_TILE_Y * TILE_HEIGHT_PX * gl_WorkGroupID.y);
     uint th_ix = gl_LocalInvocationID.x;

     uint tile_x = N_TILE_X * gl_WorkGroupID.x + gl_LocalInvocationID.x % N_TILE_X;
     uint tile_y = N_TILE_Y * gl_WorkGroupID.y + gl_LocalInvocationID.x / N_TILE_X;
-    uint tile_ix = tile_y * WIDTH_IN_TILES + tile_x;
-    CmdRef cmd_ref = CmdRef(tile_ix * PTCL_INITIAL_ALLOC);
+    uint this_tile_ix = tile_y * WIDTH_IN_TILES + tile_x;
+    CmdRef cmd_ref = CmdRef(this_tile_ix * PTCL_INITIAL_ALLOC);
     uint cmd_limit = cmd_ref.offset + PTCL_INITIAL_ALLOC - 2 * Cmd_size;

     // Allocation and management of segment output
@@ -115,16 +109,6 @@ void main() {

     uint wr_ix = 0;
     uint rd_ix = 0;
-    uint first_el;
-    if (th_ix < N_WG) {
-        uint start_chunk = (bin_ix * N_WG + th_ix) * BIN_INITIAL_ALLOC;
-        sh_chunk[th_ix] = start_chunk;
-        BinChunk chunk = BinChunk_read(BinChunkRef(start_chunk));
-        sh_chunk_n[th_ix] = chunk.n;
-        sh_chunk_next[th_ix] = chunk.next.offset;
-        sh_first_el[th_ix] = chunk.n > 0 ?
-            BinInstance_read(BinInstanceRef(start_chunk + BinChunk_size)).element_ix : ~0;
-    }
     if (th_ix < N_SLICE) {
         sh_bd_sign[th_ix] = 0;
     }
@@ -138,47 +122,11 @@ void main() {
         sh_is_segment[th_ix] = 0;
     }

-    while (wr_ix - rd_ix <= N_TILE) {
-        // Choose segment with least element.
-        uint my_min;
-        if (th_ix < N_WG) {
-            if (th_ix == 0) {
-                sh_selected_n = 0;
-                sh_min_buf = ~0;
-            }
-        }
-        barrier();
-        // Tempting to do this with subgroups, but atomic should be good enough.
-        if (th_ix < N_WG) {
-            my_min = sh_first_el[th_ix];
-            atomicMin(sh_min_buf, my_min);
-        }
-        barrier();
-        if (th_ix < N_WG) {
-            if (my_min == sh_min_buf && my_min != ~0) {
-                sh_elements_ref = sh_chunk[th_ix] + BinChunk_size;
-                uint selected_n = sh_chunk_n[th_ix];
-                sh_selected_n = selected_n;
-                uint next_chunk = sh_chunk_next[th_ix];
-                if (next_chunk == 0) {
-                    sh_first_el[th_ix] = ~0;
-                } else {
-                    sh_chunk[th_ix] = next_chunk;
-                    BinChunk chunk = BinChunk_read(BinChunkRef(next_chunk));
-                    sh_chunk_n[th_ix] = chunk.n;
-                    sh_chunk_next[th_ix] = chunk.next.offset;
-                    sh_first_el[th_ix] = BinInstance_read(
-                        BinInstanceRef(next_chunk + BinChunk_size)).element_ix;
-                }
-            }
-        }
-        barrier();
-        uint chunk_n = sh_selected_n;
-        if (chunk_n == 0) {
-            // All chunks consumed
-            break;
-        }
-        BinInstanceRef inst_ref = BinInstanceRef(sh_elements_ref);
+    while (wr_ix - rd_ix <= N_TILE && partition_ix * N_TILE < my_n_elements) {
+        uint in_ix = (partition_ix * N_TILE + bin_ix) * 2;
+        uint chunk_n = bins[in_ix];
+        uint elements_ref = bins[in_ix + 1];
+        BinInstanceRef inst_ref = BinInstanceRef(elements_ref);
         if (th_ix < chunk_n) {
             BinInstance inst = BinInstance_read(BinInstance_index(inst_ref, th_ix));
             uint wr_el_ix = (wr_ix + th_ix) % N_RINGBUF;
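This hunk is the heart of the commit. The old loop performed an N_WG-way merge: several barriers and an `atomicMin` per round to pick whichever per-workgroup chunk list held the least element index. The new loop simply reads its bin's cell for `partition_ix`, copies that one chunk into the element ring buffer, and advances. Because binning emits instances in element order within each partition and partitions are visited in increasing order, the merged stream stays ordered with no selection step, at the cost of no cross-partition parallelism ("no parallelism yet"). A CPU model of the fill loop, assuming N_TILE = 256 and treating chunk starts as indices into a flat instance array (the shader uses byte offsets into the bin buffer):

```rust
const N_TILE: usize = 256;

// bins[(partition_ix * N_TILE + bin_ix) * 2]     = element count for that cell
// bins[(partition_ix * N_TILE + bin_ix) * 2 + 1] = start of its instances
fn fill_ring(bins: &[u32], instances: &[u32], bin_ix: usize, n_elements: usize) -> Vec<u32> {
    let mut ring = Vec::new();
    let (mut wr_ix, rd_ix, mut partition_ix) = (0usize, 0usize, 0usize);
    while wr_ix - rd_ix <= N_TILE && partition_ix * N_TILE < n_elements {
        let in_ix = (partition_ix * N_TILE + bin_ix) * 2;
        let chunk_n = bins[in_ix] as usize;
        let start = bins[in_ix + 1] as usize;
        // One partition's worth of instances, appended in element order.
        ring.extend_from_slice(&instances[start..start + chunk_n]);
        wr_ix += chunk_n;
        partition_ix += 1; // the `partition_ix++` added in the next hunk
    }
    ring
}

fn main() {
    // Two partitions, one bin (bin 0): partition 0 holds elements [3, 7],
    // partition 1 holds element [300].
    let mut bins = vec![0u32; 2 * N_TILE * 2];
    bins[(0 * N_TILE + 0) * 2] = 2;
    bins[(0 * N_TILE + 0) * 2 + 1] = 0;
    bins[(1 * N_TILE + 0) * 2] = 1;
    bins[(1 * N_TILE + 0) * 2 + 1] = 2;
    let instances = [3u32, 7, 300];
    assert_eq!(fill_ring(&bins, &instances, 0, 2 * N_TILE), vec![3, 7, 300]);
}
```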
@@ -186,6 +134,7 @@ void main() {
             sh_right_edge[wr_el_ix] = inst.right_edge;
         }
         wr_ix += chunk_n;
+        partition_ix++;
     }
     barrier();

Binary file not shown.
@@ -160,7 +160,7 @@ impl<D: Device> Renderer<D> {

         let state_buf = device.create_buffer(1 * 1024 * 1024, dev)?;
         let anno_buf = device.create_buffer(64 * 1024 * 1024, dev)?;
-        let bin_buf = device.create_buffer(64 * 1024 * 1024, host)?;
+        let bin_buf = device.create_buffer(64 * 1024 * 1024, dev)?;
         let ptcl_buf = device.create_buffer(48 * 1024 * 1024, dev)?;
         let image_dev = device.create_image2d(WIDTH as u32, HEIGHT as u32, dev)?;

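With coarse now consuming the bins buffer entirely on the GPU (and the CPU-side dump in `main()` commented out in the earlier hunk), `bin_buf` no longer needs to be host-visible, so it moves to device-local memory like its neighbors; presumably this is part of the measured improvement, since host-visible memory is typically slower for GPU-to-GPU traffic.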
@@ -192,12 +192,13 @@ impl<D: Device> Renderer<D> {
             &[],
         )?;

-        let coarse_alloc_buf_host = device.create_buffer(4, host)?;
-        let coarse_alloc_buf_dev = device.create_buffer(4, dev)?;
+        let coarse_alloc_buf_host = device.create_buffer(8, host)?;
+        let coarse_alloc_buf_dev = device.create_buffer(8, dev)?;

         let coarse_alloc_start = WIDTH_IN_TILES * HEIGHT_IN_TILES * PTCL_INITIAL_ALLOC;
         device
             .write_buffer(&coarse_alloc_buf_host, &[
+                n_elements as u32,
                 coarse_alloc_start as u32,
             ])
             ?;
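The alloc buffer doubles from 4 to 8 bytes because coarse's `AllocBuf` now leads with `uint n_elements` ahead of `uint alloc`. A sketch of the two-word initialization (the `coarse_alloc_start` value here is a made-up stand-in for `WIDTH_IN_TILES * HEIGHT_IN_TILES * PTCL_INITIAL_ALLOC`):

```rust
// The coarse alloc buffer now carries two u32 words (8 bytes), matching the
// `uint n_elements; uint alloc;` layout declared in the shader hunk above.
fn main() {
    let n_elements: usize = 10_000;
    let coarse_alloc_start: usize = 64 * 64 * 1024; // hypothetical stand-in
    let words: [u32; 2] = [n_elements as u32, coarse_alloc_start as u32];
    assert_eq!(std::mem::size_of_val(&words), 8); // hence create_buffer(8, ...)
    println!("alloc buf contents: {:?}", words);
}
```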
@@ -264,26 +265,22 @@ impl<D: Device> Renderer<D> {
         cmd_buf.dispatch(
             &self.bin_pipeline,
             &self.bin_ds,
-            (N_WG, 1, 1),
+            (((self.n_elements + 255) / 256) as u32, 1, 1),
         );
         cmd_buf.write_timestamp(&query_pool, 2);
         cmd_buf.memory_barrier();
-        /*
         cmd_buf.dispatch(
             &self.coarse_pipeline,
             &self.coarse_ds,
             (WIDTH as u32 / 256, HEIGHT as u32 / 256, 1),
         );
-        */
         cmd_buf.write_timestamp(&query_pool, 3);
         cmd_buf.memory_barrier();
-        /*
         cmd_buf.dispatch(
             &self.k4_pipeline,
             &self.k4_ds,
             ((WIDTH / TILE_W) as u32, (HEIGHT / TILE_H) as u32, 1),
         );
-        */
         cmd_buf.write_timestamp(&query_pool, 4);
         cmd_buf.memory_barrier();
         cmd_buf.image_barrier(&self.image_dev, ImageLayout::General, ImageLayout::BlitSrc);
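Two things change in this final hunk: the binning dispatch now scales with scene size (one workgroup per 256 elements, matching the removal of the persistent-threads loop in binning.comp) instead of a fixed `N_WG`, and the previously commented-out coarse and k4 dispatches are re-enabled, so the "Coarse kernel time" and "Render kernel time" prints in `main()` once again measure real kernel work.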