Runtime querying of threadgroup size

Ishi Tatsuyuki 2021-06-08 16:29:40 +09:00
parent c2772ceac7
commit d77dfb8c00
9 changed files with 48 additions and 10 deletions

@@ -14,7 +14,7 @@ use raw_window_handle::{HasRawWindowHandle, RawWindowHandle};
use smallvec::SmallVec;
use crate::{BufferUsage, Error, GpuInfo, ImageLayout};
use crate::{BufferUsage, Error, GpuInfo, ImageLayout, WorkgroupLimits};
use self::wrappers::{CommandAllocator, CommandQueue, Device, Factory4, Resource, ShaderByteCode};
@@ -177,6 +177,10 @@ impl Dx12Instance {
has_descriptor_indexing: false,
has_subgroups: false,
subgroup_size: None,
workgroup_limits: WorkgroupLimits {
max_size: [1024, 1024, 64],
max_invocations: 1024,
},
has_memory_model: false,
use_staging_buffers,
};

@@ -99,6 +99,8 @@ pub struct GpuInfo {
/// required in Vulkan 1.1), and we should have finer grained
/// queries for shuffles, etc.
pub has_subgroups: bool,
/// Limits on workgroup size for compute shaders.
pub workgroup_limits: WorkgroupLimits,
/// Info about subgroup size control, if available.
pub subgroup_size: Option<SubgroupSize>,
/// The GPU supports a real, grown-ass memory model.
@@ -114,6 +116,16 @@ pub struct GpuInfo {
/// available.
#[derive(Clone, Debug)]
pub struct SubgroupSize {
min: u32,
max: u32,
pub min: u32,
pub max: u32,
}
/// The range of workgroup sizes supported by a back-end.
#[derive(Clone, Debug)]
pub struct WorkgroupLimits {
/// The maximum size each workgroup dimension can be.
pub max_size: [u32; 3],
/// The maximum total number of invocations a workgroup can have, that is, the product of the
/// sizes in each dimension.
pub max_invocations: u32,
}
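These limits are meant to be consulted at runtime when sizing compute dispatches, instead of baking per-backend constants into the renderer. A minimal sketch of how calling code might check a candidate workgroup size against them (the workgroup_fits helper is hypothetical, not part of this commit):

// Hypothetical helper: returns true if `size` respects both the per-dimension
// and the total-invocation limits reported by the back-end.
fn workgroup_fits(size: [u32; 3], limits: &WorkgroupLimits) -> bool {
    size[0] <= limits.max_size[0]
        && size[1] <= limits.max_size[1]
        && size[2] <= limits.max_size[2]
        && size[0] * size[1] * size[2] <= limits.max_invocations
}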

@@ -29,7 +29,7 @@ use metal::{CGFloat, MTLFeatureSet};
use raw_window_handle::{HasRawWindowHandle, RawWindowHandle};
use crate::{BufferUsage, Error, GpuInfo};
use crate::{BufferUsage, Error, GpuInfo, WorkgroupLimits};
use util::*;
@@ -164,6 +164,10 @@ impl MtlInstance {
has_descriptor_indexing: false,
has_subgroups: false,
subgroup_size: None,
workgroup_limits: WorkgroupLimits {
max_size: [512, 512, 512],
max_invocations: 512,
},
has_memory_model: false,
use_staging_buffers,
};

@@ -12,9 +12,7 @@ use ash::{vk, Device, Entry, Instance};
use smallvec::SmallVec;
use crate::{
BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams, SubgroupSize,
};
use crate::{BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams, SubgroupSize, WorkgroupLimits};
use crate::backend::Device as DeviceTrait;
@@ -357,10 +355,17 @@ impl VkInstance {
// TODO: finer grained query of specific subgroup info.
let has_subgroups = self.vk_version >= vk::make_version(1, 1, 0);
let workgroup_limits = WorkgroupLimits {
max_invocations: props.limits.max_compute_work_group_invocations,
max_size: props.limits.max_compute_work_group_size,
};
let gpu_info = GpuInfo {
has_descriptor_indexing,
has_subgroups,
subgroup_size,
workgroup_limits,
has_memory_model,
use_staging_buffers,
};
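The two fields above come straight from VkPhysicalDeviceLimits. For context, a sketch of how props would typically be obtained with ash (the instance and physical_device names are placeholders, not necessarily the ones used in this file):

// Sketch only: query the physical-device properties and map the two
// compute-workgroup limits into the new struct.
let props = unsafe { instance.get_physical_device_properties(physical_device) };
let workgroup_limits = WorkgroupLimits {
    max_invocations: props.limits.max_compute_work_group_invocations,
    max_size: props.limits.max_compute_work_group_size,
};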

@@ -21,11 +21,16 @@
#define LG_BACKDROP_WG (7 + LG_WG_FACTOR)
#define BACKDROP_WG (1 << LG_BACKDROP_WG)
#ifndef BACKDROP_DIST_FACTOR
// Some paths (those covering a large area) can generate a lot of backdrop tiles; BACKDROP_DIST_FACTOR defines how many
// additional threads we spawn for parallel row processing. The additional threads do not participate in the
// earlier stages (calculating the tile counts) but do work in the final prefix sum stage, which has a lot more
// parallelism.
#define BACKDROP_DIST_FACTOR 4
// This feature is opt-in: one variant is compiled with the following default, while the other variant is compiled with
// a larger BACKDROP_DIST_FACTOR, which is used on GPUs supporting a larger workgroup size to improve performance.
#define BACKDROP_DIST_FACTOR 1
#endif
layout(local_size_x = BACKDROP_WG, local_size_y = BACKDROP_DIST_FACTOR) in;
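The workgroup declared here is BACKDROP_WG x BACKDROP_DIST_FACTOR invocations in total. As a rough worked example, assuming LG_WG_FACTOR is 1 (so BACKDROP_WG = 1 << 8 = 256): the default variant uses 256 * 1 = 256 invocations, while the variant built with -DBACKDROP_DIST_FACTOR=4 uses 256 * 4 = 1024, which lines up with the max_invocations >= 1024 check the renderer uses below to select backdrop_lg.spv.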

Binary file not shown.

Binary file not shown.

@@ -18,6 +18,9 @@ build path_coarse.spv: glsl path_coarse.comp | annotated.h pathseg.h tile.h setu
build backdrop.spv: glsl backdrop.comp | annotated.h tile.h setup.h
build backdrop_lg.spv: glsl backdrop.comp | annotated.h tile.h setup.h
  flags = -DBACKDROP_DIST_FACTOR=4
build coarse.spv: glsl coarse.comp | annotated.h bins.h ptcl.h setup.h
build kernel4.spv: glsl kernel4.comp | ptcl.h setup.h

@@ -311,8 +311,13 @@ impl Renderer {
let path_ds = session
.create_simple_descriptor_set(&path_pipeline, &[&memory_buf_dev, &config_buf])?;
let backdrop_alloc_code = ShaderCode::Spv(include_bytes!("../shader/backdrop.spv"));
let backdrop_pipeline = session.create_simple_compute_pipeline(backdrop_alloc_code, 2)?;
let backdrop_code = if session.gpu_info().workgroup_limits.max_invocations >= 1024 {
ShaderCode::Spv(include_bytes!("../shader/backdrop_lg.spv"))
} else {
println!("using small workgroup backdrop kernel");
ShaderCode::Spv(include_bytes!("../shader/backdrop.spv"))
};
let backdrop_pipeline = session.create_simple_compute_pipeline(backdrop_code, 2)?;
let backdrop_ds = session
.create_simple_descriptor_set(&backdrop_pipeline, &[&memory_buf_dev, &config_buf])?;
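For completeness, the newly exposed limits can also be read directly by client code for diagnostics; a small sketch (not part of this commit) using the same session accessor as the hunk above:

// Diagnostic sketch: report the limits the back-end claims to support.
let info = session.gpu_info();
println!(
    "workgroup limits: max_size {:?}, max_invocations {}",
    info.workgroup_limits.max_size, info.workgroup_limits.max_invocations
);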