16: LunarG samples support (7, 8, 9, 11) r=kvark a=msiglreith

Implement the functionality needed by LunarG API-Samples 7 (uniform buffer), 8 (pipeline layout), 9 (descriptor set), and 11 (shaders).

This is a merge and rebase of two outstanding gfx PRs, needed to run these samples.
bors[bot] 2018-01-07 22:51:53 +00:00
commit 33978e81e2
3 changed files with 365 additions and 87 deletions
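All of the new entry points follow the same pattern: the gfx-hal object is wrapped in the crate's `Handle` smart pointer (see the new `Handle<...>` type aliases added in the third file) and returned to the caller as an opaque Vulkan handle, and the destroy functions take the object back out with `.unwrap()`. Below is a minimal, self-contained sketch of that pattern — a simplified illustration, not the crate's actual `Handle` implementation from handle.rs:

use std::ops::Deref;

// Simplified stand-in for the crate's Handle<T>; illustration only.
struct Handle<T>(*mut T);

impl<T> Handle<T> {
    // Wrap an object and leak it behind a raw pointer, as the create_* calls do.
    fn new(value: T) -> Self {
        Handle(Box::into_raw(Box::new(value)))
    }
    // Take ownership back, as the destroy_* calls do via `.unwrap()`.
    fn unwrap(self) -> T {
        *unsafe { Box::from_raw(self.0) }
    }
}

impl<T> Deref for Handle<T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.0 }
    }
}

fn main() {
    let module = Handle::new(String::from("shader module")); // stands in for a hal object
    assert_eq!(&*module, "shader module");                    // `*shaderModule`-style access
    let owned = module.unwrap();                              // handed to a destroy_* call
    drop(owned);
}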


@@ -1,4 +1,4 @@
use hal::{adapter, buffer, format, image, memory, window};
use hal::{adapter, buffer, format, image, memory, pso, window};
use std::mem;
@@ -176,6 +176,24 @@ pub fn map_image_kind(
}
}
pub fn map_image_layout(layout: VkImageLayout) -> image::ImageLayout {
match layout {
/*
VK_IMAGE_LAYOUT_UNDEFINED = 0,
VK_IMAGE_LAYOUT_GENERAL = 1,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
*/
_ => unimplemented!(),
}
}
fn map_aa_mode(samples: VkSampleCountFlagBits) -> image::AaMode {
use VkSampleCountFlagBits::*;
@@ -272,6 +290,57 @@ pub fn memory_properties_from_hal(properties: memory::Properties) -> VkMemoryPro
flags
}
pub fn map_descriptor_type(ty: VkDescriptorType) -> pso::DescriptorType {
use super::VkDescriptorType::*;
match ty {
VK_DESCRIPTOR_TYPE_SAMPLER => pso::DescriptorType::Sampler,
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE => pso::DescriptorType::SampledImage,
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE => pso::DescriptorType::StorageImage,
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER => pso::DescriptorType::UniformTexelBuffer,
VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER => pso::DescriptorType::StorageTexelBuffer,
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER => pso::DescriptorType::UniformBuffer,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER => pso::DescriptorType::StorageBuffer,
VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT => pso::DescriptorType::InputAttachment,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER |
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC |
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC => unimplemented!(),
_ => panic!("Unexpected descriptor type: {:?}", ty),
}
}
pub fn map_stage_flags(stages: VkShaderStageFlags) -> pso::ShaderStageFlags {
let mut flags = pso::ShaderStageFlags::empty();
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_VERTEX_BIT as u32 != 0 {
flags |= pso::ShaderStageFlags::VERTEX;
}
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT as u32 != 0 {
flags |= pso::ShaderStageFlags::HULL;
}
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT as u32 != 0 {
flags |= pso::ShaderStageFlags::DOMAIN;
}
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_GEOMETRY_BIT as u32 != 0 {
flags |= pso::ShaderStageFlags::GEOMETRY;
}
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_FRAGMENT_BIT as u32 != 0 {
flags |= pso::ShaderStageFlags::FRAGMENT;
}
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_COMPUTE_BIT as u32 != 0 {
flags |= pso::ShaderStageFlags::COMPUTE;
}
// Only map the combined masks when every stage they cover is requested.
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_ALL_GRAPHICS as u32 == VkShaderStageFlagBits::VK_SHADER_STAGE_ALL_GRAPHICS as u32 {
flags |= pso::ShaderStageFlags::GRAPHICS;
}
if stages & VkShaderStageFlagBits::VK_SHADER_STAGE_ALL as u32 == VkShaderStageFlagBits::VK_SHADER_STAGE_ALL as u32 {
flags |= pso::ShaderStageFlags::ALL;
}
flags
}
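A quick sanity check of the mapping above — a hypothetical test, assuming the generated `VkShaderStageFlagBits` bindings and `pso` are in scope (they are not part of this hunk):

#[test]
fn maps_vertex_and_fragment_stages() {
    // 0x01 | 0x10 on the Vulkan side...
    let stages = VkShaderStageFlagBits::VK_SHADER_STAGE_VERTEX_BIT as u32
        | VkShaderStageFlagBits::VK_SHADER_STAGE_FRAGMENT_BIT as u32;
    // ...should become exactly VERTEX | FRAGMENT on the hal side.
    assert_eq!(
        map_stage_flags(stages),
        pso::ShaderStageFlags::VERTEX | pso::ShaderStageFlags::FRAGMENT,
    );
}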
pub fn map_err_device_creation(err: adapter::DeviceCreationError) -> VkResult {
use hal::adapter::DeviceCreationError::*;


@@ -1,10 +1,16 @@
use hal::{Device, Instance, PhysicalDevice, QueueFamily, Surface};
use hal::pso;
use hal::{Backend, DescriptorPool, Device, Instance, PhysicalDevice, QueueFamily, Surface};
use hal::pool::RawCommandPool;
use std::ffi::CString;
use std::mem;
use std::ops::Deref;
use std::ops::{Deref, Range};
use super::*;
const VERSION: (u32, u32, u32) = (1, 0, 66);
const DRIVER_VERSION: u32 = 1;
#[inline]
pub extern "C" fn gfxCreateInstance(
_pCreateInfo: *const VkInstanceCreateInfo,
@@ -49,10 +55,17 @@ pub extern "C" fn gfxGetPhysicalDeviceQueueFamilyProperties(
pQueueFamilyPropertyCount: *mut u32,
pQueueFamilyProperties: *mut VkQueueFamilyProperties,
) {
let families = &adapter.queue_families;
// If NULL, number of queue families is returned.
if pQueueFamilyProperties.is_null() {
unsafe { *pQueueFamilyPropertyCount = families.len() as _ };
return;
}
let output = unsafe {
slice::from_raw_parts_mut(pQueueFamilyProperties, *pQueueFamilyPropertyCount as _)
};
let families = &adapter.queue_families;
if output.len() > families.len() {
unsafe { *pQueueFamilyPropertyCount = families.len() as _ };
}
@@ -80,7 +93,7 @@ pub extern "C" fn gfxGetPhysicalDeviceQueueFamilyProperties(
#[inline]
pub extern "C" fn gfxGetPhysicalDeviceFeatures(
physicalDevice: VkPhysicalDevice,
adapter: VkPhysicalDevice,
pFeatures: *mut VkPhysicalDeviceFeatures,
) {
unimplemented!()
@@ -115,10 +128,38 @@ pub extern "C" fn gfxGetPhysicalDeviceImageFormatProperties(
}
#[inline]
pub extern "C" fn gfxGetPhysicalDeviceProperties(
physicalDevice: VkPhysicalDevice,
adapter: VkPhysicalDevice,
pProperties: *mut VkPhysicalDeviceProperties,
) {
unimplemented!()
let adapter_info = &adapter.info;
let limits = adapter.physical_device.get_limits();
let (major, minor, patch) = VERSION;
let device_name = {
let c_string = CString::new(adapter_info.name.clone()).unwrap();
let c_str = c_string.as_bytes_with_nul();
let mut name = [0; VK_MAX_PHYSICAL_DEVICE_NAME_SIZE as _];
let len = name.len().min(c_str.len()) - 1;
name[..len].copy_from_slice(&c_str[..len]);
unsafe { mem::transmute(name) }
};
let limits = unsafe { mem::zeroed() }; // TODO
let sparse_properties = unsafe { mem::zeroed() }; // TODO
unsafe {
*pProperties = VkPhysicalDeviceProperties {
apiVersion: (major << 22) | (minor << 12) | patch,
driverVersion: DRIVER_VERSION,
vendorID: adapter_info.vendor as _,
deviceID: adapter_info.device as _,
deviceType: VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_OTHER, // TODO
deviceName: device_name,
pipelineCacheUUID: [0; 16usize],
limits,
sparseProperties: sparse_properties,
};
}
}
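The `apiVersion` field uses Vulkan's standard version packing (major in bits 22..32, minor in bits 12..22, patch in bits 0..12). A standalone check of the constant used above:

fn main() {
    let (major, minor, patch) = (1u32, 0u32, 66u32); // VERSION = (1, 0, 66)
    let api_version = (major << 22) | (minor << 12) | patch;
    // 1.0.66 packs to 0x0040_0042, the value reported to the LunarG samples.
    assert_eq!(api_version, 0x0040_0042);
}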
#[inline]
pub extern "C" fn gfxGetPhysicalDeviceMemoryProperties(
@@ -265,7 +306,10 @@ pub extern "C" fn gfxEnumerateInstanceLayerProperties(
pPropertyCount: *mut u32,
pProperties: *mut VkLayerProperties,
) -> VkResult {
unimplemented!()
// TODO: dummy implementation
unsafe { *pPropertyCount = 0; }
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxEnumerateDeviceLayerProperties(
@@ -299,7 +343,8 @@ pub extern "C" fn gfxQueueWaitIdle(queue: VkQueue) -> VkResult {
}
#[inline]
pub extern "C" fn gfxDeviceWaitIdle(device: VkDevice) -> VkResult {
unimplemented!()
// TODO
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxAllocateMemory(
@@ -385,13 +430,15 @@ pub extern "C" fn gfxBindBufferMemory(
memory: VkDeviceMemory,
memoryOffset: VkDeviceSize,
) -> VkResult {
*buffer = match *buffer.unwrap() {
Buffer::Buffer(_) => panic!("An Buffer can only be bound once!"),
let temp = unsafe { mem::zeroed() };
*buffer = match mem::replace(&mut *buffer, temp) {
Buffer::Buffer(_) => panic!("A non-sparse buffer can only be bound once!"),
Buffer::Unbound(unbound) => {
Buffer::Buffer(
gpu.device
.bind_buffer_memory(&memory, memoryOffset, unbound)
.unwrap(), // TODO
.unwrap() // TODO
)
}
};
@@ -405,13 +452,15 @@ pub extern "C" fn gfxBindImageMemory(
memory: VkDeviceMemory,
memoryOffset: VkDeviceSize,
) -> VkResult {
*image = match *image.unwrap() {
Image::Image(_) => panic!("An Image can only be bound once!"),
let temp = unsafe { mem::zeroed() };
*image = match mem::replace(&mut *image, temp) {
Image::Image(_) => panic!("A non-sparse image can only be bound once!"),
Image::Unbound(unbound) => {
Image::Image(
gpu.device
.bind_image_memory(&memory, memoryOffset, unbound)
.unwrap(), // TODO
.unwrap() // TODO
)
}
};
@@ -745,20 +794,33 @@ pub extern "C" fn gfxDestroyImageView(
}
#[inline]
pub extern "C" fn gfxCreateShaderModule(
device: VkDevice,
gpu: VkDevice,
pCreateInfo: *const VkShaderModuleCreateInfo,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
pShaderModule: *mut VkShaderModule,
) -> VkResult {
unimplemented!()
let info = unsafe { &*pCreateInfo };
let code = unsafe {
slice::from_raw_parts(info.pCode as *const u8, info.codeSize as usize)
};
let shader_module = gpu
.device
.create_shader_module(code)
.expect("Error creating shader module"); // TODO
unsafe {
*pShaderModule = Handle::new(shader_module);
}
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxDestroyShaderModule(
device: VkDevice,
gpu: VkDevice,
shaderModule: VkShaderModule,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
) {
unimplemented!()
gpu.device.destroy_shader_module(*shaderModule.unwrap());
}
#[inline]
pub extern "C" fn gfxCreatePipelineCache(
@@ -827,20 +889,48 @@ pub extern "C" fn gfxDestroyPipeline(
}
#[inline]
pub extern "C" fn gfxCreatePipelineLayout(
device: VkDevice,
gpu: VkDevice,
pCreateInfo: *const VkPipelineLayoutCreateInfo,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
pPipelineLayout: *mut VkPipelineLayout,
) -> VkResult {
unimplemented!()
let info = unsafe { &*pCreateInfo };
let set_layouts = unsafe {
slice::from_raw_parts(info.pSetLayouts, info.setLayoutCount as _)
};
let push_constants = unsafe {
slice::from_raw_parts(info.pPushConstantRanges, info.pushConstantRangeCount as _)
};
let layouts = set_layouts
.iter()
.map(|layout| layout.deref())
.collect::<Vec<&<B as Backend>::DescriptorSetLayout>>();
let ranges = push_constants
.iter()
.map(|constant| {
let stages = conv::map_stage_flags(constant.stageFlags);
let start = constant.offset / 4;
let size = constant.size / 4;
(stages, start .. start+size)
})
.collect::<Vec<_>>();
let pipeline_layout = gpu.device
.create_pipeline_layout(&layouts, &ranges);
unsafe { *pPipelineLayout = Handle::new(pipeline_layout); }
VkResult::VK_SUCCESS
}
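gfx-hal describes push-constant ranges in 4-byte units rather than in bytes, which is why the offset and size are divided by 4 above. A standalone check with illustrative values (not taken from the diff):

fn main() {
    // A Vulkan push-constant range starting at byte 8 with a size of 16 bytes...
    let (offset_bytes, size_bytes) = (8u32, 16u32);
    let start = offset_bytes / 4;
    let size = size_bytes / 4;
    // ...becomes the half-open range 2..6 in 4-byte units on the hal side.
    assert_eq!(start .. start + size, 2..6);
}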
#[inline]
pub extern "C" fn gfxDestroyPipelineLayout(
device: VkDevice,
gpu: VkDevice,
pipelineLayout: VkPipelineLayout,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
) {
unimplemented!()
gpu.device.destroy_pipeline_layout(*pipelineLayout.unwrap());
}
#[inline]
pub extern "C" fn gfxCreateSampler(
@@ -861,37 +951,82 @@ pub extern "C" fn gfxDestroySampler(
}
#[inline]
pub extern "C" fn gfxCreateDescriptorSetLayout(
device: VkDevice,
gpu: VkDevice,
pCreateInfo: *const VkDescriptorSetLayoutCreateInfo,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
pSetLayout: *mut VkDescriptorSetLayout,
) -> VkResult {
unimplemented!()
let info = unsafe { &*pCreateInfo };
let layout_bindings = unsafe {
slice::from_raw_parts(info.pBindings, info.bindingCount as _)
};
let bindings = layout_bindings
.iter()
.map(|binding| {
assert!(binding.pImmutableSamplers.is_null()); // TODO
pso::DescriptorSetLayoutBinding {
binding: binding.binding as _,
ty: conv::map_descriptor_type(binding.descriptorType),
count: binding.descriptorCount as _,
stage_flags: conv::map_stage_flags(binding.stageFlags),
}
})
.collect::<Vec<_>>();
let set_layout = gpu.device
.create_descriptor_set_layout(&bindings);
unsafe { *pSetLayout = Handle::new(set_layout); }
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxDestroyDescriptorSetLayout(
device: VkDevice,
gpu: VkDevice,
descriptorSetLayout: VkDescriptorSetLayout,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
) {
unimplemented!()
gpu.device.destroy_descriptor_set_layout(*descriptorSetLayout.unwrap());
}
#[inline]
pub extern "C" fn gfxCreateDescriptorPool(
device: VkDevice,
gpu: VkDevice,
pCreateInfo: *const VkDescriptorPoolCreateInfo,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
pDescriptorPool: *mut VkDescriptorPool,
) -> VkResult {
unimplemented!()
let info = unsafe { &*pCreateInfo };
assert_eq!(info.flags, 0); // TODO
let pool_sizes = unsafe {
slice::from_raw_parts(info.pPoolSizes, info.poolSizeCount as _)
};
let ranges = pool_sizes
.iter()
.map(|pool| {
pso::DescriptorRangeDesc {
ty: conv::map_descriptor_type(pool.type_),
count: pool.descriptorCount as _,
}
})
.collect::<Vec<_>>();
let pool = gpu.device
.create_descriptor_pool(info.maxSets as _, &ranges);
unsafe { *pDescriptorPool = Handle::new(pool); }
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxDestroyDescriptorPool(
device: VkDevice,
gpu: VkDevice,
descriptorPool: VkDescriptorPool,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
) {
unimplemented!()
gpu.device.destroy_descriptor_pool(*descriptorPool.unwrap());
}
#[inline]
pub extern "C" fn gfxResetDescriptorPool(
@@ -903,11 +1038,30 @@ pub extern "C" fn gfxResetDescriptorPool(
}
#[inline]
pub extern "C" fn gfxAllocateDescriptorSets(
device: VkDevice,
_device: VkDevice,
pAllocateInfo: *const VkDescriptorSetAllocateInfo,
pDescriptorSets: *mut VkDescriptorSet,
) -> VkResult {
unimplemented!()
let info = unsafe { &mut *(pAllocateInfo as *mut VkDescriptorSetAllocateInfo) };
let pool = &mut info.descriptorPool;
let set_layouts = unsafe {
slice::from_raw_parts(info.pSetLayouts, info.descriptorSetCount as _)
};
let layouts = set_layouts
.iter()
.map(|layout| layout.deref())
.collect::<Vec<_>>();
let descriptor_sets = pool.allocate_sets(&layouts);
let sets = unsafe {
slice::from_raw_parts_mut(pDescriptorSets, info.descriptorSetCount as _)
};
for (set, raw_set) in sets.iter_mut().zip(descriptor_sets.into_iter()) {
*set = Handle::new(raw_set);
}
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxFreeDescriptorSets(
@@ -920,13 +1074,105 @@ pub extern "C" fn gfxFreeDescriptorSets(
}
#[inline]
pub extern "C" fn gfxUpdateDescriptorSets(
device: VkDevice,
gpu: VkDevice,
descriptorWriteCount: u32,
pDescriptorWrites: *const VkWriteDescriptorSet,
descriptorCopyCount: u32,
pDescriptorCopies: *const VkCopyDescriptorSet,
) {
unimplemented!()
assert_eq!(descriptorCopyCount, 0); // TODO
let writes = unsafe {
slice::from_raw_parts(pDescriptorWrites, descriptorWriteCount as _)
};
let writes = writes
.iter()
.map(|write| {
fn map_buffer_info(buffer_info: &[VkDescriptorBufferInfo]) -> Vec<(&<B as Backend>::Buffer, Range<u64>)> {
buffer_info
.into_iter()
.map(|buffer| {
assert_ne!(buffer.range as i32, VK_WHOLE_SIZE);
(
match buffer.buffer.deref() {
&Buffer::Buffer(ref buf) => buf,
// Vulkan portability restriction:
// Non-sparse buffers need to be bound to device memory.
&Buffer::Unbound(_) => panic!("Buffer needs to be bound"),
},
buffer.offset .. buffer.offset+buffer.range,
)
})
.collect()
}
let image_info = unsafe {
slice::from_raw_parts(write.pImageInfo, write.descriptorCount as _)
};
let buffer_info = unsafe {
slice::from_raw_parts(write.pBufferInfo, write.descriptorCount as _)
};
let texel_buffer_views = unsafe {
slice::from_raw_parts(write.pTexelBufferView, write.descriptorCount as _)
};
let ty = conv::map_descriptor_type(write.descriptorType);
let desc_write = match ty {
pso::DescriptorType::Sampler => pso::DescriptorWrite::Sampler(
image_info
.into_iter()
.map(|image| &*image.sampler)
.collect()
),
pso::DescriptorType::SampledImage => pso::DescriptorWrite::SampledImage(
image_info
.into_iter()
.map(|image| (&*image.imageView, conv::map_image_layout(image.imageLayout)))
.collect()
),
pso::DescriptorType::StorageImage => pso::DescriptorWrite::StorageImage(
image_info
.into_iter()
.map(|image| (&*image.imageView, conv::map_image_layout(image.imageLayout)))
.collect()
),
pso::DescriptorType::UniformTexelBuffer => pso::DescriptorWrite::UniformTexelBuffer(
texel_buffer_views
.into_iter()
.map(|view| view.deref())
.collect()
),
pso::DescriptorType::StorageTexelBuffer => pso::DescriptorWrite::StorageTexelBuffer(
texel_buffer_views
.into_iter()
.map(|view| view.deref())
.collect()
),
pso::DescriptorType::UniformBuffer => pso::DescriptorWrite::UniformBuffer(
map_buffer_info(buffer_info)
),
pso::DescriptorType::StorageBuffer => pso::DescriptorWrite::StorageBuffer(
map_buffer_info(buffer_info)
),
pso::DescriptorType::InputAttachment => pso::DescriptorWrite::InputAttachment(
image_info
.into_iter()
.map(|image| (&*image.imageView, conv::map_image_layout(image.imageLayout)))
.collect()
),
};
pso::DescriptorSetWrite {
set: &*write.dstSet,
binding: write.dstBinding as _,
array_offset: write.dstArrayElement as _,
write: desc_write,
}
})
.collect::<Vec<_>>();
gpu.device.update_descriptor_sets(&writes);
}
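`map_buffer_info` turns Vulkan's offset-plus-range description of a buffer binding into the half-open byte range expected by the hal descriptor write; `VK_WHOLE_SIZE` is not handled yet, hence the `assert_ne!`. A standalone check with illustrative values (not taken from the diff):

fn main() {
    // A uniform buffer bound at offset 256 with an explicit 192-byte range
    // (VK_WHOLE_SIZE would be rejected by the assert above)...
    let (offset, range) = (256u64, 192u64);
    // ...is passed to hal as the byte range 256..448.
    assert_eq!(offset .. offset + range, 256..448);
}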
#[inline]
pub extern "C" fn gfxCreateFramebuffer(


@@ -17,7 +17,6 @@ mod handle;
mod impls;
use std::{cmp, slice};
use hal::pool::RawCommandPool;
use back::Backend as B;
use handle::Handle;
@@ -30,6 +29,13 @@ pub type VkDevice = Handle<hal::Gpu<B>>;
pub type VkCommandPool = Handle<<B as hal::Backend>::CommandPool>;
pub type VkCommandBuffer = Handle<<B as hal::Backend>::CommandBuffer>;
pub type VkDeviceMemory = Handle<<B as hal::Backend>::Memory>;
pub type VkDescriptorSetLayout = Handle<<B as hal::Backend>::DescriptorSetLayout>;
pub type VkPipelineLayout = Handle<<B as hal::Backend>::PipelineLayout>;
pub type VkDescriptorPool = Handle<<B as hal::Backend>::DescriptorPool>;
pub type VkDescriptorSet = Handle<<B as hal::Backend>::DescriptorSet>;
pub type VkSampler = Handle<<B as hal::Backend>::Sampler>;
pub type VkBufferView = Handle<<B as hal::Backend>::BufferView>;
pub type VkShaderModule = Handle<<B as hal::Backend>::ShaderModule>;
pub enum Image<B: hal::Backend> {
Image(B::Image),
@@ -535,31 +541,12 @@ pub struct VkQueryPool_T {
pub type VkQueryPool = *mut VkQueryPool_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkBufferView_T {
_unused: [u8; 0],
}
pub type VkBufferView = *mut VkBufferView_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkShaderModule_T {
_unused: [u8; 0],
}
pub type VkShaderModule = *mut VkShaderModule_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkPipelineCache_T {
_unused: [u8; 0],
}
pub type VkPipelineCache = *mut VkPipelineCache_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkPipelineLayout_T {
_unused: [u8; 0],
}
pub type VkPipelineLayout = *mut VkPipelineLayout_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkRenderPass_T {
_unused: [u8; 0],
}
@@ -572,30 +559,6 @@ pub struct VkPipeline_T {
pub type VkPipeline = *mut VkPipeline_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkDescriptorSetLayout_T {
_unused: [u8; 0],
}
pub type VkDescriptorSetLayout = *mut VkDescriptorSetLayout_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkSampler_T {
_unused: [u8; 0],
}
pub type VkSampler = *mut VkSampler_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkDescriptorPool_T {
_unused: [u8; 0],
}
pub type VkDescriptorPool = *mut VkDescriptorPool_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkDescriptorSet_T {
_unused: [u8; 0],
}
pub type VkDescriptorSet = *mut VkDescriptorSet_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkFramebuffer_T {
_unused: [u8; 0],
}