Implement vkGetDeviceQueue, vkCreateSemaphore, vkDestroySemaphore, vkAcquireNextImageKHR, vkCreateRenderPass and vkDestroyRenderPass

This commit is contained in:
msiglreith 2018-01-09 15:19:22 +01:00
parent 33978e81e2
commit b9118e60a5
4 changed files with 338 additions and 84 deletions

View file

@ -1,4 +1,4 @@
use hal::{adapter, buffer, format, image, memory, pso, window};
use hal::{adapter, buffer, format, image, memory, pass, pso, window};
use std::mem;
@ -70,10 +70,12 @@ fn buffer_features_from_hal(features: format::BufferFeature) -> VkFormatFeatureF
flags
}
pub fn map_format(format: VkFormat) -> format::Format {
if (format as usize) < format::NUM_FORMATS {
pub fn map_format(format: VkFormat) -> Option<format::Format> {
if format == VkFormat::VK_FORMAT_UNDEFINED {
None
} else if (format as usize) < format::NUM_FORMATS {
// HAL formats have the same numeric representation as Vulkan formats
unsafe { mem::transmute(format) }
Some(unsafe { mem::transmute(format) })
} else {
unimplemented!("Unknown format {:?}", format);
}
@ -178,19 +180,17 @@ pub fn map_image_kind(
pub fn map_image_layout(layout: VkImageLayout) -> image::ImageLayout {
match layout {
/*
VK_IMAGE_LAYOUT_UNDEFINED = 0,
VK_IMAGE_LAYOUT_GENERAL = 1,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
*/
_ => unimplemented!(),
VkImageLayout::VK_IMAGE_LAYOUT_UNDEFINED => image::ImageLayout::Undefined,
VkImageLayout::VK_IMAGE_LAYOUT_GENERAL => image::ImageLayout::General,
VkImageLayout::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL => image::ImageLayout::ColorAttachmentOptimal,
VkImageLayout::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL => image::ImageLayout::DepthStencilAttachmentOptimal,
VkImageLayout::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL => image::ImageLayout::DepthStencilReadOnlyOptimal,
VkImageLayout::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL => image::ImageLayout::ShaderReadOnlyOptimal,
VkImageLayout::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL => image::ImageLayout::TransferSrcOptimal,
VkImageLayout::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL => image::ImageLayout::TransferDstOptimal,
VkImageLayout::VK_IMAGE_LAYOUT_PREINITIALIZED => image::ImageLayout::Preinitialized,
VkImageLayout::VK_IMAGE_LAYOUT_PRESENT_SRC_KHR => image::ImageLayout::Present,
_ => panic!("Unexpected image layout: {:?}", layout),
}
}
@ -341,6 +341,18 @@ pub fn map_stage_flags(stages: VkShaderStageFlags) -> pso::ShaderStageFlags {
flags
}
/// Translate Vulkan pipeline stage flags into the HAL `pso::PipelineStage`
/// bitmask.
///
/// Panics (`unimplemented!`) if `stages` contains any bit above
/// `VK_PIPELINE_STAGE_HOST_BIT`, i.e. `ALL_GRAPHICS` / `ALL_COMMANDS`,
/// which have no direct HAL equivalent yet.
pub fn map_pipeline_stage_flags(stages: VkPipelineStageFlags) -> pso::PipelineStage {
    // NOTE: `as u32` on the enum yields the flag VALUE (a single bit,
    // 0x4000 for HOST), *not* a bit index. The previous code shifted by
    // this value (`1 << (max_flag + 1) - 1`), which overflows a u32 shift.
    let max_flag = VkPipelineStageFlagBits::VK_PIPELINE_STAGE_HOST_BIT as u32;
    // Mask of every bit up to and including HOST_BIT — the range HAL mirrors.
    let supported = (max_flag << 1) - 1;
    if (stages & !supported) == 0 {
        // HAL flags have the same numeric representation as Vulkan flags
        unsafe { mem::transmute(stages) }
    } else {
        // GRAPHICS and ALL missing
        unimplemented!("Unsupported pipeline stage flags: {:?}", stages)
    }
}
pub fn map_err_device_creation(err: adapter::DeviceCreationError) -> VkResult {
use hal::adapter::DeviceCreationError::*;
@ -354,3 +366,66 @@ pub fn map_err_device_creation(err: adapter::DeviceCreationError) -> VkResult {
DeviceLost => VkResult::VK_ERROR_DEVICE_LOST,
}
}
/// Translate a Vulkan attachment load operation into its HAL counterpart.
///
/// Panics on any value without a HAL equivalent.
pub fn map_attachment_load_op(op: VkAttachmentLoadOp) -> pass::AttachmentLoadOp {
    match op {
        VkAttachmentLoadOp::VK_ATTACHMENT_LOAD_OP_DONT_CARE => pass::AttachmentLoadOp::DontCare,
        VkAttachmentLoadOp::VK_ATTACHMENT_LOAD_OP_CLEAR => pass::AttachmentLoadOp::Clear,
        VkAttachmentLoadOp::VK_ATTACHMENT_LOAD_OP_LOAD => pass::AttachmentLoadOp::Load,
        other => panic!("Unsupported attachment load op: {:?}", other),
    }
}
/// Translate a Vulkan attachment store operation into its HAL counterpart.
///
/// Panics on any value without a HAL equivalent.
pub fn map_attachment_store_op(op: VkAttachmentStoreOp) -> pass::AttachmentStoreOp {
    match op {
        VkAttachmentStoreOp::VK_ATTACHMENT_STORE_OP_DONT_CARE => pass::AttachmentStoreOp::DontCare,
        VkAttachmentStoreOp::VK_ATTACHMENT_STORE_OP_STORE => pass::AttachmentStoreOp::Store,
        other => panic!("Unsupported attachment store op: {:?}", other),
    }
}
/// Translate Vulkan access flags into the HAL `image::Access` bitmask.
///
/// Bits without an image-access counterpart (index/vertex/uniform/indirect
/// buffer reads) are silently dropped.
///
/// NOTE: the `acces` spelling is kept — callers reference this exact name.
pub fn map_image_acces(access: VkAccessFlags) -> image::Access {
    // One (Vulkan bit, HAL flag) pair per access kind that HAL models.
    let mapping = [
        (VkAccessFlagBits::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, image::Access::INPUT_ATTACHMENT_READ),
        (VkAccessFlagBits::VK_ACCESS_SHADER_READ_BIT, image::Access::SHADER_READ),
        (VkAccessFlagBits::VK_ACCESS_SHADER_WRITE_BIT, image::Access::SHADER_WRITE),
        (VkAccessFlagBits::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, image::Access::COLOR_ATTACHMENT_READ),
        (VkAccessFlagBits::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, image::Access::COLOR_ATTACHMENT_WRITE),
        (VkAccessFlagBits::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, image::Access::DEPTH_STENCIL_ATTACHMENT_READ),
        (VkAccessFlagBits::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, image::Access::DEPTH_STENCIL_ATTACHMENT_WRITE),
        (VkAccessFlagBits::VK_ACCESS_TRANSFER_READ_BIT, image::Access::TRANSFER_READ),
        (VkAccessFlagBits::VK_ACCESS_TRANSFER_WRITE_BIT, image::Access::TRANSFER_WRITE),
        (VkAccessFlagBits::VK_ACCESS_HOST_READ_BIT, image::Access::HOST_READ),
        (VkAccessFlagBits::VK_ACCESS_HOST_WRITE_BIT, image::Access::HOST_WRITE),
        (VkAccessFlagBits::VK_ACCESS_MEMORY_READ_BIT, image::Access::MEMORY_READ),
        (VkAccessFlagBits::VK_ACCESS_MEMORY_WRITE_BIT, image::Access::MEMORY_WRITE),
    ];

    let mut mask = image::Access::empty();
    for &(vk_bit, hal_flag) in mapping.iter() {
        if access & vk_bit as u32 != 0 {
            mask |= hal_flag;
        }
    }
    mask
}

View file

@ -1,5 +1,8 @@
use hal::pso;
use hal::{Backend, DescriptorPool, Device, Instance, PhysicalDevice, QueueFamily, Surface};
use hal::{pass, pso, queue};
use hal::{
Backend, DescriptorPool, Device, Instance, PhysicalDevice, QueueFamily,
Surface, Swapchain as HalSwapchain, FrameSync,
};
use hal::pool::RawCommandPool;
use std::ffi::CString;
@ -104,12 +107,7 @@ pub extern "C" fn gfxGetPhysicalDeviceFormatProperties(
format: VkFormat,
pFormatProperties: *mut VkFormatProperties,
) {
let format = match format {
VkFormat::VK_FORMAT_UNDEFINED => None,
format => Some(conv::map_format(format)),
};
let properties = adapter.physical_device.format_properties(format);
let properties = adapter.physical_device.format_properties(conv::map_format(format));
unsafe {
*pFormatProperties = conv::format_properties_from_hal(properties);
}
@ -229,9 +227,28 @@ pub extern "C" fn gfxCreateDevice(
let gpu = adapter.physical_device.open(request_infos);
match gpu {
Ok(device) => {
Ok(mut gpu) => {
let queues = queue_infos
.iter()
.map(|info| {
let id = queue::QueueFamilyId(info.queueFamilyIndex as usize);
let group = gpu.queues.take_raw(id).unwrap();
let queues = group
.into_iter()
.map(|queue| Handle::new(queue))
.collect();
(info.queueFamilyIndex, queues)
})
.collect();
let gpu = Gpu {
device: gpu.device,
queues,
};
unsafe {
*pDevice = Handle::new(device);
*pDevice = Handle::new(gpu);
}
VkResult::VK_SUCCESS
}
@ -321,12 +338,14 @@ pub extern "C" fn gfxEnumerateDeviceLayerProperties(
}
#[inline]
pub extern "C" fn gfxGetDeviceQueue(
// NOTE(review): diff rendering — `device` is the old parameter line,
// `gpu` the committed one; only `gpu` exists in the new revision.
device: VkDevice,
gpu: VkDevice,
queueFamilyIndex: u32,
queueIndex: u32,
pQueue: *mut VkQueue,
) {
unimplemented!()
unsafe {
// Queues were grouped by family index in `gfxCreateDevice`; Vulkan
// valid usage guarantees the caller only asks for indices it requested
// at device creation, hence the `unwrap` and unchecked indexing.
*pQueue = gpu.queues.get(&queueFamilyIndex).unwrap()[queueIndex as usize];
}
}
#[inline]
pub extern "C" fn gfxQueueSubmit(
@ -574,20 +593,26 @@ pub extern "C" fn gfxWaitForFences(
}
#[inline]
pub extern "C" fn gfxCreateSemaphore(
// NOTE(review): diff rendering — `device`/`pAllocator` are the old
// parameter lines; `gpu`/`_pAllocator` are the committed ones.
device: VkDevice,
gpu: VkDevice,
pCreateInfo: *const VkSemaphoreCreateInfo,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
pSemaphore: *mut VkSemaphore,
) -> VkResult {
unimplemented!()
// `pCreateInfo` is not inspected — presumably it carries nothing the HAL
// semaphore needs; host allocator callbacks are not supported either.
let semaphore = gpu.device
.create_semaphore();
unsafe {
// Box the HAL semaphore behind a handle and return it to the caller.
*pSemaphore = Handle::new(semaphore);
}
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxDestroySemaphore(
// NOTE(review): diff rendering — `device`/`pAllocator` are the old
// parameter lines; `gpu`/`_pAllocator` are the committed ones.
device: VkDevice,
gpu: VkDevice,
semaphore: VkSemaphore,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
) {
unimplemented!()
// `Handle::unwrap` presumably reclaims the boxed HAL object so it can be
// moved by value into the device's destroy call — TODO confirm against
// the handle module.
gpu.device.destroy_semaphore(*semaphore.unwrap());
}
#[inline]
pub extern "C" fn gfxCreateEvent(
@ -720,7 +745,7 @@ pub extern "C" fn gfxCreateImage(
info.samples,
),
info.mipLevels as _,
conv::map_format(info.format),
conv::map_format(info.format).unwrap(),
conv::map_image_usage(info.usage),
)
.expect("Error on creating image");
@ -771,7 +796,7 @@ pub extern "C" fn gfxCreateImageView(
let view = gpu.device.create_image_view(
image,
conv::map_format(info.format),
conv::map_format(info.format).unwrap(),
conv::map_swizzle(info.components),
conv::map_subresource_range(info.subresourceRange),
);
@ -1193,20 +1218,173 @@ pub extern "C" fn gfxDestroyFramebuffer(
}
#[inline]
pub extern "C" fn gfxCreateRenderPass(
// NOTE(review): diff rendering — `device`/`pAllocator` are the old
// parameter lines; `gpu`/`_pAllocator` are the committed ones.
device: VkDevice,
gpu: VkDevice,
pCreateInfo: *const VkRenderPassCreateInfo,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
pRenderPass: *mut VkRenderPass,
) -> VkResult {
let info = unsafe { &*pCreateInfo };
// Attachment descriptions
let attachments = unsafe {
slice::from_raw_parts(info.pAttachments, info.attachmentCount as _)
};
let attachments = attachments
.into_iter()
.map(|attachment| {
// Attachment flags (e.g. MAY_ALIAS) are not handled yet.
assert_eq!(attachment.flags, 0); // TODO
let initial_layout = conv::map_image_layout(attachment.initialLayout);
let final_layout = conv::map_image_layout(attachment.finalLayout);
pass::Attachment {
// `map_format` yields `None` for VK_FORMAT_UNDEFINED.
format: conv::map_format(attachment.format),
ops: pass::AttachmentOps {
load: conv::map_attachment_load_op(attachment.loadOp),
store: conv::map_attachment_store_op(attachment.storeOp),
},
stencil_ops: pass::AttachmentOps {
load: conv::map_attachment_load_op(attachment.stencilLoadOp),
store: conv::map_attachment_store_op(attachment.stencilStoreOp),
},
// HAL expresses the layout transition as a range.
layouts: initial_layout .. final_layout,
}
})
.collect::<Vec<_>>();
// Subpass descriptions
let subpasses = unsafe {
slice::from_raw_parts(info.pSubpasses, info.subpassCount as _)
};
// Store all attachment references, referenced by the subpasses.
// Owned storage per subpass: `pass::SubpassDesc` below only borrows
// slices, so this data must outlive the borrowing vector.
let mut attachment_refs = Vec::with_capacity(subpasses.len());
struct AttachmentRefs {
input: Vec<pass::AttachmentRef>,
color: Vec<pass::AttachmentRef>,
resolve: Vec<pass::AttachmentRef>,
depth_stencil: Option<pass::AttachmentRef>,
preserve: Vec<usize>,
}
// A HAL attachment reference is an (index, layout) pair.
fn map_attachment_ref(att_ref: &VkAttachmentReference) -> pass::AttachmentRef {
(att_ref.attachment as _, conv::map_image_layout(att_ref.layout))
}
for subpass in subpasses {
let input = unsafe {
slice::from_raw_parts(subpass.pInputAttachments, subpass.inputAttachmentCount as _)
.into_iter()
.map(map_attachment_ref)
.collect()
};
let color = unsafe {
slice::from_raw_parts(subpass.pColorAttachments, subpass.colorAttachmentCount as _)
.into_iter()
.map(map_attachment_ref)
.collect()
};
// Multisample resolve attachments are not supported yet.
let resolve = if subpass.pResolveAttachments.is_null() {
Vec::new()
} else {
unimplemented!()
/*
unsafe {
slice::from_raw_parts(subpass.pResolveAttachments, subpass.colorAttachmentCount as _)
.into_iter()
.map(map_attachment_ref)
.collect()
}
*/
};
let depth_stencil = if subpass.pDepthStencilAttachment.is_null() {
None
} else {
Some(unsafe { map_attachment_ref(&*subpass.pDepthStencilAttachment) })
};
// Preserve attachments are plain indices with no layout attached.
let preserve = unsafe {
slice::from_raw_parts(subpass.pPreserveAttachments, subpass.preserveAttachmentCount as _)
.into_iter()
.map(|id| *id as usize)
.collect::<Vec<_>>()
};
attachment_refs.push(AttachmentRefs {
input,
color,
resolve,
depth_stencil,
preserve,
});
}
// Second pass: build the borrowing `SubpassDesc`s now that every
// `AttachmentRefs` entry exists and will no longer move.
let subpasses = subpasses
.into_iter()
.enumerate()
.map(|(i, _)| {
pass::SubpassDesc {
colors: &attachment_refs[i].color,
depth_stencil: attachment_refs[i].depth_stencil.as_ref(),
inputs: &attachment_refs[i].input,
preserves: &attachment_refs[i].preserve,
}
})
.collect::<Vec<_>>();
// Subpass dependencies
let dependencies = unsafe {
slice::from_raw_parts(info.pDependencies, info.dependencyCount as _)
};
// VK_SUBPASS_EXTERNAL is a sentinel meaning "outside the render pass".
fn map_subpass_ref(subpass: u32) -> pass::SubpassRef {
if subpass == VK_SUBPASS_EXTERNAL as u32 {
pass::SubpassRef::External
} else {
pass::SubpassRef::Pass(subpass as _)
}
}
let dependencies = dependencies
.into_iter()
.map(|dependency| {
// Dependency flags (e.g. BY_REGION) are not handled yet.
assert_eq!(dependency.dependencyFlags, 0); // TODO
let src_pass = map_subpass_ref(dependency.srcSubpass);
let dst_pass = map_subpass_ref(dependency.dstSubpass);
let src_stage = conv::map_pipeline_stage_flags(dependency.srcStageMask);
let dst_stage = conv::map_pipeline_stage_flags(dependency.dstStageMask);
// Our portability implementation only supports image access flags atm.
// Global buffer barriers can't be handled currently.
let src_access = conv::map_image_acces(dependency.srcAccessMask);
let dst_access = conv::map_image_acces(dependency.dstAccessMask);
pass::SubpassDependency {
passes: src_pass .. dst_pass,
stages: src_stage .. dst_stage,
accesses: src_access .. dst_access,
}
})
.collect::<Vec<_>>();
let render_pass = gpu
.device
.create_render_pass(&attachments, &subpasses, &dependencies);
unsafe {
// Box the HAL render pass behind a handle and hand it to the caller.
*pRenderPass = Handle::new(render_pass);
}
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxDestroyRenderPass(
// NOTE(review): diff rendering — `device`/`pAllocator` are the old
// parameter lines; `gpu`/`_pAllocator` are the committed ones.
device: VkDevice,
gpu: VkDevice,
renderPass: VkRenderPass,
pAllocator: *const VkAllocationCallbacks,
_pAllocator: *const VkAllocationCallbacks,
) {
unimplemented!()
// `Handle::unwrap` presumably reclaims the boxed HAL render pass so it
// can be moved into the destroy call — TODO confirm handle semantics.
gpu.device.destroy_renderpass(*renderPass.unwrap());
}
#[inline]
pub extern "C" fn gfxGetRenderAreaGranularity(
@ -1227,8 +1405,7 @@ pub extern "C" fn gfxCreateCommandPool(
use hal::pool::CommandPoolCreateFlags;
let info = unsafe { &*pCreateInfo };
assert_eq!(info.queueFamilyIndex, 0); //TODO
let family = gpu.queue_groups[0].family();
let family = queue::QueueFamilyId(info.queueFamilyIndex as _);
let mut flags = CommandPoolCreateFlags::empty();
if info.flags & VkCommandPoolCreateFlagBits::VK_COMMAND_POOL_CREATE_TRANSIENT_BIT as u32 != 0 {
@ -1863,7 +2040,7 @@ pub extern "C" fn gfxCreateSwapchainKHR(
); // TODO
let config = hal::SwapchainConfig {
color_format: conv::map_format(info.imageFormat),
color_format: conv::map_format(info.imageFormat).unwrap(),
depth_stencil_format: None,
image_count: info.minImageCount,
};
@ -2108,14 +2285,23 @@ pub extern "C" fn gfxCreateWin32SurfaceKHR(
}
#[inline]
pub extern "C" fn gfxAcquireNextImageKHR(
// NOTE(review): diff rendering — the first three parameter lines are the
// old revision; `_device`, `mut swapchain`, `_timeout` replace them.
device: VkDevice,
swapchain: VkSwapchainKHR,
timeout: u64,
_device: VkDevice,
mut swapchain: VkSwapchainKHR,
_timeout: u64, // TODO
semaphore: VkSemaphore,
fence: VkFence,
pImageIndex: *mut u32,
) -> VkResult {
unimplemented!()
// Prefer the semaphore when provided, otherwise fall back to the fence.
// Vulkan forbids both being VK_NULL_HANDLE at once, so the else branch
// presumably relies on that valid-usage rule to make `&*fence` sound —
// TODO confirm `Handle::is_null`/deref behavior in the handle module.
let sync = if !semaphore.is_null() {
FrameSync::Semaphore(&*semaphore)
} else {
FrameSync::Fence(&*fence)
};
// The timeout parameter is ignored for now (see TODO above).
let frame = swapchain.raw.acquire_frame(sync);
// Report the acquired frame's index through the out-pointer.
unsafe { *pImageIndex = frame.id() as _; }
VkResult::VK_SUCCESS
}
#[inline]
pub extern "C" fn gfxQueuePresentKHR(

View file

@ -16,16 +16,19 @@ mod conv;
mod handle;
mod impls;
use std::{cmp, slice};
use back::Backend as B;
use handle::Handle;
use std::{cmp, slice};
use std::collections::HashMap;
pub use impls::*;
// Vulkan objects
pub type VkInstance = Handle<back::Instance>;
pub type VkPhysicalDevice = Handle<hal::Adapter<B>>;
pub type VkDevice = Handle<hal::Gpu<B>>;
pub type VkDevice = Handle<Gpu<B>>;
pub type VkQueue = Handle<<B as hal::Backend>::CommandQueue>;
pub type VkCommandPool = Handle<<B as hal::Backend>::CommandPool>;
pub type VkCommandBuffer = Handle<<B as hal::Backend>::CommandBuffer>;
pub type VkDeviceMemory = Handle<<B as hal::Backend>::Memory>;
@ -36,32 +39,39 @@ pub type VkDescriptorSet = Handle<<B as hal::Backend>::DescriptorSet>;
pub type VkSampler = Handle<<B as hal::Backend>::Sampler>;
pub type VkBufferView = Handle<<B as hal::Backend>::BufferView>;
pub type VkShaderModule = Handle<<B as hal::Backend>::ShaderModule>;
pub type VkImage = Handle<Image<B>>;
pub type VkImageView = Handle<<B as hal::Backend>::ImageView>;
pub type VkBuffer = Handle<Buffer<B>>;
pub type VkSemaphore = Handle<<B as hal::Backend>::Semaphore>;
pub type VkFence = Handle<<B as hal::Backend>::Fence>;
pub type VkRenderPass = Handle<<B as hal::Backend>::RenderPass>;
pub type QueueFamilyIndex = u32;
/// Logical-device state behind a `VkDevice` handle: the HAL device plus
/// the command queues created alongside it.
pub struct Gpu<B: hal::Backend> {
// Raw HAL device; used by the gfx* entry points to create and destroy
// child objects (semaphores, render passes, ...).
device: B::Device,
// Queues grouped by Vulkan queue family index, filled in
// `gfxCreateDevice` and read by `gfxGetDeviceQueue`.
queues: HashMap<QueueFamilyIndex, Vec<VkQueue>>,
}
pub enum Image<B: hal::Backend> {
Image(B::Image),
Unbound(B::UnboundImage),
}
pub type VkImage = Handle<Image<B>>;
pub type VkImageView = Handle<<B as hal::Backend>::ImageView>;
pub enum Buffer<B: hal::Backend> {
Buffer(B::Buffer),
Unbound(B::UnboundBuffer),
}
pub type VkBuffer = Handle<Buffer<B>>;
//NOTE: all *KHR types have to be pure `Handle` things for compatibility with
//`VK_DEFINE_NON_DISPATCHABLE_HANDLE` used in `vulkan.h`
pub type VkSurfaceKHR = Handle<<B as hal::Backend>::Surface>;
pub type VkSwapchainKHR = Handle<Swapchain>;
pub struct Swapchain {
raw: <B as hal::Backend>::Swapchain,
images: Vec<VkImage>,
}
pub type VkSwapchainKHR = Handle<Swapchain>;
/* automatically generated by rust-bindgen */
@ -509,24 +519,6 @@ pub type VkBool32 = u32;
pub type VkDeviceSize = u64;
pub type VkSampleMask = u32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkQueue_T {
_unused: [u8; 0],
}
pub type VkQueue = *mut VkQueue_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkSemaphore_T {
_unused: [u8; 0],
}
pub type VkSemaphore = *mut VkSemaphore_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkFence_T {
_unused: [u8; 0],
}
pub type VkFence = *mut VkFence_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkEvent_T {
@ -547,12 +539,6 @@ pub struct VkPipelineCache_T {
pub type VkPipelineCache = *mut VkPipelineCache_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkRenderPass_T {
_unused: [u8; 0],
}
pub type VkRenderPass = *mut VkRenderPass_T;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct VkPipeline_T {
_unused: [u8; 0],
}

View file

@ -1336,7 +1336,14 @@ pub extern "C" fn vkAcquireNextImageKHR(
fence: VkFence,
pImageIndex: *mut u32,
) -> VkResult {
unimplemented!()
gfxAcquireNextImageKHR(
device,
swapchain,
timeout,
semaphore,
fence,
pImageIndex,
)
}
#[no_mangle]
pub extern "C" fn vkQueuePresentKHR(