Render API v2 (#112)

* WIP: Render API v2

* Fix doctests

* Expose all of PixelsContext (#110)

* Fix ScalingRenderer::new() taking &mut Device

* Replace getters with direct access to &mut PixelsContext

* Fix wrong reference type

* Fix unneeded mut

* Remove unnecessary mutable borrow, resurrect the shorter getter methods

* Initial port to wgpu master (0.6)
Surface creation is broken (see examples)
Does not support compressed textures

* Fix SurfaceTexture and examples

* Add support for compressed texture formats

* resize doesn't need mutability

* Update documentation

* Update wgpu

* Prepare release

* Goodbye Travis! Thanks for all the fish

Co-authored-by: JMS55 <47158642+JMS55@users.noreply.github.com>
Jay Oster authored on 2020-08-20 16:49:19 -07:00; committed by GitHub
parent 01d32c11f0
commit 265ba2e5b3
15 changed files with 365 additions and 305 deletions
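
The diffs below migrate every example to the new surface-creation flow: SurfaceTexture::new now borrows any window type that implements HasRawWindowHandle (re-exported from raw-window-handle) instead of taking a pre-built wgpu::Surface. A minimal sketch of that flow, assuming pixels 0.2 and winit 0.22; it skips the event loop and resize handling that the real examples below keep:

```rust
use pixels::{Error, Pixels, SurfaceTexture};
use winit::event_loop::EventLoop;
use winit::window::WindowBuilder;

fn main() -> Result<(), Error> {
    let event_loop = EventLoop::new();
    let window = WindowBuilder::new().build(&event_loop).unwrap();

    // The surface texture tracks the physical size of the window surface;
    // the caller no longer creates a wgpu::Surface itself.
    let size = window.inner_size();
    let surface_texture = SurfaceTexture::new(size.width, size.height, &window);

    // The pixel buffer keeps its own logical resolution and is scaled to fit.
    let mut pixels = Pixels::new(320, 240, surface_texture)?;

    // Fill the RGBA frame with opaque white and present it once.
    for texel in pixels.get_frame().chunks_exact_mut(4) {
        texel.copy_from_slice(&[0xff, 0xff, 0xff, 0xff]);
    }
    pixels.render()
}
```

The pixels_mocks::RWH value used in the doctests plays the same HasRawWindowHandle role without opening a real window.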

View file: .travis.yml (deleted)

@ -1,22 +0,0 @@
language: rust
dist: bionic
rust:
# MSRV
- 1.41.0
# Stable release channel
- stable
matrix:
fast_finish: true
before_script:
- rustup component add clippy
- rustup component add rustfmt
- sudo apt-get update
- sudo apt-get -y install libsdl2-dev
script:
- cargo clippy --all -- -D warnings
- cargo test --all
- cargo fmt --all -- --check

View file: Cargo.toml

@ -1,7 +1,7 @@
[package]
name = "pixels"
description = "A tiny library providing a GPU-powered pixel frame buffer."
version = "0.1.0"
version = "0.2.0"
authors = ["Jay Oster <jay@kodewerx.org>"]
edition = "2018"
repository = "https://github.com/parasyte/pixels"
@ -19,17 +19,19 @@ include = [
]
[dependencies]
thiserror = "1.0.15"
wgpu = "0.5.0"
pixels-dragons = { path = "internals/pixels-dragons" }
pollster = "0.2"
ultraviolet = "0.4.6"
raw-window-handle = "0.3"
thiserror = "1.0"
ultraviolet = "0.4"
wgpu = "0.6"
[dev-dependencies]
pixels-mocks = { path = "pixels-mocks" }
winit = "0.22.0"
pixels-mocks = { path = "internals/pixels-mocks" }
winit = "0.22"
[workspace]
members = [
"examples/*",
"pixels-mocks",
"internals/*",
]

View file: examples/conway/src/main.rs

@ -15,10 +15,10 @@ fn main() -> Result<(), Error> {
env_logger::init();
let event_loop = EventLoop::new();
let mut input = WinitInputHelper::new();
let (window, surface, p_width, p_height, mut _hidpi_factor) =
let (window, p_width, p_height, mut _hidpi_factor) =
create_window("Conway's Game of Life", &event_loop);
let surface_texture = SurfaceTexture::new(p_width, p_height, surface);
let surface_texture = SurfaceTexture::new(p_width, p_height, &window);
let mut life = ConwayGrid::new_random(SCREEN_WIDTH as usize, SCREEN_HEIGHT as usize);
let mut pixels = Pixels::new(SCREEN_WIDTH, SCREEN_HEIGHT, surface_texture)?;
@ -137,7 +137,7 @@ fn main() -> Result<(), Error> {
fn create_window(
title: &str,
event_loop: &EventLoop<()>,
) -> (winit::window::Window, pixels::wgpu::Surface, u32, u32, f64) {
) -> (winit::window::Window, u32, u32, f64) {
// Create a hidden window so we can estimate a good default window size
let window = winit::window::WindowBuilder::new()
.with_visible(false)
@ -171,12 +171,10 @@ fn create_window(
window.set_outer_position(center);
window.set_visible(true);
let surface = pixels::wgpu::Surface::create(&window);
let size = default_size.to_physical::<f64>(hidpi_factor);
(
window,
surface,
size.width.round() as u32,
size.height.round() as u32,
hidpi_factor,

View file: examples/custom-shader/src/main.rs

@ -3,10 +3,7 @@
use crate::renderers::NoiseRenderer;
use log::error;
use pixels::{
wgpu::{self, Surface},
Error, Pixels, SurfaceTexture,
};
use pixels::{raw_window_handle::HasRawWindowHandle, wgpu, Error, Pixels, SurfaceTexture};
use winit::dpi::LogicalSize;
use winit::event::{Event, VirtualKeyCode};
use winit::event_loop::{ControlFlow, EventLoop};
@ -43,25 +40,25 @@ fn main() -> Result<(), Error> {
let mut pixels = {
let window_size = window.inner_size();
let surface = Surface::create(&window);
let surface_texture = SurfaceTexture::new(window_size.width, window_size.height, surface);
let surface_texture = SurfaceTexture::new(window_size.width, window_size.height, &window);
Pixels::new(WIDTH, HEIGHT, surface_texture)?
};
let mut world = World::new();
let mut time = 0.0;
let (scaled_texture, mut noise_renderer) = create_noise_renderer(&pixels);
let (scaled_texture, noise_renderer) = create_noise_renderer(&pixels);
event_loop.run(move |event, _, control_flow| {
// Draw the current frame
if let Event::RedrawRequested(_) = event {
world.draw(pixels.get_frame());
noise_renderer.update(pixels.device(), pixels.queue(), time);
time += 1.0;
let render_result = pixels.render_with(|encoder, render_target, context| {
context.scaling_renderer.render(encoder, &scaled_texture);
noise_renderer.update(&context.queue, time);
time += 0.01;
let render_result = pixels.render_with(|encoder, render_target, scaling_renderer| {
scaling_renderer.render(encoder, &scaled_texture);
noise_renderer.render(encoder, render_target);
});
@ -142,7 +139,11 @@ impl World {
}
}
fn create_noise_renderer(pixels: &Pixels) -> (wgpu::TextureView, NoiseRenderer) {
fn create_noise_renderer<W: HasRawWindowHandle>(
pixels: &Pixels<W>,
) -> (wgpu::TextureView, NoiseRenderer) {
let device = &pixels.device();
let texture_descriptor = wgpu::TextureDescriptor {
label: None,
size: pixels::wgpu::Extent3d {
@ -150,18 +151,16 @@ fn create_noise_renderer(pixels: &Pixels) -> (wgpu::TextureView, NoiseRenderer)
height: HEIGHT,
depth: 1,
},
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let scaled_texture = pixels
.device()
let scaled_texture = device
.create_texture(&texture_descriptor)
.create_default_view();
let noise_renderer = NoiseRenderer::new(pixels.device(), &scaled_texture);
.create_view(&wgpu::TextureViewDescriptor::default());
let noise_renderer = NoiseRenderer::new(device, &scaled_texture);
(scaled_texture, noise_renderer)
}
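
The hunk above interleaves the old and new render_with call sites; consolidated, the 0.2 closure receives the command encoder, the swap chain frame's view, and a &PixelsContext. A hedged sketch of that hook, where render_frame and draw_overlay are hypothetical names standing in for this example's event-loop code and NoiseRenderer pass:

```rust
use pixels::{raw_window_handle::HasRawWindowHandle, wgpu, Error, Pixels};

// `render_frame` and `draw_overlay` are illustrative names, not part of pixels.
fn render_frame<W: HasRawWindowHandle>(
    pixels: &mut Pixels<W>,
    draw_overlay: impl Fn(&mut wgpu::CommandEncoder, &wgpu::TextureView),
) -> Result<(), Error> {
    pixels.render_with(|encoder, render_target, context| {
        // The built-in scaling pass copies the pixel buffer to the frame...
        context.scaling_renderer.render(encoder, render_target);
        // ...then custom passes are recorded on the same encoder. (The example
        // above instead scales into an intermediate texture before its noise pass.)
        draw_overlay(encoder, render_target);
    })
}
```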

View file: examples/custom-shader/src/renderers.rs

@ -1,4 +1,4 @@
use pixels::{include_spv, wgpu};
use pixels::wgpu::{self, util::DeviceExt};
pub(crate) struct NoiseRenderer {
bind_group: wgpu::BindGroup,
@ -8,11 +8,12 @@ pub(crate) struct NoiseRenderer {
impl NoiseRenderer {
pub(crate) fn new(device: &wgpu::Device, texture_view: &wgpu::TextureView) -> Self {
let vs_module = device.create_shader_module(include_spv!("../shaders/vert.spv"));
let fs_module = device.create_shader_module(include_spv!("../shaders/frag.spv"));
let vs_module = device.create_shader_module(wgpu::include_spirv!("../shaders/vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("../shaders/frag.spv"));
// Create a texture sampler with nearest neighbor
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("NoiseRenderer sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
@ -21,20 +22,21 @@ impl NoiseRenderer {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: 0.0,
lod_max_clamp: 1.0,
compare: wgpu::CompareFunction::Always,
compare: None,
anisotropy_clamp: None,
});
// Create uniform buffer
let time_buffer = device.create_buffer(&wgpu::BufferDescriptor {
let time_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("NoiseRenderer u_Time"),
size: 4,
contents: &0.0_f32.to_ne_bytes(),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
// Create bind group
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
bindings: &[
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
@ -43,47 +45,53 @@ impl NoiseRenderer {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false },
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
],
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &bind_group_layout,
bindings: &[
wgpu::Binding {
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(texture_view),
},
wgpu::Binding {
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::Binding {
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::Buffer {
buffer: &time_buffer,
range: 0..4,
},
resource: wgpu::BindingResource::Buffer(time_buffer.slice(..)),
},
],
});
// Create pipeline
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("NoiseRenderer pipeline layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &pipeline_layout,
label: Some("NoiseRenderer pipeline"),
layout: Some(&pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
@ -95,6 +103,7 @@ impl NoiseRenderer {
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::None,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
@ -123,15 +132,8 @@ impl NoiseRenderer {
}
}
pub(crate) fn update(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, time: f32) {
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let temp_buf =
device.create_buffer_with_data(&time.to_ne_bytes(), wgpu::BufferUsage::COPY_SRC);
encoder.copy_buffer_to_buffer(&temp_buf, 0, &self.time_buffer, 0, 4);
queue.submit(&[encoder.finish()]);
pub(crate) fn update(&self, queue: &wgpu::Queue, time: f32) {
queue.write_buffer(&self.time_buffer, 0, &time.to_ne_bytes());
}
pub(crate) fn render(
@ -143,9 +145,10 @@ impl NoiseRenderer {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: render_target,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color::BLACK,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
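
Two wgpu 0.6 changes dominate this file: buffers are created with create_buffer_init from the wgpu::util::DeviceExt trait, and small uniform updates go through Queue::write_buffer instead of a staging buffer plus an encoder copy. A sketch of that update pattern in isolation; TimeUniform is a hypothetical wrapper, not a type from this example:

```rust
use wgpu::util::DeviceExt;

// Hypothetical wrapper around the u_Time uniform used by NoiseRenderer above.
struct TimeUniform {
    buffer: wgpu::Buffer,
}

impl TimeUniform {
    fn new(device: &wgpu::Device) -> Self {
        // create_buffer_init uploads the initial contents in one call;
        // COPY_DST keeps the buffer writable from the queue later on.
        let buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("u_Time"),
            contents: &0.0_f32.to_ne_bytes(),
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        });
        Self { buffer }
    }

    // No command encoder, no temporary buffer, and no &mut self required.
    fn update(&self, queue: &wgpu::Queue, time: f32) {
        queue.write_buffer(&self.buffer, 0, &time.to_ne_bytes());
    }
}
```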

View file: examples/invaders/src/main.rs

@ -25,9 +25,8 @@ fn main() -> Result<(), Error> {
.parse()
.unwrap_or(false);
let (window, surface, width, height, mut _hidpi_factor) =
create_window("pixel invaders", &event_loop);
let surface_texture = SurfaceTexture::new(width, height, surface);
let (window, width, height, mut _hidpi_factor) = create_window("pixel invaders", &event_loop);
let surface_texture = SurfaceTexture::new(width, height, &window);
let mut pixels = Pixels::new(SCREEN_WIDTH as u32, SCREEN_HEIGHT as u32, surface_texture)?;
let mut invaders = World::new(generate_seed(), debug);
let mut time = Instant::now();
@ -130,7 +129,7 @@ fn main() -> Result<(), Error> {
fn create_window(
title: &str,
event_loop: &EventLoop<()>,
) -> (winit::window::Window, pixels::wgpu::Surface, u32, u32, f64) {
) -> (winit::window::Window, u32, u32, f64) {
// Create a hidden window so we can estimate a good default window size
let window = winit::window::WindowBuilder::new()
.with_visible(false)
@ -163,12 +162,10 @@ fn create_window(
window.set_outer_position(center);
window.set_visible(true);
let surface = pixels::wgpu::Surface::create(&window);
let size = default_size.to_physical::<f64>(hidpi_factor);
(
window,
surface,
size.width.round() as u32,
size.height.round() as u32,
hidpi_factor,

View file: examples/minimal-sdl2/src/main.rs

@ -3,7 +3,7 @@
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use beryllium::*;
use pixels::{wgpu::Surface, Pixels, SurfaceTexture};
use pixels::{Pixels, SurfaceTexture};
const WIDTH: u32 = 320;
const HEIGHT: u32 = 240;
@ -24,10 +24,9 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
sdl.create_raw_window("Hello Pixels", WindowPosition::Centered, WIDTH, HEIGHT, 0)?;
let mut pixels = {
let surface = Surface::create(&window);
// TODO: Beryllium does not expose the SDL2 `GetDrawableSize` APIs, so choosing the correct
// surface texture size is not possible.
let surface_texture = SurfaceTexture::new(WIDTH, HEIGHT, surface);
let surface_texture = SurfaceTexture::new(WIDTH, HEIGHT, &window);
Pixels::new(WIDTH, HEIGHT, surface_texture)?
};
let mut world = World::new();

View file: examples/minimal-winit/src/main.rs

@ -2,7 +2,7 @@
#![forbid(unsafe_code)]
use log::error;
use pixels::{wgpu::Surface, Error, Pixels, SurfaceTexture};
use pixels::{Error, Pixels, SurfaceTexture};
use winit::dpi::LogicalSize;
use winit::event::{Event, VirtualKeyCode};
use winit::event_loop::{ControlFlow, EventLoop};
@ -37,8 +37,7 @@ fn main() -> Result<(), Error> {
let mut pixels = {
let window_size = window.inner_size();
let surface = Surface::create(&window);
let surface_texture = SurfaceTexture::new(window_size.width, window_size.height, surface);
let surface_texture = SurfaceTexture::new(window_size.width, window_size.height, &window);
Pixels::new(WIDTH, HEIGHT, surface_texture)?
};
let mut world = World::new();

View file: internals/pixels-dragons/Cargo.toml (new)

@ -0,0 +1,9 @@
[package]
name = "pixels-dragons"
version = "0.1.0"
authors = ["Jay Oster <jay@kodewerx.org>"]
edition = "2018"
[dependencies]
raw-window-handle = "0.3"
wgpu = "0.6"

View file: internals/pixels-dragons/src/lib.rs (new)

@ -0,0 +1,24 @@
//! Here be dragons. Abandon all hope, ye who enter.
//!
//! This is probably a bad idea. The purpose of this crate is to move all `unsafe` invocations
//! into a single location and provide a faux safe interface that can be accessed by safe code with
//! `#![forbid(unsafe_code)]`
//!
//! This crate is only intended to be used by `pixels`.
#![deny(clippy::all)]
use raw_window_handle::HasRawWindowHandle;
use wgpu::{Instance, Surface};
/// Create a [`wgpu::Surface`] from the given window handle.
///
/// # Safety
///
/// The window handle must be valid, or very bad things will happen.
pub fn surface_from_window_handle<W: HasRawWindowHandle>(
instance: &Instance,
window: &W,
) -> Surface {
unsafe { instance.create_surface(window) }
}
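
A sketch of how a caller is expected to use this wrapper, mirroring the src/lib.rs changes below; create_surface is a hypothetical helper for illustration:

```rust
use pixels_dragons::surface_from_window_handle;
use raw_window_handle::HasRawWindowHandle;

// Hypothetical helper: the caller owns the wgpu::Instance and a live window
// handle, and the only unsafe code stays inside pixels-dragons.
fn create_surface<W: HasRawWindowHandle>(window: &W) -> (wgpu::Instance, wgpu::Surface) {
    let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
    let surface = surface_from_window_handle(&instance, window);
    (instance, surface)
}
```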

View file: src/lib.rs

@ -7,7 +7,8 @@
//!
//! The GPU interface is offered by [`wgpu`](https://crates.io/crates/wgpu), and is re-exported for
//! your convenience. Use a windowing framework or context manager of your choice;
//! [`winit`](https://crates.io/crates/winit) is a good place to start.
//! [`winit`](https://crates.io/crates/winit) is a good place to start. Any windowing framework that
//! uses [`raw-window-handle`](https://crates.io/crates/raw-window-handle) will work.
//!
//! # Environment variables
//!
@ -28,43 +29,74 @@
#![deny(clippy::all)]
#![forbid(unsafe_code)]
use std::env;
pub use crate::macros::*;
pub use crate::renderers::ScalingRenderer;
use thiserror::Error;
pub use raw_window_handle;
pub use wgpu;
mod macros;
use pixels_dragons::surface_from_window_handle;
use raw_window_handle::HasRawWindowHandle;
use std::env;
use thiserror::Error;
mod renderers;
/// A logical texture for a window surface.
#[derive(Debug)]
pub struct SurfaceTexture {
surface: wgpu::Surface,
pub struct SurfaceTexture<'win, W: HasRawWindowHandle> {
window: &'win W,
size: SurfaceSize,
}
/// A logical texture size for a window surface.
#[derive(Debug)]
pub struct SurfaceSize {
width: u32,
height: u32,
}
/// Provides the internal state for custom shaders.
///
/// A reference to this struct is given to the `render_function` closure when using
/// [`Pixels::render_with`].
#[derive(Debug)]
pub struct PixelsContext {
/// The `Device` allows creating GPU resources.
pub device: wgpu::Device,
/// The `Queue` provides access to the GPU command queue.
pub queue: wgpu::Queue,
surface: wgpu::Surface,
swap_chain: wgpu::SwapChain,
/// This is the texture that your raw data is copied to by [`Pixels::render`] or
/// [`Pixels::render_with`].
pub texture: wgpu::Texture,
/// Provides access to the texture size.
pub texture_extent: wgpu::Extent3d,
/// Defines the "data rate" for the raw texture data. This is effectively the "bytes per pixel"
/// count.
///
/// Compressed textures may have less than one byte per pixel.
pub texture_format_size: f32,
/// A default renderer to scale the input texture to the screen size.
pub scaling_renderer: ScalingRenderer,
}
/// Represents a 2D pixel buffer with an explicit image resolution.
///
/// See [`PixelsBuilder`] for building a customized pixel buffer.
#[derive(Debug)]
pub struct Pixels {
// WGPU state
device: wgpu::Device,
queue: wgpu::Queue,
swap_chain: wgpu::SwapChain,
surface_texture: SurfaceTexture,
pub struct Pixels<W: HasRawWindowHandle> {
context: PixelsContext,
surface_size: SurfaceSize,
present_mode: wgpu::PresentMode,
_phantom: std::marker::PhantomData<W>,
// A default renderer to scale the input texture to the screen size
scaling_renderer: ScalingRenderer,
// Texture state for the texel upload
texture: wgpu::Texture,
texture_extent: wgpu::Extent3d,
texture_format_size: u32,
// Pixel buffer
pixels: Vec<u8>,
// The inverse of the scaling matrix used by the renderer
@ -73,7 +105,7 @@ pub struct Pixels {
}
/// A builder to help create customized pixel buffers.
pub struct PixelsBuilder<'req> {
pub struct PixelsBuilder<'req, 'win, W: HasRawWindowHandle> {
request_adapter_options: Option<wgpu::RequestAdapterOptions<'req>>,
device_descriptor: wgpu::DeviceDescriptor,
backend: wgpu::BackendBit,
@ -81,7 +113,7 @@ pub struct PixelsBuilder<'req> {
height: u32,
pixel_aspect_ratio: f64,
present_mode: wgpu::PresentMode,
surface_texture: SurfaceTexture,
surface_texture: SurfaceTexture<'win, W>,
texture_format: wgpu::TextureFormat,
}
@ -89,14 +121,17 @@ pub struct PixelsBuilder<'req> {
#[derive(Error, Debug)]
pub enum Error {
/// No suitable [`wgpu::Adapter`] found
#[error("No suitable `wgpu::Adapter` found")]
#[error("No suitable `wgpu::Adapter` found.")]
AdapterNotFound,
/// Equivalent to [`wgpu::TimeOut`]
#[error("The GPU timed out when attempting to acquire the next texture or if a previous output is still alive.")]
Timeout,
/// Equivalent to [`wgpu::RequestDeviceError`]
#[error("No wgpu::Device found.")]
DeviceNotFound(wgpu::RequestDeviceError),
/// Equivalent to [`wgpu::SwapChainError`]
#[error("The GPU failed to acquire a swapchain frame.")]
Swapchain(wgpu::SwapChainError),
}
impl SurfaceTexture {
impl<'win, W: HasRawWindowHandle> SurfaceTexture<'win, W> {
/// Create a logical texture for a window surface.
///
/// It is recommended (but not required) that the `width` and `height` are equivalent to the
@ -105,46 +140,43 @@ impl SurfaceTexture {
/// # Examples
///
/// ```no_run
/// use pixels::{wgpu::Surface, SurfaceTexture};
/// use pixels::SurfaceTexture;
/// use winit::event_loop::EventLoop;
/// use winit::window::Window;
///
/// let event_loop = EventLoop::new();
/// let window = Window::new(&event_loop).unwrap();
/// let surface = Surface::create(&window);
/// let size = window.inner_size();
///
/// let width = size.width;
/// let height = size.height;
///
/// let surface_texture = SurfaceTexture::new(width, height, surface);
/// let surface_texture = SurfaceTexture::new(width, height, &window);
/// # Ok::<(), pixels::Error>(())
/// ```
///
/// # Panics
///
/// Panics when `width` or `height` are 0.
pub fn new(width: u32, height: u32, surface: wgpu::Surface) -> SurfaceTexture {
pub fn new(width: u32, height: u32, window: &'win W) -> SurfaceTexture<'win, W> {
assert!(width > 0);
assert!(height > 0);
SurfaceTexture {
surface,
width,
height,
}
let size = SurfaceSize { width, height };
SurfaceTexture { window, size }
}
}
impl Pixels {
impl<'win, W: HasRawWindowHandle> Pixels<W> {
/// Create a pixel buffer instance with default options.
///
/// # Examples
///
/// ```no_run
/// # use pixels::Pixels;
/// # let surface = wgpu::Surface::create(&pixels_mocks::RWH);
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, surface);
/// # let window = pixels_mocks::RWH;
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, &window);
/// let mut pixels = Pixels::new(320, 240, surface_texture)?;
/// # Ok::<(), pixels::Error>(())
/// ```
@ -156,7 +188,11 @@ impl Pixels {
/// # Panics
///
/// Panics when `width` or `height` are 0.
pub fn new(width: u32, height: u32, surface_texture: SurfaceTexture) -> Result<Pixels, Error> {
pub fn new(
width: u32,
height: u32,
surface_texture: SurfaceTexture<'win, W>,
) -> Result<Pixels<W>, Error> {
PixelsBuilder::new(width, height, surface_texture).build()
}
@ -169,14 +205,14 @@ impl Pixels {
/// is in physical pixel units.
pub fn resize(&mut self, width: u32, height: u32) {
// Update SurfaceTexture dimensions
self.surface_texture.width = width;
self.surface_texture.height = height;
self.surface_size.width = width;
self.surface_size.height = height;
// Update ScalingMatrix for mouse transformation
self.scaling_matrix_inverse = renderers::ScalingMatrix::new(
(
self.texture_extent.width as f32,
self.texture_extent.height as f32,
self.context.texture_extent.width as f32,
self.context.texture_extent.height as f32,
),
(width as f32, height as f32),
)
@ -184,38 +220,35 @@ impl Pixels {
.inversed();
// Recreate the swap chain
self.swap_chain = self.device.create_swap_chain(
&self.surface_texture.surface,
self.context.swap_chain = self.context.device.create_swap_chain(
&self.context.surface,
&wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: self.surface_texture.width,
height: self.surface_texture.height,
width: self.surface_size.width,
height: self.surface_size.height,
present_mode: self.present_mode,
},
);
// Update state for all render passes
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
self.scaling_renderer
.resize(&mut self.device, &mut encoder, width, height);
self.queue.submit(&[encoder.finish()]);
self.context
.scaling_renderer
.resize(&self.context.queue, width, height);
}
/// Draw this pixel buffer to the configured [`SurfaceTexture`].
///
/// # Errors
///
/// Returns an error when [`wgpu::SwapChain::get_next_texture`] times out.
/// Returns an error when [`wgpu::SwapChain::get_current_frame`] fails.
///
/// # Example
///
/// ```no_run
/// # use pixels::Pixels;
/// # let surface = wgpu::Surface::create(&pixels_mocks::RWH);
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, surface);
/// # let window = pixels_mocks::RWH;
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, &window);
/// let mut pixels = Pixels::new(320, 240, surface_texture)?;
///
/// // Clear the pixel buffer
@ -232,8 +265,8 @@ impl Pixels {
/// # Ok::<(), pixels::Error>(())
/// ```
pub fn render(&mut self) -> Result<(), Error> {
self.render_with(|encoder, render_target, scaling_renderer| {
scaling_renderer.render(encoder, render_target);
self.render_with(|encoder, render_target, context| {
context.scaling_renderer.render(encoder, render_target);
})
}
@ -241,18 +274,19 @@ impl Pixels {
/// render function.
///
/// Provides access to a [`wgpu::CommandEncoder`], a [`wgpu::TextureView`] from the swapchain
/// which you can use to render to the screen, and the default [`ScalingRenderer`].
/// which you can use to render to the screen, and a [`PixelsContext`] with all of the internal
/// `wgpu` context.
///
/// # Errors
///
/// Returns an error when [`wgpu::SwapChain::get_next_texture`] times out.
/// Returns an error when [`wgpu::SwapChain::get_current_frame`] fails.
///
/// # Example
///
/// ```no_run
/// # use pixels::Pixels;
/// # let surface = wgpu::Surface::create(&pixels_mocks::RWH);
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, surface);
/// # let window = pixels_mocks::RWH;
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, &window);
/// let mut pixels = Pixels::new(320, 240, surface_texture)?;
///
/// // Clear the pixel buffer
@ -265,54 +299,51 @@ impl Pixels {
/// }
///
/// // Draw it to the `SurfaceTexture`
/// pixels.render_with(|encoder, render_target, scaling_renderer| {
/// scaling_renderer.render(encoder, render_target);
/// pixels.render_with(|encoder, render_target, context| {
/// context.scaling_renderer.render(encoder, render_target);
/// // etc...
/// });
/// # Ok::<(), pixels::Error>(())
/// ```
pub fn render_with<F>(&mut self, render_function: F) -> Result<(), Error>
where
F: FnOnce(&mut wgpu::CommandEncoder, &wgpu::TextureView, &ScalingRenderer),
F: FnOnce(&mut wgpu::CommandEncoder, &wgpu::TextureView, &PixelsContext),
{
// TODO: Center frame buffer in surface
let frame = self
.context
.swap_chain
.get_next_texture()
.map_err(|_| Error::Timeout)?;
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
.get_current_frame()
.map_err(Error::Swapchain)?;
let mut encoder =
self.context
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("pixels_command_encoder"),
});
// Update the pixel buffer texture view
let mapped = self.device.create_buffer_mapped(&wgpu::BufferDescriptor {
label: None,
size: self.pixels.len() as u64,
usage: wgpu::BufferUsage::COPY_SRC,
});
mapped.data.copy_from_slice(&self.pixels);
let buffer = mapped.finish();
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
offset: 0,
bytes_per_row: self.texture_extent.width * self.texture_format_size,
rows_per_image: self.texture_extent.height,
},
let bytes_per_row =
(self.context.texture_extent.width as f32 * self.context.texture_format_size) as u32;
self.context.queue.write_texture(
wgpu::TextureCopyView {
texture: &self.texture,
texture: &self.context.texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d { x: 0, y: 0, z: 0 },
},
self.texture_extent,
&self.pixels,
wgpu::TextureDataLayout {
offset: 0,
bytes_per_row,
rows_per_image: self.context.texture_extent.height,
},
self.context.texture_extent,
);
// Call the users render function.
(render_function)(&mut encoder, &frame.view, &self.scaling_renderer);
(render_function)(&mut encoder, &frame.output.view, &self.context);
self.queue.submit(&[encoder.finish()]);
self.context.queue.submit(Some(encoder.finish()));
Ok(())
}
@ -334,8 +365,8 @@ impl Pixels {
///
/// ```no_run
/// # use pixels::Pixels;
/// # let surface = wgpu::Surface::create(&pixels_mocks::RWH);
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, surface);
/// # let window = pixels_mocks::RWH;
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, &window);
/// const WIDTH: u32 = 320;
/// const HEIGHT: u32 = 240;
///
@ -354,11 +385,11 @@ impl Pixels {
&self,
physical_position: (f32, f32),
) -> Result<(usize, usize), (isize, isize)> {
let physical_width = self.surface_texture.width as f32;
let physical_height = self.surface_texture.height as f32;
let physical_width = self.surface_size.width as f32;
let physical_height = self.surface_size.height as f32;
let pixels_width = self.texture_extent.width as f32;
let pixels_height = self.texture_extent.height as f32;
let pixels_width = self.context.texture_extent.width as f32;
let pixels_height = self.context.texture_extent.height as f32;
let pos = ultraviolet::Vec4::new(
(physical_position.0 / physical_width - 0.5) * pixels_width,
@ -377,9 +408,9 @@ impl Pixels {
let pixel_y = pos.1.floor() as isize;
if pixel_x < 0
|| pixel_x >= self.texture_extent.width as isize
|| pixel_x >= self.context.texture_extent.width as isize
|| pixel_y < 0
|| pixel_y >= self.texture_extent.height as isize
|| pixel_y >= self.context.texture_extent.height as isize
{
Err((pixel_x, pixel_y))
} else {
@ -394,8 +425,8 @@ impl Pixels {
///
/// ```no_run
/// # use pixels::Pixels;
/// # let surface = wgpu::Surface::create(&pixels_mocks::RWH);
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, surface);
/// # let window = pixels_mocks::RWH;
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, &window);
/// const WIDTH: u32 = 320;
/// const HEIGHT: u32 = 240;
///
@ -410,38 +441,47 @@ impl Pixels {
/// ```
pub fn clamp_pixel_pos(&self, pos: (isize, isize)) -> (usize, usize) {
(
pos.0.max(0).min(self.texture_extent.width as isize - 1) as usize,
pos.1.max(0).min(self.texture_extent.height as isize - 1) as usize,
pos.0
.max(0)
.min(self.context.texture_extent.width as isize - 1) as usize,
pos.1
.max(0)
.min(self.context.texture_extent.height as isize - 1) as usize,
)
}
/// Provides access to the internal [`wgpu::Device`].
pub fn device(&self) -> &wgpu::Device {
&self.device
&self.context.device
}
/// Provides access to the internal [`wgpu::Queue`].
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
&self.context.queue
}
/// Provides access to the internal source [`wgpu::Texture`].
///
/// This is the pre-scaled texture copied from the pixel buffer.
pub fn texture(&self) -> &wgpu::Texture {
&self.texture
&self.context.texture
}
/// Provides access to the internal [`PixelsContext`]
pub fn context(&self) -> &PixelsContext {
&self.context
}
}
impl<'req> PixelsBuilder<'req> {
impl<'req, 'win, W: HasRawWindowHandle> PixelsBuilder<'req, 'win, W> {
/// Create a builder that can be finalized into a [`Pixels`] pixel buffer.
///
/// # Examples
///
/// ```no_run
/// # use pixels::PixelsBuilder;
/// # let surface = wgpu::Surface::create(&pixels_mocks::RWH);
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, surface);
/// # let window = pixels_mocks::RWH;
/// # let surface_texture = pixels::SurfaceTexture::new(1024, 768, &window);
/// let mut pixels = PixelsBuilder::new(256, 240, surface_texture)
/// .request_adapter_options(wgpu::RequestAdapterOptions {
/// power_preference: wgpu::PowerPreference::HighPerformance,
@ -455,7 +495,11 @@ impl<'req> PixelsBuilder<'req> {
/// # Panics
///
/// Panics when `width` or `height` are 0.
pub fn new(width: u32, height: u32, surface_texture: SurfaceTexture) -> PixelsBuilder<'req> {
pub fn new(
width: u32,
height: u32,
surface_texture: SurfaceTexture<'win, W>,
) -> PixelsBuilder<'req, 'win, W> {
assert!(width > 0);
assert!(height > 0);
@ -473,19 +517,19 @@ impl<'req> PixelsBuilder<'req> {
}
/// Add options for requesting a [`wgpu::Adapter`].
pub const fn request_adapter_options(
pub fn request_adapter_options(
mut self,
request_adapter_options: wgpu::RequestAdapterOptions<'req>,
) -> PixelsBuilder {
) -> PixelsBuilder<'req, 'win, W> {
self.request_adapter_options = Some(request_adapter_options);
self
}
/// Add options for requesting a [`wgpu::Device`].
pub const fn device_descriptor(
pub fn device_descriptor(
mut self,
device_descriptor: wgpu::DeviceDescriptor,
) -> PixelsBuilder<'req> {
) -> PixelsBuilder<'req, 'win, W> {
self.device_descriptor = device_descriptor;
self
}
@ -494,7 +538,7 @@ impl<'req> PixelsBuilder<'req> {
///
/// The default value of this is [`wgpu::BackendBit::PRIMARY`], which enables
/// the well supported backends for wgpu.
pub const fn wgpu_backend(mut self, backend: wgpu::BackendBit) -> PixelsBuilder<'req> {
pub fn wgpu_backend(mut self, backend: wgpu::BackendBit) -> PixelsBuilder<'req, 'win, W> {
self.backend = backend;
self
}
@ -514,7 +558,7 @@ impl<'req> PixelsBuilder<'req> {
///
/// This documentation is hidden because support for pixel aspect ratio is incomplete.
#[doc(hidden)]
pub fn pixel_aspect_ratio(mut self, pixel_aspect_ratio: f64) -> PixelsBuilder<'req> {
pub fn pixel_aspect_ratio(mut self, pixel_aspect_ratio: f64) -> PixelsBuilder<'req, 'win, W> {
assert!(pixel_aspect_ratio > 0.0);
self.pixel_aspect_ratio = pixel_aspect_ratio;
@ -528,7 +572,7 @@ impl<'req> PixelsBuilder<'req> {
/// The `wgpu` present mode will be set to `Fifo` when Vsync is enabled, or `Immediate` when
/// Vsync is disabled. To set the present mode to `Mailbox` or another value, use the
/// [`PixelsBuilder::present_mode`] method.
pub fn enable_vsync(mut self, enable_vsync: bool) -> PixelsBuilder<'req> {
pub fn enable_vsync(mut self, enable_vsync: bool) -> PixelsBuilder<'req, 'win, W> {
self.present_mode = if enable_vsync {
wgpu::PresentMode::Fifo
} else {
@ -541,7 +585,7 @@ impl<'req> PixelsBuilder<'req> {
///
/// This differs from [`PixelsBuilder::enable_vsync`] by allowing the present mode to be set to
/// any value.
pub fn present_mode(mut self, present_mode: wgpu::PresentMode) -> PixelsBuilder<'req> {
pub fn present_mode(mut self, present_mode: wgpu::PresentMode) -> PixelsBuilder<'req, 'win, W> {
self.present_mode = present_mode;
self
}
@ -551,10 +595,10 @@ impl<'req> PixelsBuilder<'req> {
/// The default value is [`wgpu::TextureFormat::Rgba8UnormSrgb`], which is 4 unsigned bytes in
/// `RGBA` order using the SRGB color space. This is typically what you want when you are
/// working with color values from popular image editing tools or web apps.
pub const fn texture_format(
pub fn texture_format(
mut self,
texture_format: wgpu::TextureFormat,
) -> PixelsBuilder<'req> {
) -> PixelsBuilder<'req, 'win, W> {
self.texture_format = texture_format;
self
}
@ -564,26 +608,27 @@ impl<'req> PixelsBuilder<'req> {
/// # Errors
///
/// Returns an error when a [`wgpu::Adapter`] cannot be found.
pub fn build(self) -> Result<Pixels, Error> {
// TODO: Use `options.pixel_aspect_ratio` to stretch the scaled texture
let compatible_surface = Some(&self.surface_texture.surface);
let adapter = pollster::block_on(wgpu::Adapter::request(
&self.request_adapter_options.map_or_else(
|| wgpu::RequestAdapterOptions {
compatible_surface,
power_preference: get_default_power_preference(),
},
|rao| wgpu::RequestAdapterOptions {
compatible_surface: rao.compatible_surface.or(compatible_surface),
power_preference: rao.power_preference,
},
),
self.backend,
))
.ok_or(Error::AdapterNotFound)?;
pub fn build(self) -> Result<Pixels<W>, Error> {
let instance = wgpu::Instance::new(self.backend);
let (mut device, queue) =
pollster::block_on(adapter.request_device(&self.device_descriptor));
// TODO: Use `options.pixel_aspect_ratio` to stretch the scaled texture
let surface = surface_from_window_handle(&instance, self.surface_texture.window);
let compatible_surface = Some(&surface);
let adapter = instance.request_adapter(&self.request_adapter_options.map_or_else(
|| wgpu::RequestAdapterOptions {
compatible_surface,
power_preference: get_default_power_preference(),
},
|rao| wgpu::RequestAdapterOptions {
compatible_surface: rao.compatible_surface.or(compatible_surface),
power_preference: rao.power_preference,
},
));
let adapter = pollster::block_on(adapter).ok_or(Error::AdapterNotFound)?;
let (device, queue) =
pollster::block_on(adapter.request_device(&self.device_descriptor, None))
.map_err(Error::DeviceNotFound)?;
// The rest of this is technically a fixed-function pipeline... For now!
@ -596,70 +641,75 @@ impl<'req> PixelsBuilder<'req> {
depth: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: None,
label: Some("pixels_source_texture"),
size: texture_extent,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: self.texture_format,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let texture_view = texture.create_default_view();
let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let texture_format_size = get_texture_format_size(self.texture_format);
// Create the pixel buffer
let capacity = (width * height * texture_format_size) as usize;
let capacity = ((width * height) as f32 * texture_format_size) as usize;
let mut pixels = Vec::with_capacity(capacity);
pixels.resize_with(capacity, Default::default);
let present_mode = self.present_mode;
// Create swap chain
let surface_texture = self.surface_texture;
let surface_size = self.surface_texture.size;
let swap_chain = device.create_swap_chain(
&surface_texture.surface,
&surface,
&wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: surface_texture.width,
height: surface_texture.height,
width: surface_size.width,
height: surface_size.height,
present_mode,
},
);
let scaling_matrix_inverse = renderers::ScalingMatrix::new(
(width as f32, height as f32),
(surface_texture.width as f32, surface_texture.height as f32),
(surface_size.width as f32, surface_size.height as f32),
)
.transform
.inversed();
let scaling_renderer = ScalingRenderer::new(&mut device, &texture_view, &texture_extent);
let scaling_renderer = ScalingRenderer::new(&device, &texture_view, &texture_extent);
Ok(Pixels {
let context = PixelsContext {
device,
queue,
surface,
swap_chain,
surface_texture,
present_mode,
scaling_renderer,
texture,
texture_extent,
texture_format_size,
scaling_renderer,
};
Ok(Pixels {
context,
surface_size,
present_mode,
_phantom: std::marker::PhantomData,
pixels,
scaling_matrix_inverse,
})
}
}
fn get_texture_format_size(texture_format: wgpu::TextureFormat) -> u32 {
fn get_texture_format_size(texture_format: wgpu::TextureFormat) -> f32 {
match texture_format {
// 8-bit formats
wgpu::TextureFormat::R8Unorm
| wgpu::TextureFormat::R8Snorm
| wgpu::TextureFormat::R8Uint
| wgpu::TextureFormat::R8Sint => 1,
| wgpu::TextureFormat::R8Sint => 1.0,
// 16-bit formats
wgpu::TextureFormat::R16Uint
@ -668,7 +718,7 @@ fn get_texture_format_size(texture_format: wgpu::TextureFormat) -> u32 {
| wgpu::TextureFormat::Rg8Unorm
| wgpu::TextureFormat::Rg8Snorm
| wgpu::TextureFormat::Rg8Uint
| wgpu::TextureFormat::Rg8Sint => 2,
| wgpu::TextureFormat::Rg8Sint => 2.0,
// 32-bit formats
wgpu::TextureFormat::R32Uint
@ -688,7 +738,7 @@ fn get_texture_format_size(texture_format: wgpu::TextureFormat) -> u32 {
| wgpu::TextureFormat::Rg11b10Float
| wgpu::TextureFormat::Depth32Float
| wgpu::TextureFormat::Depth24Plus
| wgpu::TextureFormat::Depth24PlusStencil8 => 4,
| wgpu::TextureFormat::Depth24PlusStencil8 => 4.0,
// 64-bit formats
wgpu::TextureFormat::Rg32Uint
@ -696,12 +746,29 @@ fn get_texture_format_size(texture_format: wgpu::TextureFormat) -> u32 {
| wgpu::TextureFormat::Rg32Float
| wgpu::TextureFormat::Rgba16Uint
| wgpu::TextureFormat::Rgba16Sint
| wgpu::TextureFormat::Rgba16Float => 8,
| wgpu::TextureFormat::Rgba16Float => 8.0,
// 128-bit formats
wgpu::TextureFormat::Rgba32Uint
| wgpu::TextureFormat::Rgba32Sint
| wgpu::TextureFormat::Rgba32Float => 16,
| wgpu::TextureFormat::Rgba32Float => 16.0,
// Compressed formats
wgpu::TextureFormat::Bc1RgbaUnorm
| wgpu::TextureFormat::Bc1RgbaUnormSrgb
| wgpu::TextureFormat::Bc4RUnorm
| wgpu::TextureFormat::Bc4RSnorm => 0.5,
wgpu::TextureFormat::Bc2RgbaUnorm
| wgpu::TextureFormat::Bc2RgbaUnormSrgb
| wgpu::TextureFormat::Bc3RgbaUnorm
| wgpu::TextureFormat::Bc3RgbaUnormSrgb
| wgpu::TextureFormat::Bc5RgUnorm
| wgpu::TextureFormat::Bc5RgSnorm
| wgpu::TextureFormat::Bc6hRgbUfloat
| wgpu::TextureFormat::Bc6hRgbSfloat
| wgpu::TextureFormat::Bc7RgbaUnorm
| wgpu::TextureFormat::Bc7RgbaUnormSrgb => 1.0,
}
}
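
Returning f32 instead of u32 lets compressed formats report fractional bytes per pixel (BC1 packs a 4x4 block into 8 bytes, i.e. 0.5 bytes per pixel). A worked example of the capacity math above, assuming a hypothetical 320x240 buffer:

```rust
// Mirrors the capacity / bytes_per_row computations in PixelsBuilder::build
// and Pixels::render_with above; 320x240 is an arbitrary example size.
fn buffer_len(width: u32, height: u32, texture_format_size: f32) -> usize {
    ((width * height) as f32 * texture_format_size) as usize
}

fn main() {
    assert_eq!(buffer_len(320, 240, 4.0), 307_200); // Rgba8UnormSrgb: 4.0 B/px
    assert_eq!(buffer_len(320, 240, 0.5), 38_400); // Bc1RgbaUnorm: 0.5 B/px
}
```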

View file: src/macros.rs (deleted)

@ -1,17 +0,0 @@
/// Provides a macro and type for including SPIR-V shaders in const data.
///
/// In an ideal world, a shader will be compiled at build-time directly into the executable. This
/// is opposed to the typical method of including a shader, which reads a GLSL source code file
/// from the file system at start, compiles it, and sends it to the GPU. That process adds a
/// non-trivial amount of time to startup, and additional error handling code at runtime.
///
/// This macro moves all of that complexity to build-time. At least for the SPIR-V part of the
/// shader pipeline. (`gfx-hal` backends have their own SPIR-V-to-native compilers at runtime.)
#[macro_export]
macro_rules! include_spv {
($path:expr) => {
&wgpu::read_spirv(std::io::Cursor::new(&include_bytes!($path)[..]))
.expect(&format!("Invalid SPIR-V shader in file: {}", $path))
};
}

View file: src/renderers.rs

@ -1,5 +1,5 @@
use crate::include_spv;
use ultraviolet::Mat4;
use wgpu::util::DeviceExt;
/// The default renderer that scales your frame to the screen size.
#[derive(Debug)]
@ -13,15 +13,16 @@ pub struct ScalingRenderer {
impl ScalingRenderer {
pub(crate) fn new(
device: &mut wgpu::Device,
device: &wgpu::Device,
texture_view: &wgpu::TextureView,
texture_size: &wgpu::Extent3d,
) -> Self {
let vs_module = device.create_shader_module(include_spv!("../shaders/vert.spv"));
let fs_module = device.create_shader_module(include_spv!("../shaders/frag.spv"));
let vs_module = device.create_shader_module(wgpu::include_spirv!("../shaders/vert.spv"));
let fs_module = device.create_shader_module(wgpu::include_spirv!("../shaders/frag.spv"));
// Create a texture sampler with nearest neighbor
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("pixels_scaling_renderer_sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
@ -30,7 +31,8 @@ impl ScalingRenderer {
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: 0.0,
lod_max_clamp: 1.0,
compare: wgpu::CompareFunction::Always,
compare: None,
anisotropy_clamp: None,
});
// Create uniform buffer
@ -41,15 +43,16 @@ impl ScalingRenderer {
(texture_size.width as f32, texture_size.height as f32),
);
let transform_bytes = matrix.as_bytes();
let uniform_buffer = device.create_buffer_with_data(
&transform_bytes,
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("pixels_scaling_renderer_matrix_uniform_buffer"),
contents: &transform_bytes,
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
// Create bind group
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
bindings: &[
label: Some("pixels_scaling_renderer_bind_group_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
@ -58,47 +61,53 @@ impl ScalingRenderer {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::UniformBuffer { dynamic: false },
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
],
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
label: Some("pixels_scaling_renderer_bind_group"),
layout: &bind_group_layout,
bindings: &[
wgpu::Binding {
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(texture_view),
},
wgpu::Binding {
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::Binding {
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::Buffer {
buffer: &uniform_buffer,
range: 0..64,
},
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
],
});
// Create pipeline
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("pixels_scaling_renderer_pipeline_layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &pipeline_layout,
label: Some("pixels_scaling_renderer_pipeline"),
layout: Some(&pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
@ -110,6 +119,7 @@ impl ScalingRenderer {
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::None,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
@ -146,9 +156,10 @@ impl ScalingRenderer {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: render_target,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color::BLACK,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
@ -157,19 +168,10 @@ impl ScalingRenderer {
rpass.draw(0..6, 0..1);
}
pub(crate) fn resize(
&mut self,
device: &mut wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
width: u32,
height: u32,
) {
pub(crate) fn resize(&self, queue: &wgpu::Queue, width: u32, height: u32) {
let matrix = ScalingMatrix::new((self.width, self.height), (width as f32, height as f32));
let transform_bytes = matrix.as_bytes();
let temp_buf =
device.create_buffer_with_data(&transform_bytes, wgpu::BufferUsage::COPY_SRC);
encoder.copy_buffer_to_buffer(&temp_buf, 0, &self.uniform_buffer, 0, 64);
queue.write_buffer(&self.uniform_buffer, 0, &transform_bytes);
}
}
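
The render-pass hunks above interleave the removed wgpu 0.5 fields (load_op, store_op, clear_color) with the 0.6 Operations value that replaces them; consolidated, the new attachment shape looks like this. clear_frame is a hypothetical free function, not part of ScalingRenderer:

```rust
// Hypothetical helper showing the wgpu 0.6 color attachment shape used by
// ScalingRenderer::render and NoiseRenderer::render above.
fn clear_frame(encoder: &mut wgpu::CommandEncoder, render_target: &wgpu::TextureView) {
    // Beginning the pass and dropping it immediately is enough to clear.
    let _rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
        color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
            attachment: render_target,
            resolve_target: None,
            ops: wgpu::Operations {
                load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                store: true,
            },
        }],
        depth_stencil_attachment: None,
    });
}
```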