Make lights and cameras data-driven; move their setup into the relevant render passes

This commit is contained in:
Carter Anderson 2019-12-02 15:19:56 -08:00
parent df5c74a0ea
commit 2a27cacba8
10 changed files with 266 additions and 207 deletions

View file

@ -1,6 +1,11 @@
use bevy::*;
use bevy::{render::*, asset::{Asset, AssetStorage}, math};
// fn build_move_system() -> Box<dyn Scheduleable> {
// SystemBuilder::new("MoveSystem")
// .with_query(<>)
// }
fn main() {
let universe = Universe::new();
let mut world = universe.create_world();
@ -16,12 +21,14 @@ fn main() {
world.resources.insert(mesh_storage);
world.insert((), vec![
// plane
(
Material::new(math::vec4(0.1, 0.2, 0.1, 1.0)),
plane_handle.clone(),
LocalToWorld(math::translation(&math::vec3(0.0, 0.0, 0.0))),
Translation::new(0.0, 0.0, 0.0)
),
// cubes
(
Material::new(math::vec4(0.1, 0.1, 0.6, 1.0)),
mesh_handle.clone(),
@ -35,6 +42,57 @@ fn main() {
Translation::new(0.0, 0.0, 0.0)
),
]);
world.insert((), vec![
// lights
(
Light {
pos: math::vec3(7.0, -5.0, 10.0),
color: wgpu::Color {
r: 0.5,
g: 1.0,
b: 0.5,
a: 1.0,
},
fov: f32::to_radians(60.0),
depth: 1.0 .. 20.0,
target_view: None,
},
LocalToWorld(math::translation(&math::vec3(7.0, -5.0, 10.0))),
Translation::new(0.0, 0.0, 0.0)
),
(
Light {
pos: math::vec3(-5.0, 7.0, 10.0),
color: wgpu::Color {
r: 1.0,
g: 0.5,
b: 0.5,
a: 1.0,
},
fov: f32::to_radians(45.0),
depth: 1.0 .. 20.0,
target_view: None,
},
LocalToWorld(math::translation(&math::vec3(-1.5, 0.0, 1.0))),
Translation::new(0.0, 0.0, 0.0)
),
]);
world.insert((), vec![
// camera
(
Camera::new(CameraType::Projection {
fov: math::quarter_pi(),
near: 1.0,
far: 20.0,
aspect_ratio: 1.0,
}),
LocalToWorld(math::look_at_rh(&math::vec3(3.0, -10.0, 6.0),
&math::vec3(0.0, 0.0, 0.0),
&math::vec3(0.0, 0.0, 1.0),)),
Translation::new(0.0, 0.0, 0.0)
)
]);
// let transform_system_bundle = transform_system_bundle::build(&mut world);
Application::run(universe, world);
}

View file

@ -2,6 +2,7 @@ use winit::{
event,
event::WindowEvent,
event_loop::{ControlFlow, EventLoop},
window::Window,
};
use zerocopy::AsBytes;
@ -10,33 +11,33 @@ use legion::prelude::*;
use std::sync::Arc;
use std::mem;
use wgpu::{Surface, Device, Queue, SwapChain, SwapChainDescriptor};
use crate::{vertex::*, render::*, math, LocalToWorld, ApplicationStage};
pub struct Application
{
pub universe: Universe,
pub world: World,
pub device: Device,
pub queue: Queue,
pub surface: Surface,
pub window: Window,
pub swap_chain: SwapChain,
pub swap_chain_descriptor: SwapChainDescriptor,
pub scheduler: SystemScheduler<ApplicationStage>,
pub shadow_pass: ShadowPass,
pub forward_pass: ForwardPass,
camera_position: math::Vec3,
camera_fov: f32,
pub render_passes: Vec<Box<dyn Pass>>,
}
impl Application {
pub const MAX_LIGHTS: usize = 10;
fn init(
universe: Universe,
mut world: World,
sc_desc: &wgpu::SwapChainDescriptor,
device: &wgpu::Device,
) -> (Self, Option<wgpu::CommandBuffer>)
{
let vertex_size = mem::size_of::<Vertex>();
fn add_default_passes(&mut self) {
let light_uniform_size =
(Self::MAX_LIGHTS * mem::size_of::<LightRaw>()) as wgpu::BufferAddress;
let local_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
self.device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
@ -44,16 +45,25 @@ impl Application {
}],
});
let light_uniform_buffer = Arc::new(UniformBuffer {
buffer: self.device.create_buffer(&wgpu::BufferDescriptor {
size: light_uniform_size,
usage: wgpu::BufferUsage::UNIFORM
| wgpu::BufferUsage::COPY_SRC
| wgpu::BufferUsage::COPY_DST,
}),
size: light_uniform_size,
});
let mut entities = <Write<Material>>::query();
for mut entity in entities.iter(&mut world) {
let mut materials = <Write<Material>>::query();
for mut material in materials.iter(&mut self.world) {
let entity_uniform_size = mem::size_of::<MaterialUniforms>() as wgpu::BufferAddress;
let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
let uniform_buf = self.device.create_buffer(&wgpu::BufferDescriptor {
size: entity_uniform_size,
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &local_bind_group_layout,
bindings: &[wgpu::Binding {
binding: 0,
@ -64,12 +74,17 @@ impl Application {
}],
});
entity.bind_group = Some(bind_group);
entity.uniform_buf = Some(uniform_buf);
material.bind_group = Some(bind_group);
material.uniform_buf = Some(uniform_buf);
}
let camera_position = math::vec3(3.0f32, -10.0, 6.0);
let camera_fov = math::quarter_pi();
let light_count = <Read<Light>>::query().iter(&mut self.world).count();
let forward_uniforms = ForwardUniforms {
proj: math::Mat4::identity().into(),
num_lights: [light_count as u32, 0, 0, 0],
};
let vertex_size = mem::size_of::<Vertex>();
let vb_desc = wgpu::VertexBufferDescriptor {
stride: vertex_size as wgpu::BufferAddress,
@ -88,162 +103,94 @@ impl Application {
],
};
let light_uniform_size =
(Self::MAX_LIGHTS * mem::size_of::<LightRaw>()) as wgpu::BufferAddress;
let light_uniform_buffer = Arc::new(UniformBuffer {
buffer: device.create_buffer(&wgpu::BufferDescriptor {
size: light_uniform_size,
usage: wgpu::BufferUsage::UNIFORM
| wgpu::BufferUsage::COPY_SRC
| wgpu::BufferUsage::COPY_DST,
}),
size: light_uniform_size,
});
let shadow_pass = ShadowPass::new(device, light_uniform_buffer.clone(), vb_desc.clone(), &local_bind_group_layout, Self::MAX_LIGHTS as u32);
let mut shadow_target_views = (0 .. 2)
.map(|i| {
Some(shadow_pass.shadow_texture.create_view(&wgpu::TextureViewDescriptor {
format: ShadowPass::SHADOW_FORMAT,
dimension: wgpu::TextureViewDimension::D2,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: 1,
base_array_layer: i as u32,
array_layer_count: 1,
}))
})
.collect::<Vec<_>>();
let lights = vec![
(Light {
pos: math::vec3(7.0, -5.0, 10.0),
color: wgpu::Color {
r: 0.5,
g: 1.0,
b: 0.5,
a: 1.0,
},
fov: f32::to_radians(60.0),
depth: 1.0 .. 20.0,
target_view: shadow_target_views[0].take().unwrap(),
},),
(Light {
pos: math::vec3(-5.0, 7.0, 10.0),
color: wgpu::Color {
r: 1.0,
g: 0.5,
b: 0.5,
a: 1.0,
},
fov: f32::to_radians(45.0),
depth: 1.0 .. 20.0,
target_view: shadow_target_views[1].take().unwrap(),
},),
];
let light_count = lights.len();
world.insert((), lights);
let matrix = camera::get_projection_view_matrix(&camera_position, camera_fov, sc_desc.width as f32 / sc_desc.height as f32, 1.0, 20.0);
let forward_uniforms = ForwardUniforms {
proj: *matrix.as_ref(),
num_lights: [light_count as u32, 0, 0, 0],
};
let forward_pass = ForwardPass::new(device, forward_uniforms, light_uniform_buffer.clone(), &shadow_pass, vb_desc, &local_bind_group_layout, sc_desc);
let this = Application {
universe,
world,
scheduler: SystemScheduler::new(),
shadow_pass,
forward_pass,
camera_position,
camera_fov
};
(this, None)
let shadow_pass = ShadowPass::new(&mut self.device, &mut self.world, light_uniform_buffer.clone(), vb_desc.clone(), &local_bind_group_layout, Self::MAX_LIGHTS as u32);
let forward_pass = ForwardPass::new(&mut self.device, forward_uniforms, light_uniform_buffer.clone(), &shadow_pass, vb_desc, &local_bind_group_layout, &self.swap_chain_descriptor);
self.render_passes.push(Box::new(shadow_pass));
self.render_passes.push(Box::new(forward_pass));
}
fn resize(
&mut self,
sc_desc: &wgpu::SwapChainDescriptor,
device: &wgpu::Device,
) -> Option<wgpu::CommandBuffer>
fn resize(&mut self, width: u32, height: u32)
{
let command_buf = {
let mx_total = camera::get_projection_view_matrix(&self.camera_position, self.camera_fov, sc_desc.width as f32 / sc_desc.height as f32, 1.0, 20.0);
let mx_ref: [[f32; 4]; 4] = mx_total.into();
self.swap_chain_descriptor.width = width;
self.swap_chain_descriptor.height = height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_descriptor);
let mut encoder =
self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
for (mut camera, local_to_world) in <(Write<Camera>, Read<LocalToWorld>)>::query().iter(&mut self.world) {
camera.update(self.swap_chain_descriptor.width, self.swap_chain_descriptor.height);
let camera_matrix: [[f32; 4]; 4] = (camera.view_matrix * local_to_world.0).into();
let temp_buf =
device.create_buffer_with_data(mx_ref.as_bytes(), wgpu::BufferUsage::COPY_SRC);
self.device.create_buffer_with_data(camera_matrix.as_bytes(), wgpu::BufferUsage::COPY_SRC);
for pass in self.render_passes.iter() {
if let Some(buffer) = pass.get_camera_uniform_buffer() {
encoder.copy_buffer_to_buffer(&temp_buf, 0, buffer, 0, 64);
}
}
}
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
encoder.copy_buffer_to_buffer(&temp_buf, 0, &self.forward_pass.forward_uniform_buffer, 0, 64);
encoder.finish()
};
let command_buffer = encoder.finish();
self.forward_pass.update_swap_chain_descriptor(device, sc_desc);
Some(command_buf)
for pass in self.render_passes.iter_mut() {
pass.resize(&mut self.device, &mut self.swap_chain_descriptor);
}
self.queue.submit(&[command_buffer]);
}
fn update(&mut self, _: WindowEvent)
{
}
fn render(
&mut self,
frame: &wgpu::SwapChainOutput,
device: &wgpu::Device,
) -> wgpu::CommandBuffer
fn render(&mut self)
{
let mut frame = self.swap_chain
.get_next_texture()
.expect("Timeout when acquiring next swap chain texture");
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
let mut entities = <(Read<Material>, Read<LocalToWorld>)>::query();
let entities_count = entities.iter(&mut self.world).count();
let size = mem::size_of::<MaterialUniforms>();
let temp_buf_data = self.device
.create_buffer_mapped(entities_count * size, wgpu::BufferUsage::COPY_SRC);
for ((entity, transform), slot) in entities.iter(&mut self.world)
.zip(temp_buf_data.data.chunks_exact_mut(size))
{
let mut entities = <(Read<Material>, Read<LocalToWorld>)>::query();
let entities_count = entities.iter(&mut self.world).count();
let size = mem::size_of::<MaterialUniforms>();
let temp_buf_data = device
.create_buffer_mapped(entities_count * size, wgpu::BufferUsage::COPY_SRC);
for ((entity, transform), slot) in entities.iter(&mut self.world)
.zip(temp_buf_data.data.chunks_exact_mut(size))
{
slot.copy_from_slice(
MaterialUniforms {
model: transform.0.into(),
color: [
entity.color.x as f32,
entity.color.y as f32,
entity.color.z as f32,
entity.color.w as f32,
],
}
.as_bytes(),
);
}
let temp_buf = temp_buf_data.finish();
for (i, (entity, _)) in entities.iter(&mut self.world).enumerate() {
encoder.copy_buffer_to_buffer(
&temp_buf,
(i * size) as wgpu::BufferAddress,
entity.uniform_buf.as_ref().unwrap(),
0,
size as wgpu::BufferAddress,
);
}
slot.copy_from_slice(
MaterialUniforms {
model: transform.0.into(),
color: [
entity.color.x as f32,
entity.color.y as f32,
entity.color.z as f32,
entity.color.w as f32,
],
}
.as_bytes(),
);
}
self.shadow_pass.render(device, frame, &mut encoder, &mut self.world);
self.forward_pass.render(device, frame, &mut encoder, &mut self.world);
let temp_buf = temp_buf_data.finish();
encoder.finish()
for (i, (entity, _)) in entities.iter(&mut self.world).enumerate() {
encoder.copy_buffer_to_buffer(
&temp_buf,
(i * size) as wgpu::BufferAddress,
entity.uniform_buf.as_ref().unwrap(),
0,
size as wgpu::BufferAddress,
);
}
for pass in self.render_passes.iter_mut() {
pass.render(&mut self.device, &mut frame, &mut encoder, &mut self.world);
}
let command_buffer = encoder.finish();
self.queue.submit(&[command_buffer]);
}
#[allow(dead_code)]
@ -260,14 +207,14 @@ impl Application {
)
.unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: wgpu::Limits::default(),
});
let (_window, hidpi_factor, size, surface) = {
let (window, hidpi_factor, size, surface) = {
let window = winit::window::Window::new(&event_loop).unwrap();
window.set_title("bevy");
let hidpi_factor = window.hidpi_factor();
@ -276,20 +223,30 @@ impl Application {
(window, hidpi_factor, size, surface)
};
let mut sc_desc = wgpu::SwapChainDescriptor {
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width.round() as u32,
height: size.height.round() as u32,
present_mode: wgpu::PresentMode::Vsync,
};
let mut swap_chain = device.create_swap_chain(&surface, &sc_desc);
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
log::info!("Initializing the example...");
let (mut example, init_command_buf) = Application::init(universe, world, &sc_desc, &device);
if let Some(command_buf) = init_command_buf {
queue.submit(&[command_buf]);
}
let mut app = Application {
universe,
world,
device,
surface,
window,
queue,
swap_chain,
swap_chain_descriptor,
scheduler: SystemScheduler::new(),
render_passes: Vec::new(),
};
app.add_default_passes();
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
@ -305,13 +262,9 @@ impl Application {
} => {
let physical = size.to_physical(hidpi_factor);
log::info!("Resizing to {:?}", physical);
sc_desc.width = physical.width.round() as u32;
sc_desc.height = physical.height.round() as u32;
swap_chain = device.create_swap_chain(&surface, &sc_desc);
let command_buf = example.resize(&sc_desc, &device);
if let Some(command_buf) = command_buf {
queue.submit(&[command_buf]);
}
let width = physical.width.round() as u32;
let height = physical.height.round() as u32;
app.resize(width, height);
}
event::Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
@ -327,16 +280,12 @@ impl Application {
*control_flow = ControlFlow::Exit;
}
_ => {
example.update(event);
app.update(event);
}
},
event::Event::EventsCleared => {
let frame = swap_chain
.get_next_texture()
.expect("Timeout when acquiring next swap chain texture");
example.scheduler.execute(&mut example.world);
let command_buf = example.render(&frame, &device);
queue.submit(&[command_buf]);
app.scheduler.execute(&mut app.world);
app.render();
}
_ => (),
}

View file

@ -7,7 +7,7 @@ edition = "2018"
license = "MIT"
[dependencies]
legion = { git = "https://github.com/TomGillen/legion.git" }
legion = { git = "https://github.com/TomGillen/legion.git", rev = "8628b227bcbe57582fffb5e80e73c634ec4eebd9" }
#legion = { path = "../legion" }
log = "0.4"
nalgebra = { version = "0.19.0" }

View file

@ -11,5 +11,7 @@ pub use wgpu;
pub use legion;
pub use legion_transform;
pub use legion::prelude::*;
pub use legion::schedule::Schedulable;
pub use legion_transform::prelude::*;
pub use legion_transform::transform_system_bundle;
pub use nalgebra_glm as math;

View file

@ -1,15 +1,41 @@
use crate::math;
pub fn get_projection_view_matrix(eye: &math::Vec3, fov: f32, aspect_ratio: f32, near: f32, far: f32) -> math::Mat4 {
/// Describes how a `Camera` projects the scene.
pub enum CameraType {
    /// Perspective projection. `aspect_ratio` is refreshed from the surface
    /// size on each `Camera::update` call.
    Projection {
        fov: f32,
        aspect_ratio: f32,
        near: f32,
        far: f32
    }
}
/// Camera component: the cached view/projection matrix plus the
/// parameters (`camera_type`) used to rebuild it on resize.
pub struct Camera {
    // Rebuilt by `update`; starts as identity until the first update.
    pub view_matrix: math::Mat4,
    pub camera_type: CameraType,
}
impl Camera {
    /// Creates a camera of the given type. The view matrix starts as the
    /// identity and is populated on the first call to `update`.
    pub fn new(camera_type: CameraType) -> Self {
        Camera {
            view_matrix: math::identity(),
            camera_type,
        }
    }

    /// Recomputes `view_matrix` for a surface of `width` x `height` pixels.
    pub fn update(&mut self, width: u32, height: u32) {
        match &mut self.camera_type {
            CameraType::Projection { aspect_ratio, fov, near, far } => {
                // Bind `aspect_ratio` by mutable reference and write through it so
                // the stored value is actually persisted. The original `mut
                // aspect_ratio` binding copied the f32, leaving the stored
                // aspect ratio stale after every resize.
                *aspect_ratio = width as f32 / height as f32;
                // `get_projection_matrix` takes (fov, aspect_ratio, near, far);
                // the original call passed fov and aspect_ratio swapped.
                self.view_matrix = get_projection_matrix(*fov, *aspect_ratio, *near, *far)
            }
        }
    }
}
pub fn get_projection_matrix(fov: f32, aspect_ratio: f32, near: f32, far: f32) -> math::Mat4 {
let projection = math::perspective(aspect_ratio, fov, near, far);
let view = math::look_at_rh::<f32>(
&eye,
&math::vec3(0.0, 0.0, 0.0),
&math::vec3(0.0, 0.0, 1.0),
);
opengl_to_wgpu_matrix() * projection * view
opengl_to_wgpu_matrix() * projection
}
pub fn opengl_to_wgpu_matrix() -> math::Mat4 {

View file

@ -2,7 +2,7 @@ use crate::{render::*, asset::*, render::mesh::*};
use legion::prelude::*;
use std::{mem, sync::Arc};
use zerocopy::{AsBytes, FromBytes};
use wgpu::{CommandEncoder, Device, BindGroupLayout, VertexBufferDescriptor, SwapChainDescriptor, SwapChainOutput};
use wgpu::{Buffer, CommandEncoder, Device, BindGroupLayout, VertexBufferDescriptor, SwapChainDescriptor, SwapChainOutput};
#[repr(C)]
#[derive(Clone, Copy, AsBytes, FromBytes)]
@ -60,6 +60,14 @@ impl Pass for ForwardPass {
};
}
}
fn resize(&mut self, device: &Device, frame: &SwapChainDescriptor) {
self.depth_texture = Self::get_depth_texture(device, frame);
}
fn get_camera_uniform_buffer(&self) -> Option<&Buffer> {
Some(&self.forward_uniform_buffer)
}
}
impl ForwardPass {
@ -198,11 +206,6 @@ impl ForwardPass {
}
}
pub fn update_swap_chain_descriptor(&mut self, device: &Device, swap_chain_descriptor: &SwapChainDescriptor) {
self.depth_texture = Self::get_depth_texture(device, swap_chain_descriptor);
}
fn get_depth_texture(device: &Device, swap_chain_descriptor: &SwapChainDescriptor) -> wgpu::TextureView {
let texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d {

View file

@ -8,7 +8,7 @@ pub struct Light {
pub color: wgpu::Color,
pub fov: f32,
pub depth: Range<f32>,
pub target_view: wgpu::TextureView,
pub target_view: Option<wgpu::TextureView>,
}
#[repr(C)]
@ -20,9 +20,10 @@ pub struct LightRaw {
}
impl Light {
pub fn to_raw(&self) -> LightRaw {
pub fn to_raw(&self, transform: &math::Mat4) -> LightRaw {
let proj = camera::get_projection_matrix(self.fov, 1.0, self.depth.start, self.depth.end) * transform;
LightRaw {
proj: camera::get_projection_view_matrix(&self.pos, self.fov, 1.0, self.depth.start, self.depth.end).into(),
proj: proj.into(),
pos: [self.pos.x, self.pos.y, self.pos.z, 1.0],
color: [
self.color.r as f32,

View file

@ -14,6 +14,7 @@ pub use shader::*;
pub use pass::*;
pub use material::*;
pub use mesh::*;
pub use camera::*;
pub struct UniformBuffer {
pub buffer: wgpu::Buffer,

View file

@ -1,6 +1,8 @@
use legion::world::World;
use wgpu::{CommandEncoder, Device, SwapChainOutput};
use wgpu::{Buffer, CommandEncoder, Device, SwapChainDescriptor, SwapChainOutput};
/// A render pass that can record draw commands for the current frame.
pub trait Pass {
    /// Records this pass's commands for `frame` into `encoder`, reading any
    /// entities it needs from `world`.
    fn render(&mut self, device: &Device, frame: &SwapChainOutput, encoder: &mut CommandEncoder, world: &mut World);
    /// Called when the swap chain is recreated so the pass can rebuild
    /// size-dependent resources (e.g. depth textures).
    fn resize(&mut self, device: &Device, frame: &SwapChainDescriptor);
    /// The uniform buffer that receives the camera matrix, if this pass has one.
    /// Returning `None` means the pass does not consume camera data.
    fn get_camera_uniform_buffer(&self) -> Option<&Buffer>;
}

View file

@ -1,5 +1,5 @@
use crate::{render::*, asset::*};
use wgpu::{BindGroupLayout, CommandEncoder, Device, VertexBufferDescriptor, SwapChainOutput};
use crate::{render::*, asset::*, LocalToWorld};
use wgpu::{BindGroupLayout, Buffer, CommandEncoder, Device, VertexBufferDescriptor, SwapChainOutput, SwapChainDescriptor};
use legion::prelude::*;
use zerocopy::AsBytes;
use std::{mem, sync::Arc};
@ -22,7 +22,7 @@ pub struct ShadowUniforms {
impl Pass for ShadowPass {
fn render(&mut self, device: &Device, _: &SwapChainOutput, encoder: &mut CommandEncoder, world: &mut World) {
let mut light_query = <Read<Light>>::query();
let mut light_query = <(Read<Light>, Read<LocalToWorld>)>::query();
let mut mesh_query = <(Read<Material>, Read<Handle<Mesh>>)>::query();
let light_count = light_query.iter(world).count();
@ -32,11 +32,11 @@ impl Pass for ShadowPass {
let total_size = size * light_count;
let temp_buf_data =
device.create_buffer_mapped(total_size, wgpu::BufferUsage::COPY_SRC);
for (light, slot) in light_query
for ((light, local_to_world), slot) in light_query
.iter(world)
.zip(temp_buf_data.data.chunks_exact_mut(size))
{
slot.copy_from_slice(light.to_raw().as_bytes());
slot.copy_from_slice(light.to_raw(&local_to_world.0).as_bytes());
}
encoder.copy_buffer_to_buffer(
&temp_buf_data.finish(),
@ -47,7 +47,21 @@ impl Pass for ShadowPass {
);
}
for (i, light) in light_query.iter_immutable(world).enumerate() {
for (i, (mut light, _)) in <(Write<Light>, Read<LocalToWorld>)>::query().iter(world).enumerate() {
if let None = light.target_view {
light.target_view = Some(self.shadow_texture.create_view(&wgpu::TextureViewDescriptor {
format: ShadowPass::SHADOW_FORMAT,
dimension: wgpu::TextureViewDimension::D2,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: 1,
base_array_layer: i as u32,
array_layer_count: 1,
}));
}
}
for (i, (light, _)) in light_query.iter_immutable(world).enumerate() {
// The light uniform buffer already has the projection,
// let's just copy it over to the shadow uniform buffer.
encoder.copy_buffer_to_buffer(
@ -61,7 +75,7 @@ impl Pass for ShadowPass {
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &light.target_view,
attachment: light.target_view.as_ref().unwrap(),
depth_load_op: wgpu::LoadOp::Clear,
depth_store_op: wgpu::StoreOp::Store,
stencil_load_op: wgpu::LoadOp::Clear,
@ -87,6 +101,9 @@ impl Pass for ShadowPass {
}
}
}
fn resize(&mut self, _: &Device, _: &SwapChainDescriptor) { }
fn get_camera_uniform_buffer(&self) -> Option<&Buffer> { None }
}
impl ShadowPass {
@ -97,7 +114,7 @@ impl ShadowPass {
depth: 1,
};
pub fn new(device: &Device, light_uniform_buffer: Arc::<UniformBuffer>, vertex_buffer_descriptor: VertexBufferDescriptor, local_bind_group_layout: &BindGroupLayout, max_lights: u32) -> ShadowPass {
pub fn new(device: &Device, _: &World, light_uniform_buffer: Arc::<UniformBuffer>, vertex_buffer_descriptor: VertexBufferDescriptor, local_bind_group_layout: &BindGroupLayout, max_lights: u32) -> ShadowPass {
// Create pipeline layout
let bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {