// bevy/src/render/render_graph_2/renderers/wgpu_renderer.rs

use crate::{
asset::{AssetStorage, Handle},
legion::prelude::*,
render::{
render_graph_2::{
resource_name, update_shader_assignments, BindGroup, BindType,
DynamicUniformBufferInfo, PassDescriptor, PipelineDescriptor, PipelineLayout,
PipelineLayoutType, RenderGraph, RenderPass, RenderPassColorAttachmentDescriptor,
RenderPassDepthStencilAttachmentDescriptor, Renderer, ResourceInfo, TextureDescriptor,
},
Shader,
},
};
use std::{collections::HashMap, ops::Deref};
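/// wgpu-backed implementation of the `Renderer` trait. Owns the wgpu device and
/// queue and caches pipelines, buffers, textures, and bind groups so GPU resources
/// are created once and then looked up by name (or by bind group layout hash).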
pub struct WgpuRenderer {
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub surface: Option<wgpu::Surface>,
pub encoder: Option<wgpu::CommandEncoder>,
pub swap_chain_descriptor: wgpu::SwapChainDescriptor,
pub render_pipelines: HashMap<Handle<PipelineDescriptor>, wgpu::RenderPipeline>,
pub buffers: HashMap<String, wgpu::Buffer>,
pub textures: HashMap<String, wgpu::TextureView>,
pub resource_info: HashMap<String, ResourceInfo>,
pub bind_groups: HashMap<u64, BindGroupInfo>,
pub bind_group_layouts: HashMap<u64, wgpu::BindGroupLayout>,
pub dynamic_uniform_buffer_info: HashMap<String, DynamicUniformBufferInfo>,
}
impl WgpuRenderer {
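    /// Requests a default adapter and device and prepares a swap chain descriptor.
    /// The swap chain itself is created later, in `resize`, once the window size is
    /// known. A minimal usage sketch (window and `World` setup are assumed to happen
    /// elsewhere, as in `initialize`):
    ///
    /// ```ignore
    /// let mut renderer = WgpuRenderer::new();
    /// renderer.initialize(&mut world, &mut render_graph);
    /// ```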
pub fn new() -> Self {
let adapter = wgpu::Adapter::request(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
},
wgpu::BackendBit::PRIMARY,
)
.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: wgpu::Limits::default(),
});
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: 0,
height: 0,
present_mode: wgpu::PresentMode::Vsync,
};
WgpuRenderer {
device,
queue,
surface: None,
encoder: None,
swap_chain_descriptor,
render_pipelines: HashMap::new(),
buffers: HashMap::new(),
textures: HashMap::new(),
resource_info: HashMap::new(),
bind_groups: HashMap::new(),
bind_group_layouts: HashMap::new(),
dynamic_uniform_buffer_info: HashMap::new(),
}
}
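    /// Builds a `wgpu::RenderPipeline` from a `PipelineDescriptor`. If the layout is
    /// `Reflected(None)`, it is first reflected from the shaders' SPIR-V, with uniform
    /// bindings promoted to dynamic when a matching dynamic uniform buffer exists;
    /// any bind group layouts not yet in the cache are created along the way.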
pub fn create_render_pipeline(
dynamic_uniform_buffer_info: &HashMap<String, DynamicUniformBufferInfo>,
pipeline_descriptor: &mut PipelineDescriptor,
bind_group_layouts: &mut HashMap<u64, wgpu::BindGroupLayout>,
device: &wgpu::Device,
vertex_shader: &Shader,
fragment_shader: Option<&Shader>,
) -> wgpu::RenderPipeline {
        let vertex_spirv = vertex_shader.get_spirv_shader(None);
        let fragment_spirv = fragment_shader.map(|f| f.get_spirv_shader(None));

        // create shader modules from the compiled SPIR-V shaders (the fragment path
        // now mirrors the vertex path instead of reusing the uncompiled shader)
        let vertex_shader_module = Self::create_shader_module(device, &vertex_spirv, None);
        let fragment_shader_module = fragment_spirv
            .as_ref()
            .map(|fragment_spirv| Self::create_shader_module(device, fragment_spirv, None));
if let PipelineLayoutType::Reflected(None) = pipeline_descriptor.layout {
let mut layouts = vec![vertex_spirv.reflect_layout().unwrap()];
if let Some(ref fragment_spirv) = fragment_spirv {
layouts.push(fragment_spirv.reflect_layout().unwrap());
}
            let mut layout = PipelineLayout::from_shader_layouts(&mut layouts);
            // mark a uniform binding as dynamic if there is a matching dynamic uniform buffer info
            for bind_group in layout.bind_groups.iter_mut() {
                for binding in bind_group.bindings.iter_mut() {
                    if let BindType::Uniform {
                        ref mut dynamic, ..
                    } = binding.bind_type
                    {
                        if dynamic_uniform_buffer_info.contains_key(&binding.name) {
                            *dynamic = true;
                        }
                    }
                }
            }
pipeline_descriptor.layout = PipelineLayoutType::Reflected(Some(layout));
}
let layout = pipeline_descriptor.get_layout_mut().unwrap();
        // set up new bind group layouts
for bind_group in layout.bind_groups.iter_mut() {
let bind_group_id = bind_group.get_or_update_hash();
            if !bind_group_layouts.contains_key(&bind_group_id) {
                let bind_group_layout_bindings = bind_group
                    .bindings
                    .iter()
                    .map(|binding| wgpu::BindGroupLayoutBinding {
                        binding: binding.index,
                        visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
                        ty: (&binding.bind_type).into(),
                    })
                    .collect::<Vec<wgpu::BindGroupLayoutBinding>>();
                let bind_group_layout =
                    device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                        bindings: bind_group_layout_bindings.as_slice(),
                    });
bind_group_layouts.insert(bind_group_id, bind_group_layout);
}
}
// collect bind group layout references
let bind_group_layouts = layout
.bind_groups
.iter()
.map(|bind_group| {
let bind_group_id = bind_group.get_hash().unwrap();
bind_group_layouts.get(&bind_group_id).unwrap()
})
.collect::<Vec<&wgpu::BindGroupLayout>>();
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: bind_group_layouts.as_slice(),
});
let mut render_pipeline_descriptor = wgpu::RenderPipelineDescriptor {
layout: &pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vertex_shader_module,
entry_point: "main",
},
fragment_stage: match fragment_shader {
Some(fragment_shader) => Some(wgpu::ProgrammableStageDescriptor {
entry_point: "main",
module: fragment_shader_module.as_ref().unwrap(),
}),
None => None,
},
rasterization_state: pipeline_descriptor.rasterization_state.clone(),
primitive_topology: pipeline_descriptor.primitive_topology,
color_states: &pipeline_descriptor.color_states,
depth_stencil_state: pipeline_descriptor.depth_stencil_state.clone(),
index_format: pipeline_descriptor.index_format,
vertex_buffers: &pipeline_descriptor
.vertex_buffer_descriptors
.iter()
.map(|v| v.into())
.collect::<Vec<wgpu::VertexBufferDescriptor>>(),
sample_count: pipeline_descriptor.sample_count,
sample_mask: pipeline_descriptor.sample_mask,
alpha_to_coverage_enabled: pipeline_descriptor.alpha_to_coverage_enabled,
};
device.create_render_pipeline(&mut render_pipeline_descriptor)
}
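    /// Begins a render pass on the given encoder, resolving each named attachment to
    /// either the swap chain's current frame or a cached texture view.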
pub fn create_render_pass<'a>(
&self,
pass_descriptor: &PassDescriptor,
encoder: &'a mut wgpu::CommandEncoder,
frame: &'a wgpu::SwapChainOutput,
) -> wgpu::RenderPass<'a> {
encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &pass_descriptor
.color_attachments
.iter()
.map(|c| self.create_wgpu_color_attachment_descriptor(c, frame))
.collect::<Vec<wgpu::RenderPassColorAttachmentDescriptor>>(),
depth_stencil_attachment: pass_descriptor
.depth_stencil_attachment
.as_ref()
.map(|d| self.create_wgpu_depth_stencil_attachment_descriptor(d, frame)),
})
}
fn create_wgpu_color_attachment_descriptor<'a>(
&'a self,
color_attachment_descriptor: &RenderPassColorAttachmentDescriptor,
frame: &'a wgpu::SwapChainOutput,
) -> wgpu::RenderPassColorAttachmentDescriptor<'a> {
let attachment = match color_attachment_descriptor.attachment.as_str() {
resource_name::texture::SWAP_CHAIN => &frame.view,
_ => self
.textures
.get(&color_attachment_descriptor.attachment)
.unwrap(),
};
        let resolve_target = match color_attachment_descriptor.resolve_target {
            Some(ref target) => match target.as_str() {
                resource_name::texture::SWAP_CHAIN => Some(&frame.view),
                // look up named resolve targets in the texture cache instead of
                // silently falling back to the swap chain view
                _ => Some(self.textures.get(target).unwrap()),
            },
            None => None,
        };
wgpu::RenderPassColorAttachmentDescriptor {
store_op: color_attachment_descriptor.store_op,
load_op: color_attachment_descriptor.load_op,
clear_color: color_attachment_descriptor.clear_color,
attachment,
resolve_target,
}
}
fn create_wgpu_depth_stencil_attachment_descriptor<'a>(
&'a self,
depth_stencil_attachment_descriptor: &RenderPassDepthStencilAttachmentDescriptor,
frame: &'a wgpu::SwapChainOutput,
) -> wgpu::RenderPassDepthStencilAttachmentDescriptor<&'a wgpu::TextureView> {
let attachment = match depth_stencil_attachment_descriptor.attachment.as_str() {
resource_name::texture::SWAP_CHAIN => &frame.view,
_ => self
.textures
.get(&depth_stencil_attachment_descriptor.attachment)
.unwrap(),
};
wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment,
clear_depth: depth_stencil_attachment_descriptor.clear_depth,
clear_stencil: depth_stencil_attachment_descriptor.clear_stencil,
depth_load_op: depth_stencil_attachment_descriptor.depth_load_op,
depth_store_op: depth_stencil_attachment_descriptor.depth_store_op,
stencil_load_op: depth_stencil_attachment_descriptor.stencil_load_op,
stencil_store_op: depth_stencil_attachment_descriptor.stencil_store_op,
}
}
fn add_resource_info(&mut self, name: &str, resource_info: ResourceInfo) {
self.resource_info.insert(name.to_string(), resource_info);
}
// TODO: consider moving this to a resource provider
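    /// Creates and caches a `wgpu::BindGroup` for the given bind group if one does not
    /// exist yet, allocating empty placeholder uniform buffers for bindings whose
    /// resources have not been created, and returns the bind group's hash id.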
fn setup_bind_group(&mut self, bind_group: &BindGroup) -> u64 {
let bind_group_id = bind_group.get_hash().unwrap();
        if !self.bind_groups.contains_key(&bind_group_id) {
let mut unset_uniforms = Vec::new();
// if a uniform resource buffer doesn't exist, create a new empty one
for binding in bind_group.bindings.iter() {
                if !self.resource_info.contains_key(&binding.name) {
println!(
"Warning: creating new empty buffer for binding {}",
binding.name
);
unset_uniforms.push(binding.name.to_string());
if let BindType::Uniform { .. } = &binding.bind_type {
let size = binding.bind_type.get_uniform_size().unwrap();
self.create_buffer(
&binding.name,
size,
wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
)
}
}
}
// create wgpu Bindings
let bindings = bind_group
.bindings
.iter()
.map(|binding| {
let resource_info = self.resource_info.get(&binding.name).unwrap();
wgpu::Binding {
binding: binding.index,
resource: match &binding.bind_type {
                        BindType::Uniform { .. } => {
                            if let ResourceInfo::Buffer { size, .. } = resource_info
{
let buffer = self.buffers.get(&binding.name).unwrap();
wgpu::BindingResource::Buffer {
buffer,
range: 0..*size,
}
} else {
panic!("expected a Buffer resource");
}
}
_ => panic!("unsupported bind type"),
},
}
})
.collect::<Vec<wgpu::Binding>>();
let bind_group_layout = self.bind_group_layouts.get(&bind_group_id).unwrap();
let bind_group_descriptor = wgpu::BindGroupDescriptor {
layout: bind_group_layout,
bindings: bindings.as_slice(),
};
let bind_group = self.device.create_bind_group(&bind_group_descriptor);
self.bind_groups.insert(
bind_group_id,
BindGroupInfo {
bind_group,
unset_uniforms,
},
);
}
bind_group_id
}
pub fn create_shader_module(
device: &wgpu::Device,
shader: &Shader,
macros: Option<&[String]>,
) -> wgpu::ShaderModule {
device.create_shader_module(&shader.get_spirv(macros))
}
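    /// Runs each resource provider's `initialize` step inside a temporary command
    /// encoder, then submits the recorded commands to the queue.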
pub fn initialize_resource_providers(
&mut self,
world: &mut World,
render_graph: &mut RenderGraph,
) {
self.encoder = Some(
self.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 }),
);
for resource_provider in render_graph.resource_providers.iter_mut() {
resource_provider.initialize(self, world);
}
// consume current encoder
let command_buffer = self.encoder.take().unwrap().finish();
self.queue.submit(&[command_buffer]);
}
}
impl Renderer for WgpuRenderer {
fn initialize(&mut self, world: &mut World, render_graph: &mut RenderGraph) {
let (surface, window_size) = {
let window = world.resources.get::<winit::window::Window>().unwrap();
let surface = wgpu::Surface::create(window.deref());
let window_size = window.inner_size();
(surface, window_size)
};
self.surface = Some(surface);
self.initialize_resource_providers(world, render_graph);
self.resize(world, render_graph, window_size.width, window_size.height);
}
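    /// Recreates the swap chain at the new size, stores it in the `World`, and gives
    /// each resource provider a chance to resize its own resources.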
fn resize(
&mut self,
world: &mut World,
render_graph: &mut RenderGraph,
width: u32,
height: u32,
) {
self.encoder = Some(
self.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 }),
);
self.swap_chain_descriptor.width = width;
self.swap_chain_descriptor.height = height;
let swap_chain = self
.device
.create_swap_chain(self.surface.as_ref().unwrap(), &self.swap_chain_descriptor);
        // WgpuRenderer can't own the swap chain without creating lifetime ergonomics issues, so let's just store it in the World.
world.resources.insert(swap_chain);
for resource_provider in render_graph.resource_providers.iter_mut() {
resource_provider.resize(self, world, width, height);
}
// consume current encoder
let command_buffer = self.encoder.take().unwrap().finish();
self.queue.submit(&[command_buffer]);
}
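    /// Renders one frame: updates resource providers, creates any pending pipelines,
    /// bind groups, and queued textures, then records and submits every pass in the
    /// render graph.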
fn process_render_graph(&mut self, render_graph: &mut RenderGraph, world: &mut World) {
        // TODO: this self.encoder handoff is a bit gross, but it's here to give resource providers access to buffer copies without
        // exposing the wgpu renderer internals to ResourceProvider traits. If this can be made cleaner, that would be pretty cool.
self.encoder = Some(
self.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 }),
);
for resource_provider in render_graph.resource_providers.iter_mut() {
resource_provider.update(self, world);
}
update_shader_assignments(world, render_graph);
for (name, texture_descriptor) in render_graph.queued_textures.drain(..) {
self.create_texture(&name, &texture_descriptor);
}
let mut encoder = self.encoder.take().unwrap();
let mut swap_chain = world.resources.get_mut::<wgpu::SwapChain>().unwrap();
let frame = swap_chain
.get_next_texture()
.expect("Timeout when acquiring next swap chain texture");
        // set up pipelines, bind groups, and resources
let mut pipeline_storage = world
.resources
.get_mut::<AssetStorage<PipelineDescriptor>>()
.unwrap();
let shader_storage = world.resources.get::<AssetStorage<Shader>>().unwrap();
for pipeline_descriptor_handle in render_graph.pipeline_descriptors.iter() {
let pipeline_descriptor = pipeline_storage
.get_mut(pipeline_descriptor_handle)
.unwrap();
// create pipelines
if !self
.render_pipelines
.contains_key(pipeline_descriptor_handle)
{
let vertex_shader = shader_storage
.get(&pipeline_descriptor.shader_stages.vertex)
.unwrap();
let fragment_shader = pipeline_descriptor
.shader_stages
.fragment
.as_ref()
.map(|handle| &*shader_storage.get(&handle).unwrap());
let render_pipeline = WgpuRenderer::create_render_pipeline(
&self.dynamic_uniform_buffer_info,
pipeline_descriptor,
&mut self.bind_group_layouts,
&self.device,
vertex_shader,
fragment_shader,
);
self.render_pipelines
.insert(pipeline_descriptor_handle.clone(), render_pipeline);
}
// create bind groups
let pipeline_layout = pipeline_descriptor.get_layout().unwrap();
for bind_group in pipeline_layout.bind_groups.iter() {
self.setup_bind_group(bind_group);
}
}
for (pass_name, pass_descriptor) in render_graph.pass_descriptors.iter() {
// run passes
let mut render_pass = self.create_render_pass(pass_descriptor, &mut encoder, &frame);
if let Some(pass_pipelines) = render_graph.pass_pipelines.get(pass_name) {
for pass_pipeline in pass_pipelines.iter() {
let pipeline_descriptor = pipeline_storage.get(pass_pipeline).unwrap();
let render_pipeline = self.render_pipelines.get(pass_pipeline).unwrap();
render_pass.set_pipeline(render_pipeline);
let mut render_pass = WgpuRenderPass {
render_pass: &mut render_pass,
renderer: self,
pipeline_descriptor,
};
for draw_target_name in pipeline_descriptor.draw_targets.iter() {
let draw_target = render_graph.draw_targets.get(draw_target_name).unwrap();
draw_target(world, &mut render_pass, pass_pipeline.clone());
}
}
}
}
let command_buffer = encoder.finish();
self.queue.submit(&[command_buffer]);
}
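    /// Creates a GPU buffer initialized with `data` and records its size and usage
    /// under `name`. A sketch of typical use (the resource name here is illustrative):
    ///
    /// ```ignore
    /// renderer.create_buffer_with_data(
    ///     "example_uniform",
    ///     bytes,
    ///     wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
    /// );
    /// ```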
fn create_buffer_with_data(
&mut self,
name: &str,
data: &[u8],
buffer_usage: wgpu::BufferUsage,
) {
let buffer = self.device.create_buffer_with_data(data, buffer_usage);
self.add_resource_info(
name,
ResourceInfo::Buffer {
buffer_usage,
size: data.len() as u64,
},
);
self.buffers.insert(name.to_string(), buffer);
}
fn create_buffer(&mut self, name: &str, size: u64, buffer_usage: wgpu::BufferUsage) {
let buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
size,
usage: buffer_usage,
});
self.add_resource_info(name, ResourceInfo::Buffer { buffer_usage, size });
self.buffers.insert(name.to_string(), buffer);
}
fn create_instance_buffer(
&mut self,
name: &str,
mesh_id: usize,
size: usize,
count: usize,
buffer_usage: wgpu::BufferUsage,
) {
let buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
size: (size * count) as u64,
usage: buffer_usage,
});
self.add_resource_info(
name,
ResourceInfo::InstanceBuffer {
buffer_usage,
size,
count,
mesh_id,
},
);
self.buffers.insert(name.to_string(), buffer);
}
fn create_instance_buffer_with_data(
&mut self,
name: &str,
mesh_id: usize,
data: &[u8],
size: usize,
count: usize,
buffer_usage: wgpu::BufferUsage,
) {
let buffer = self.device.create_buffer_with_data(data, buffer_usage);
self.add_resource_info(
name,
ResourceInfo::InstanceBuffer {
buffer_usage,
size,
count,
mesh_id,
},
);
self.buffers.insert(name.to_string(), buffer);
}
fn get_resource_info(&self, name: &str) -> Option<&ResourceInfo> {
self.resource_info.get(name)
}
fn remove_buffer(&mut self, name: &str) {
self.buffers.remove(name);
}
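    /// Creates a buffer whose initial contents are written by `setup_data` into mapped
    /// memory before the buffer is unmapped and registered under `name`.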
fn create_buffer_mapped(
&mut self,
name: &str,
size: usize,
buffer_usage: wgpu::BufferUsage,
setup_data: &mut dyn FnMut(&mut [u8]),
) {
let mut mapped = self.device.create_buffer_mapped(size, buffer_usage);
setup_data(&mut mapped.data);
let buffer = mapped.finish();
self.add_resource_info(
name,
ResourceInfo::Buffer {
buffer_usage,
size: size as u64,
},
);
self.buffers.insert(name.to_string(), buffer);
}
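    /// Records a buffer-to-buffer copy on the current command encoder. Panics if no
    /// encoder is active (i.e. outside of `initialize`, `resize`, or frame processing).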
fn copy_buffer_to_buffer(
&mut self,
source_buffer: &str,
source_offset: u64,
destination_buffer: &str,
destination_offset: u64,
size: u64,
) {
let source = self.buffers.get(source_buffer).unwrap();
let destination = self.buffers.get(destination_buffer).unwrap();
let encoder = self.encoder.as_mut().unwrap();
encoder.copy_buffer_to_buffer(source, source_offset, destination, destination_offset, size);
}
fn get_dynamic_uniform_buffer_info(&self, name: &str) -> Option<&DynamicUniformBufferInfo> {
self.dynamic_uniform_buffer_info.get(name)
}
fn get_dynamic_uniform_buffer_info_mut(
&mut self,
name: &str,
) -> Option<&mut DynamicUniformBufferInfo> {
self.dynamic_uniform_buffer_info.get_mut(name)
}
fn add_dynamic_uniform_buffer_info(&mut self, name: &str, info: DynamicUniformBufferInfo) {
self.dynamic_uniform_buffer_info
.insert(name.to_string(), info);
}
fn create_texture(&mut self, name: &str, texture_descriptor: &TextureDescriptor) {
let descriptor: wgpu::TextureDescriptor = (*texture_descriptor).into();
let texture = self.device.create_texture(&descriptor);
self.textures
.insert(name.to_string(), texture.create_default_view());
}
}
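/// A `RenderPass` implementation that couples a live `wgpu::RenderPass` with the
/// renderer state and the pipeline descriptor it was created for.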
pub struct WgpuRenderPass<'a, 'b, 'c, 'd> {
2020-01-20 08:57:54 +00:00
pub render_pass: &'b mut wgpu::RenderPass<'a>,
pub pipeline_descriptor: &'c PipelineDescriptor,
pub renderer: &'d mut WgpuRenderer,
}
impl<'a, 'b, 'c, 'd> RenderPass for WgpuRenderPass<'a, 'b, 'c, 'd> {
fn get_renderer(&mut self) -> &mut dyn Renderer {
self.renderer
}
fn get_pipeline_descriptor(&self) -> &PipelineDescriptor {
self.pipeline_descriptor
}
fn set_vertex_buffer(&mut self, start_slot: u32, name: &str, offset: u64) {
let buffer = self.renderer.buffers.get(name).unwrap();
self.render_pass
.set_vertex_buffers(start_slot, &[(&buffer, offset)]);
}
fn set_index_buffer(&mut self, name: &str, offset: u64) {
let buffer = self.renderer.buffers.get(name).unwrap();
self.render_pass.set_index_buffer(&buffer, offset);
}
fn draw_indexed(
&mut self,
indices: core::ops::Range<u32>,
base_vertex: i32,
instances: core::ops::Range<u32>,
) {
self.render_pass
.draw_indexed(indices, base_vertex, instances);
}
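    /// Sets every bind group required by the current pipeline layout, collecting
    /// per-entity offsets for uniform bindings marked dynamic.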
fn setup_bind_groups(&mut self, entity: Option<&Entity>) {
let pipeline_layout = self.pipeline_descriptor.get_layout().unwrap();
for bind_group in pipeline_layout.bind_groups.iter() {
let bind_group_id = bind_group.get_hash().unwrap();
let bind_group_info = self.renderer.bind_groups.get(&bind_group_id).unwrap();
let mut dynamic_uniform_indices = Vec::new();
for binding in bind_group.bindings.iter() {
if let BindType::Uniform { dynamic, .. } = binding.bind_type {
if !dynamic {
continue;
}
// PERF: This hashmap get is pretty expensive (10 fps for 10000 entities)
if let Some(dynamic_uniform_buffer_info) =
self.renderer.dynamic_uniform_buffer_info.get(&binding.name)
{
let index = dynamic_uniform_buffer_info
.offsets
.get(entity.unwrap())
.unwrap();
dynamic_uniform_indices.push(*index);
}
}
}
// TODO: check to see if bind group is already set
self.render_pass.set_bind_group(
bind_group.index,
&bind_group_info.bind_group,
dynamic_uniform_indices.as_slice(),
);
}
}
}
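/// Maps the engine's `BindType` to the corresponding `wgpu::BindingType`.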
impl From<&BindType> for wgpu::BindingType {
fn from(bind_type: &BindType) -> Self {
match bind_type {
            BindType::Uniform { dynamic, .. } => {
                wgpu::BindingType::UniformBuffer { dynamic: *dynamic }
            }
BindType::Buffer { dynamic, readonly } => wgpu::BindingType::StorageBuffer {
dynamic: *dynamic,
readonly: *readonly,
},
BindType::SampledTexture {
dimension,
multisampled,
} => wgpu::BindingType::SampledTexture {
dimension: (*dimension).into(),
multisampled: *multisampled,
},
BindType::Sampler => wgpu::BindingType::Sampler,
BindType::StorageTexture { dimension } => wgpu::BindingType::StorageTexture {
dimension: (*dimension).into(),
},
}
}
}
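/// A cached `wgpu::BindGroup` plus the names of uniforms that were given empty
/// placeholder buffers because no resource existed when the group was created.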
pub struct BindGroupInfo {
pub bind_group: wgpu::BindGroup,
pub unset_uniforms: Vec<String>,
}