Add capability to render to a texture (#3412)

# Objective

Will fix #3377 and #3254

## Solution

Use an enum to represent either a `WindowId` or `Handle<Image>` in place of `Camera::window`.


Co-authored-by: Carter Anderson <mcanders1@gmail.com>
This commit is contained in:
Dusty DeWeese 2022-02-24 00:40:24 +00:00
parent ba6b74ba20
commit 81d57e129b
12 changed files with 499 additions and 119 deletions

View file

@ -212,6 +212,10 @@ path = "examples/3d/spherical_area_lights.rs"
name = "texture"
path = "examples/3d/texture.rs"
[[example]]
name = "render_to_texture"
path = "examples/3d/render_to_texture.rs"
[[example]]
name = "update_gltf_scene"
path = "examples/3d/update_gltf_scene.rs"

View file

@ -19,4 +19,5 @@ bevy_asset = { path = "../bevy_asset", version = "0.6.0" }
bevy_core = { path = "../bevy_core", version = "0.6.0" }
bevy_ecs = { path = "../bevy_ecs", version = "0.6.0" }
bevy_render = { path = "../bevy_render", version = "0.6.0" }
bevy_utils = { path = "../bevy_utils", version = "0.6.0" }

View file

@ -1,9 +1,11 @@
use std::collections::HashSet;
use crate::ClearColor;
use crate::{ClearColor, RenderTargetClearColors};
use bevy_ecs::prelude::*;
use bevy_render::{
camera::ExtractedCamera,
camera::{ExtractedCamera, RenderTarget},
prelude::Image,
render_asset::RenderAssets,
render_graph::{Node, NodeRunError, RenderGraphContext, SlotInfo},
render_resource::{
LoadOp, Operations, RenderPassColorAttachment, RenderPassDepthStencilAttachment,
@ -47,21 +49,26 @@ impl Node for ClearPassNode {
render_context: &mut RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mut cleared_windows = HashSet::new();
let mut cleared_targets = HashSet::new();
let clear_color = world.get_resource::<ClearColor>().unwrap();
let render_target_clear_colors = world.get_resource::<RenderTargetClearColors>().unwrap();
// This gets all ViewTargets and ViewDepthTextures and clears its attachments
// TODO: This has the potential to clear the same target multiple times, if there
// are multiple views drawing to the same target. This should be fixed when we make
// clearing happen on "render targets" instead of "views" (see the TODO below for more context).
for (target, depth, camera) in self.query.iter_manual(world) {
let mut color = &clear_color.0;
if let Some(camera) = camera {
cleared_windows.insert(camera.window_id);
cleared_targets.insert(&camera.target);
if let Some(target_color) = render_target_clear_colors.get(&camera.target) {
color = target_color;
}
}
let pass_descriptor = RenderPassDescriptor {
label: Some("clear_pass"),
color_attachments: &[target.get_color_attachment(Operations {
load: LoadOp::Clear(clear_color.0.into()),
load: LoadOp::Clear((*color).into()),
store: true,
})],
depth_stencil_attachment: depth.map(|depth| RenderPassDepthStencilAttachment {
@ -83,18 +90,28 @@ impl Node for ClearPassNode {
// which will cause panics. The real fix here is to clear "render targets" directly
// instead of "views". This should be removed once full RenderTargets are implemented.
let windows = world.get_resource::<ExtractedWindows>().unwrap();
for window in windows.values() {
let images = world.get_resource::<RenderAssets<Image>>().unwrap();
for target in render_target_clear_colors.colors.keys().cloned().chain(
windows
.values()
.map(|window| RenderTarget::Window(window.id)),
) {
// skip windows that have already been cleared
if cleared_windows.contains(&window.id) {
if cleared_targets.contains(&target) {
continue;
}
let pass_descriptor = RenderPassDescriptor {
label: Some("clear_pass"),
color_attachments: &[RenderPassColorAttachment {
view: window.swap_chain_texture.as_ref().unwrap(),
view: target.get_texture_view(windows, images).unwrap(),
resolve_target: None,
ops: Operations {
load: LoadOp::Clear(clear_color.0.into()),
load: LoadOp::Clear(
(*render_target_clear_colors
.get(&target)
.unwrap_or(&clear_color.0))
.into(),
),
store: true,
},
}],

View file

@ -9,6 +9,8 @@ pub mod prelude {
pub use crate::ClearColor;
}
use bevy_utils::HashMap;
pub use clear_pass::*;
pub use clear_pass_driver::*;
pub use main_pass_2d::*;
@ -21,7 +23,7 @@ use bevy_app::{App, Plugin};
use bevy_core::FloatOrd;
use bevy_ecs::prelude::*;
use bevy_render::{
camera::{ActiveCameras, CameraPlugin},
camera::{ActiveCameras, CameraPlugin, RenderTarget},
color::Color,
render_graph::{EmptyNode, RenderGraph, SlotInfo, SlotType},
render_phase::{
@ -48,6 +50,20 @@ impl Default for ClearColor {
}
}
/// Per-target clear colors, keyed by [`RenderTarget`].
///
/// Targets without an entry fall back to the global `ClearColor` resource.
#[derive(Clone, Debug, Default)]
pub struct RenderTargetClearColors {
    // Private map; the clear pass reads it via `get` (and `colors.keys()` within this crate).
    colors: HashMap<RenderTarget, Color>,
}

impl RenderTargetClearColors {
    /// Returns the clear color registered for `target`, if any.
    pub fn get(&self, target: &RenderTarget) -> Option<&Color> {
        self.colors.get(target)
    }
    /// Registers (or replaces) the clear color used for `target`.
    pub fn insert(&mut self, target: RenderTarget, color: Color) {
        self.colors.insert(target, color);
    }
}
// Plugins that contribute to the RenderGraph should use the following label conventions:
// 1. Graph modules should have a NAME, input module, and node module (where relevant)
// 2. The "top level" graph is the plugin module root. Just add things like `pub mod node` directly under the plugin module
@ -96,7 +112,8 @@ pub enum CorePipelineRenderSystems {
impl Plugin for CorePipelinePlugin {
fn build(&self, app: &mut App) {
app.init_resource::<ClearColor>();
app.init_resource::<ClearColor>()
.init_resource::<RenderTargetClearColors>();
let render_app = match app.get_sub_app_mut(RenderApp) {
Ok(render_app) => render_app,
@ -330,12 +347,22 @@ impl CachedPipelinePhaseItem for Transparent3d {
}
}
pub fn extract_clear_color(clear_color: Res<ClearColor>, mut render_world: ResMut<RenderWorld>) {
/// Copies the clear-color resources from the app world into the render world
/// whenever either of them has changed.
pub fn extract_clear_color(
    clear_color: Res<ClearColor>,
    clear_colors: Res<RenderTargetClearColors>,
    mut render_world: ResMut<RenderWorld>,
) {
    // If the global clear color has changed
    if clear_color.is_changed() {
        // Update the clear color resource in the render world
        render_world.insert_resource(clear_color.clone());
    }

    // If the per-target clear colors have changed (NOTE: this guards
    // `RenderTargetClearColors`, not the global `ClearColor` above)
    if clear_colors.is_changed() {
        // Update the per-target clear colors resource in the render world
        render_world.insert_resource(clear_colors.clone());
    }
}
pub fn extract_core_pipeline_camera_phases(

View file

@ -1,11 +1,13 @@
use std::collections::HashSet;
use bevy_asset::Assets;
use bevy_ecs::prelude::*;
use bevy_math::{Mat4, UVec2, UVec3, Vec2, Vec3, Vec3Swizzles, Vec4, Vec4Swizzles};
use bevy_reflect::Reflect;
use bevy_render::{
camera::{Camera, CameraProjection, OrthographicProjection},
color::Color,
prelude::Image,
primitives::{Aabb, CubemapFrusta, Frustum, Sphere},
view::{ComputedVisibility, RenderLayers, Visibility, VisibleEntities},
};
@ -354,62 +356,62 @@ const Z_SLICES: u32 = 24;
pub fn add_clusters(
mut commands: Commands,
windows: Res<Windows>,
images: Res<Assets<Image>>,
cameras: Query<(Entity, &Camera), Without<Clusters>>,
) {
for (entity, camera) in cameras.iter() {
let window = match windows.get(camera.window) {
Some(window) => window,
None => continue,
};
let clusters = Clusters::from_screen_size_and_z_slices(
UVec2::new(window.physical_width(), window.physical_height()),
Z_SLICES,
);
commands.entity(entity).insert(clusters);
if let Some(size) = camera.target.get_physical_size(&windows, &images) {
let clusters = Clusters::from_screen_size_and_z_slices(size, Z_SLICES);
commands.entity(entity).insert(clusters);
}
}
}
pub fn update_clusters(windows: Res<Windows>, mut views: Query<(&Camera, &mut Clusters)>) {
pub fn update_clusters(
windows: Res<Windows>,
images: Res<Assets<Image>>,
mut views: Query<(&Camera, &mut Clusters)>,
) {
for (camera, mut clusters) in views.iter_mut() {
let is_orthographic = camera.projection_matrix.w_axis.w == 1.0;
let inverse_projection = camera.projection_matrix.inverse();
let window = windows.get(camera.window).unwrap();
let screen_size_u32 = UVec2::new(window.physical_width(), window.physical_height());
// Don't update clusters if screen size is 0.
if screen_size_u32.x == 0 || screen_size_u32.y == 0 {
continue;
}
*clusters =
Clusters::from_screen_size_and_z_slices(screen_size_u32, clusters.axis_slices.z);
let screen_size = screen_size_u32.as_vec2();
let tile_size_u32 = clusters.tile_size;
let tile_size = tile_size_u32.as_vec2();
if let Some(screen_size_u32) = camera.target.get_physical_size(&windows, &images) {
// Don't update clusters if screen size is 0.
if screen_size_u32.x == 0 || screen_size_u32.y == 0 {
continue;
}
*clusters =
Clusters::from_screen_size_and_z_slices(screen_size_u32, clusters.axis_slices.z);
let screen_size = screen_size_u32.as_vec2();
let tile_size_u32 = clusters.tile_size;
let tile_size = tile_size_u32.as_vec2();
// Calculate view space AABBs
// NOTE: It is important that these are iterated in a specific order
// so that we can calculate the cluster index in the fragment shader!
// I (Rob Swain) choose to scan along rows of tiles in x,y, and for each tile then scan
// along z
let mut aabbs = Vec::with_capacity(
(clusters.axis_slices.y * clusters.axis_slices.x * clusters.axis_slices.z) as usize,
);
for y in 0..clusters.axis_slices.y {
for x in 0..clusters.axis_slices.x {
for z in 0..clusters.axis_slices.z {
aabbs.push(compute_aabb_for_cluster(
clusters.near,
camera.far,
tile_size,
screen_size,
inverse_projection,
is_orthographic,
clusters.axis_slices,
UVec3::new(x, y, z),
));
// Calculate view space AABBs
// NOTE: It is important that these are iterated in a specific order
// so that we can calculate the cluster index in the fragment shader!
// I (Rob Swain) choose to scan along rows of tiles in x,y, and for each tile then scan
// along z
let mut aabbs = Vec::with_capacity(
(clusters.axis_slices.y * clusters.axis_slices.x * clusters.axis_slices.z) as usize,
);
for y in 0..clusters.axis_slices.y {
for x in 0..clusters.axis_slices.x {
for z in 0..clusters.axis_slices.z {
aabbs.push(compute_aabb_for_cluster(
clusters.near,
camera.far,
tile_size,
screen_size,
inverse_projection,
is_orthographic,
clusters.axis_slices,
UVec3::new(x, y, z),
));
}
}
}
clusters.aabbs = aabbs;
}
clusters.aabbs = aabbs;
}
}

View file

@ -1,4 +1,8 @@
use crate::camera::CameraProjection;
use crate::{
camera::CameraProjection, prelude::Image, render_asset::RenderAssets,
render_resource::TextureView, view::ExtractedWindows,
};
use bevy_asset::{AssetEvent, Assets, Handle};
use bevy_ecs::{
component::Component,
entity::Entity,
@ -8,11 +12,13 @@ use bevy_ecs::{
reflect::ReflectComponent,
system::{QuerySet, Res},
};
use bevy_math::{Mat4, Vec2, Vec3};
use bevy_math::{Mat4, UVec2, Vec2, Vec3};
use bevy_reflect::{Reflect, ReflectDeserialize};
use bevy_transform::components::GlobalTransform;
use bevy_utils::HashSet;
use bevy_window::{WindowCreated, WindowId, WindowResized, Windows};
use serde::{Deserialize, Serialize};
use wgpu::Extent3d;
#[derive(Component, Default, Debug, Reflect)]
#[reflect(Component)]
@ -20,13 +26,77 @@ pub struct Camera {
pub projection_matrix: Mat4,
pub name: Option<String>,
#[reflect(ignore)]
pub window: WindowId,
pub target: RenderTarget,
#[reflect(ignore)]
pub depth_calculation: DepthCalculation,
pub near: f32,
pub far: f32,
}
/// The surface a [`Camera`] renders into: either a window's swap chain or an
/// [`Image`] asset. `Eq + Hash` so it can key `RenderTargetClearColors`.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowId),
    /// Image to which the camera's view is rendered.
    Image(Handle<Image>),
}

impl Default for RenderTarget {
    // Defaults to rendering to the default window (`WindowId::default()`).
    fn default() -> Self {
        Self::Window(Default::default())
    }
}
impl RenderTarget {
pub fn get_texture_view<'a>(
&self,
windows: &'a ExtractedWindows,
images: &'a RenderAssets<Image>,
) -> Option<&'a TextureView> {
match self {
RenderTarget::Window(window_id) => windows
.get(window_id)
.and_then(|window| window.swap_chain_texture.as_ref()),
RenderTarget::Image(image_handle) => {
images.get(image_handle).map(|image| &image.texture_view)
}
}
}
pub fn get_physical_size(&self, windows: &Windows, images: &Assets<Image>) -> Option<UVec2> {
match self {
RenderTarget::Window(window_id) => windows
.get(*window_id)
.map(|window| UVec2::new(window.physical_width(), window.physical_height())),
RenderTarget::Image(image_handle) => images.get(image_handle).map(|image| {
let Extent3d { width, height, .. } = image.texture_descriptor.size;
UVec2::new(width, height)
}),
}
}
pub fn get_logical_size(&self, windows: &Windows, images: &Assets<Image>) -> Option<Vec2> {
match self {
RenderTarget::Window(window_id) => windows
.get(*window_id)
.map(|window| Vec2::new(window.width(), window.height())),
RenderTarget::Image(image_handle) => images.get(image_handle).map(|image| {
let Extent3d { width, height, .. } = image.texture_descriptor.size;
Vec2::new(width as f32, height as f32)
}),
}
}
// Check if this render target is contained in the given changed windows or images.
fn is_changed(
&self,
changed_window_ids: &[WindowId],
changed_image_handles: &HashSet<&Handle<Image>>,
) -> bool {
match self {
RenderTarget::Window(window_id) => changed_window_ids.contains(window_id),
RenderTarget::Image(image_handle) => changed_image_handles.contains(&image_handle),
}
}
}
#[derive(Debug, Clone, Copy, Reflect, Serialize, Deserialize)]
#[reflect_value(Serialize, Deserialize)]
pub enum DepthCalculation {
@ -47,11 +117,11 @@ impl Camera {
pub fn world_to_screen(
&self,
windows: &Windows,
images: &Assets<Image>,
camera_transform: &GlobalTransform,
world_position: Vec3,
) -> Option<Vec2> {
let window = windows.get(self.window)?;
let window_size = Vec2::new(window.width(), window.height());
let window_size = self.target.get_logical_size(windows, images)?;
// Build a transform to convert from world to NDC using camera data
let world_to_ndc: Mat4 =
self.projection_matrix * camera_transform.compute_matrix().inverse();
@ -74,7 +144,9 @@ impl Camera {
pub fn camera_system<T: CameraProjection + Component>(
mut window_resized_events: EventReader<WindowResized>,
mut window_created_events: EventReader<WindowCreated>,
mut image_asset_events: EventReader<AssetEvent<Image>>,
windows: Res<Windows>,
images: Res<Assets<Image>>,
mut queries: QuerySet<(
QueryState<(Entity, &mut Camera, &mut T)>,
QueryState<Entity, Added<Camera>>,
@ -101,17 +173,30 @@ pub fn camera_system<T: CameraProjection + Component>(
changed_window_ids.push(event.id);
}
let changed_image_handles: HashSet<&Handle<Image>> = image_asset_events
.iter()
.filter_map(|event| {
if let AssetEvent::Modified { handle } = event {
Some(handle)
} else {
None
}
})
.collect();
let mut added_cameras = vec![];
for entity in &mut queries.q1().iter() {
added_cameras.push(entity);
}
for (entity, mut camera, mut camera_projection) in queries.q0().iter_mut() {
if let Some(window) = windows.get(camera.window) {
if changed_window_ids.contains(&window.id())
|| added_cameras.contains(&entity)
|| camera_projection.is_changed()
{
camera_projection.update(window.width(), window.height());
if camera
.target
.is_changed(&changed_window_ids, &changed_image_handles)
|| added_cameras.contains(&entity)
|| camera_projection.is_changed()
{
if let Some(size) = camera.target.get_logical_size(&windows, &images) {
camera_projection.update(size.x, size.y);
camera.projection_matrix = camera_projection.get_projection_matrix();
camera.depth_calculation = camera_projection.depth_calculation();
}

View file

@ -5,14 +5,17 @@ mod camera;
mod projection;
pub use active_cameras::*;
use bevy_asset::Assets;
use bevy_math::UVec2;
use bevy_transform::components::GlobalTransform;
use bevy_utils::HashMap;
use bevy_window::{WindowId, Windows};
use bevy_window::Windows;
pub use bundle::*;
pub use camera::*;
pub use projection::*;
use crate::{
prelude::Image,
primitives::Aabb,
view::{ComputedVisibility, ExtractedView, Visibility, VisibleEntities},
RenderApp, RenderStage,
@ -68,14 +71,16 @@ pub struct ExtractedCameraNames {
#[derive(Component, Debug)]
pub struct ExtractedCamera {
pub window_id: WindowId,
pub target: RenderTarget,
pub name: Option<String>,
pub physical_size: Option<UVec2>,
}
fn extract_cameras(
mut commands: Commands,
active_cameras: Res<ActiveCameras>,
windows: Res<Windows>,
images: Res<Assets<Image>>,
query: Query<(Entity, &Camera, &GlobalTransform, &VisibleEntities)>,
) {
let mut entities = HashMap::default();
@ -84,18 +89,19 @@ fn extract_cameras(
if let Some((entity, camera, transform, visible_entities)) =
camera.entity.and_then(|e| query.get(e).ok())
{
if let Some(window) = windows.get(camera.window) {
if let Some(size) = camera.target.get_physical_size(&windows, &images) {
entities.insert(name.clone(), entity);
commands.get_or_spawn(entity).insert_bundle((
ExtractedCamera {
window_id: camera.window,
target: camera.target.clone(),
name: camera.name.clone(),
physical_size: camera.target.get_physical_size(&windows, &images),
},
ExtractedView {
projection: camera.projection_matrix,
transform: *transform,
width: window.physical_width().max(1),
height: window.physical_height().max(1),
width: size.x.max(1),
height: size.y.max(1),
near: camera.near,
far: camera.far,
},

View file

@ -10,6 +10,8 @@ pub use window::*;
use crate::{
camera::{ExtractedCamera, ExtractedCameraNames},
prelude::Image,
render_asset::RenderAssets,
render_resource::{std140::AsStd140, DynamicUniformVec, Texture, TextureView},
renderer::{RenderDevice, RenderQueue},
texture::{BevyDefault, TextureCache},
@ -170,10 +172,12 @@ fn prepare_view_uniforms(
.write_buffer(&render_device, &render_queue);
}
#[allow(clippy::too_many_arguments)]
fn prepare_view_targets(
mut commands: Commands,
camera_names: Res<ExtractedCameraNames>,
windows: Res<ExtractedWindows>,
images: Res<RenderAssets<Image>>,
msaa: Res<Msaa>,
render_device: Res<RenderDevice>,
mut texture_cache: ResMut<TextureCache>,
@ -185,41 +189,34 @@ fn prepare_view_targets(
} else {
continue;
};
let window = if let Some(window) = windows.get(&camera.window_id) {
window
} else {
continue;
};
let swap_chain_texture = if let Some(texture) = &window.swap_chain_texture {
texture
} else {
continue;
};
let sampled_target = if msaa.samples > 1 {
let sampled_texture = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("sampled_color_attachment_texture"),
size: Extent3d {
width: window.physical_width,
height: window.physical_height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: msaa.samples,
dimension: TextureDimension::D2,
format: TextureFormat::bevy_default(),
usage: TextureUsages::RENDER_ATTACHMENT,
},
);
Some(sampled_texture.default_view.clone())
} else {
None
};
commands.entity(entity).insert(ViewTarget {
view: swap_chain_texture.clone(),
sampled_target,
});
if let Some(size) = camera.physical_size {
if let Some(texture_view) = camera.target.get_texture_view(&windows, &images) {
let sampled_target = if msaa.samples > 1 {
let sampled_texture = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("sampled_color_attachment_texture"),
size: Extent3d {
width: size.x,
height: size.y,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: msaa.samples,
dimension: TextureDimension::D2,
format: TextureFormat::bevy_default(),
usage: TextureUsages::RENDER_ATTACHMENT,
},
);
Some(sampled_texture.default_view.clone())
} else {
None
};
commands.entity(entity).insert(ViewTarget {
view: texture_view.clone(),
sampled_target,
});
}
}
}
}

View file

@ -49,17 +49,17 @@ impl RenderLayers {
pub const TOTAL_LAYERS: usize = std::mem::size_of::<LayerMask>() * 8;
/// Create a new `RenderLayers` belonging to the given layer.
pub fn layer(n: Layer) -> Self {
pub const fn layer(n: Layer) -> Self {
RenderLayers(0).with(n)
}
/// Create a new `RenderLayers` that belongs to all layers.
pub fn all() -> Self {
pub const fn all() -> Self {
RenderLayers(u32::MAX)
}
/// Create a new `RenderLayers` that belongs to no layers.
pub fn none() -> Self {
pub const fn none() -> Self {
RenderLayers(0)
}
@ -75,9 +75,8 @@ impl RenderLayers {
///
/// # Panics
/// Panics when called with a layer greater than `TOTAL_LAYERS - 1`.
#[must_use]
pub fn with(mut self, layer: Layer) -> Self {
assert!(usize::from(layer) < Self::TOTAL_LAYERS);
pub const fn with(mut self, layer: Layer) -> Self {
assert!((layer as usize) < Self::TOTAL_LAYERS);
self.0 |= 1 << layer;
self
}
@ -86,9 +85,8 @@ impl RenderLayers {
///
/// # Panics
/// Panics when called with a layer greater than `TOTAL_LAYERS - 1`.
#[must_use]
pub fn without(mut self, layer: Layer) -> Self {
assert!(usize::from(layer) < Self::TOTAL_LAYERS);
pub const fn without(mut self, layer: Layer) -> Self {
assert!((layer as usize) < Self::TOTAL_LAYERS);
self.0 &= !(1 << layer);
self
}

View file

@ -0,0 +1,242 @@
use bevy::{
core_pipeline::{
draw_3d_graph, node, AlphaMask3d, Opaque3d, RenderTargetClearColors, Transparent3d,
},
prelude::*,
reflect::TypeUuid,
render::{
camera::{ActiveCameras, Camera, ExtractedCameraNames, RenderTarget},
render_graph::{NodeRunError, RenderGraph, RenderGraphContext, SlotValue},
render_phase::RenderPhase,
render_resource::{
Extent3d, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages,
},
renderer::RenderContext,
view::RenderLayers,
RenderApp, RenderStage,
},
};
// This handle will point at the texture to which we will render in the first pass.
pub const RENDER_IMAGE_HANDLE: HandleUntyped =
HandleUntyped::weak_from_u64(Image::TYPE_UUID, 13378939762009864029);
// The name of the final node of the first pass.
pub const FIRST_PASS_DRIVER: &str = "first_pass_driver";
// The name of the camera that determines the view rendered in the first pass.
pub const FIRST_PASS_CAMERA: &str = "first_pass_camera";
fn main() {
    let mut app = App::new();

    // App-world configuration: 4x MSAA plus the scene and rotation systems.
    app.insert_resource(Msaa { samples: 4 })
        .add_plugins(DefaultPlugins)
        .add_startup_system(setup)
        .add_system(cube_rotator_system)
        .add_system(rotator_system);

    let render_app = app.sub_app_mut(RenderApp);
    // This will add 3D render phases for the new camera.
    render_app.add_system_to_stage(RenderStage::Extract, extract_first_pass_camera_phases);

    let mut render_graph = render_app.world.get_resource_mut::<RenderGraph>().unwrap();

    // Add a node for the first pass.
    render_graph.add_node(FIRST_PASS_DRIVER, FirstPassCameraDriver);

    // The first pass's dependencies include those of the main pass.
    render_graph
        .add_node_edge(node::MAIN_PASS_DEPENDENCIES, FIRST_PASS_DRIVER)
        .unwrap();
    // Order the passes: CLEAR_PASS_DRIVER -> FIRST_PASS_DRIVER -> MAIN_PASS_DRIVER
    render_graph
        .add_node_edge(node::CLEAR_PASS_DRIVER, FIRST_PASS_DRIVER)
        .unwrap();
    render_graph
        .add_node_edge(FIRST_PASS_DRIVER, node::MAIN_PASS_DRIVER)
        .unwrap();

    app.run();
}
// Add 3D render phases for FIRST_PASS_CAMERA.
fn extract_first_pass_camera_phases(mut commands: Commands, active_cameras: Res<ActiveCameras>) {
    // Resolve the named camera to its entity; do nothing if either is missing.
    let camera_entity = active_cameras
        .get(FIRST_PASS_CAMERA)
        .and_then(|active_camera| active_camera.entity);
    if let Some(entity) = camera_entity {
        // Attach the standard 3d phases so draw_3d_graph can render this view.
        commands.get_or_spawn(entity).insert_bundle((
            RenderPhase::<Opaque3d>::default(),
            RenderPhase::<AlphaMask3d>::default(),
            RenderPhase::<Transparent3d>::default(),
        ));
    }
}
// A node for the first pass camera that runs draw_3d_graph with this camera.
struct FirstPassCameraDriver;

impl bevy::render::render_graph::Node for FirstPassCameraDriver {
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        _render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let camera_names = world.get_resource::<ExtractedCameraNames>().unwrap();
        // Run the 3d sub-graph with the first pass camera's view entity, if extracted.
        if let Some(&camera_entity) = camera_names.entities.get(FIRST_PASS_CAMERA) {
            graph.run_sub_graph(draw_3d_graph::NAME, vec![SlotValue::Entity(camera_entity)])?;
        }
        Ok(())
    }
}
// Marks the first pass cube (rendered to a texture.)
#[derive(Component)]
struct FirstPassCube;

// Marks the main pass cube, to which the texture is applied.
#[derive(Component)]
struct MainPassCube;
/// Builds the scene: an inner cube rendered by the first pass camera into the
/// texture behind `RENDER_IMAGE_HANDLE`, and an outer cube textured with that
/// image, rendered by the main camera.
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
    mut active_cameras: ResMut<ActiveCameras>,
    mut images: ResMut<Assets<Image>>,
    mut clear_colors: ResMut<RenderTargetClearColors>,
) {
    let size = Extent3d {
        width: 512,
        height: 512,
        ..Default::default()
    };

    // This is the texture that will be rendered to.
    let mut image = Image {
        texture_descriptor: TextureDescriptor {
            label: None,
            size,
            dimension: TextureDimension::D2,
            format: TextureFormat::Bgra8UnormSrgb,
            mip_level_count: 1,
            sample_count: 1,
            // RENDER_ATTACHMENT so the first pass can draw into it,
            // TEXTURE_BINDING so the main pass material can sample it.
            usage: TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_DST
                | TextureUsages::RENDER_ATTACHMENT,
        },
        ..Default::default()
    };

    // fill image.data with zeroes
    image.resize(size);

    // Store the image under the fixed handle so the material below can refer to it.
    let image_handle = images.set(RENDER_IMAGE_HANDLE, image);

    let cube_handle = meshes.add(Mesh::from(shape::Cube { size: 4.0 }));
    let cube_material_handle = materials.add(StandardMaterial {
        base_color: Color::rgb(0.8, 0.7, 0.6),
        reflectance: 0.02,
        unlit: false,
        ..Default::default()
    });

    // This specifies the layer used for the first pass, which will be attached to the first pass camera and cube.
    let first_pass_layer = RenderLayers::layer(1);

    // The cube that will be rendered to the texture.
    commands
        .spawn_bundle(PbrBundle {
            mesh: cube_handle,
            material: cube_material_handle,
            transform: Transform::from_translation(Vec3::new(0.0, 0.0, 1.0)),
            ..Default::default()
        })
        .insert(FirstPassCube)
        .insert(first_pass_layer);

    // Light
    // NOTE: Currently lights are shared between passes - see https://github.com/bevyengine/bevy/issues/3462
    commands.spawn_bundle(PointLightBundle {
        transform: Transform::from_translation(Vec3::new(0.0, 0.0, 10.0)),
        ..Default::default()
    });

    // First pass camera: targets the image and clears it to white each frame.
    let render_target = RenderTarget::Image(image_handle);
    clear_colors.insert(render_target.clone(), Color::WHITE);
    active_cameras.add(FIRST_PASS_CAMERA);
    commands
        .spawn_bundle(PerspectiveCameraBundle {
            camera: Camera {
                name: Some(FIRST_PASS_CAMERA.to_string()),
                target: render_target,
                ..Default::default()
            },
            transform: Transform::from_translation(Vec3::new(0.0, 0.0, 15.0))
                .looking_at(Vec3::default(), Vec3::Y),
            ..Default::default()
        })
        .insert(first_pass_layer);
    // NOTE: omitting the RenderLayers component for this camera may cause a validation error:
    //
    // thread 'main' panicked at 'wgpu error: Validation Error
    //
    // Caused by:
    //     In a RenderPass
    //       note: encoder = `<CommandBuffer-(0, 1, Metal)>`
    //     In a pass parameter
    //       note: command buffer = `<CommandBuffer-(0, 1, Metal)>`
    //     Attempted to use texture (5, 1, Metal) mips 0..1 layers 0..1 as a combination of COLOR_TARGET within a usage scope.
    //
    // This happens because the texture would be written and read in the same frame, which is not allowed.
    // So either render layers must be used to avoid this, or the texture must be double buffered.

    let cube_size = 4.0;
    let cube_handle = meshes.add(Mesh::from(shape::Box::new(cube_size, cube_size, cube_size)));

    // This material has the texture that has been rendered.
    let material_handle = materials.add(StandardMaterial {
        base_color_texture: Some(RENDER_IMAGE_HANDLE.typed()),
        reflectance: 0.02,
        unlit: false,
        ..Default::default()
    });

    // Main pass cube, with material containing the rendered first pass texture.
    commands
        .spawn_bundle(PbrBundle {
            mesh: cube_handle,
            material: material_handle,
            transform: Transform {
                translation: Vec3::new(0.0, 0.0, 1.5),
                rotation: Quat::from_rotation_x(-std::f32::consts::PI / 5.0),
                ..Default::default()
            },
            ..Default::default()
        })
        .insert(MainPassCube);

    // The main pass camera.
    commands.spawn_bundle(PerspectiveCameraBundle {
        transform: Transform::from_translation(Vec3::new(0.0, 0.0, 15.0))
            .looking_at(Vec3::default(), Vec3::Y),
        ..Default::default()
    });
}
/// Rotates the inner cube (first pass)
fn rotator_system(time: Res<Time>, mut query: Query<&mut Transform, With<FirstPassCube>>) {
    // The frame delta is loop-invariant; read it once.
    let dt = time.delta_seconds();
    for mut transform in query.iter_mut() {
        transform.rotation *= Quat::from_rotation_x(1.5 * dt);
        transform.rotation *= Quat::from_rotation_z(1.3 * dt);
    }
}
/// Rotates the outer cube (main pass)
fn cube_rotator_system(time: Res<Time>, mut query: Query<&mut Transform, With<MainPassCube>>) {
    // The frame delta is loop-invariant; read it once.
    let dt = time.delta_seconds();
    for mut transform in query.iter_mut() {
        transform.rotation *= Quat::from_rotation_x(1.0 * dt);
        transform.rotation *= Quat::from_rotation_y(0.7 * dt);
    }
}

View file

@ -107,6 +107,7 @@ Example | File | Description
`orthographic` | [`3d/orthographic.rs`](./3d/orthographic.rs) | Shows how to create a 3D orthographic view (for isometric-look games or CAD applications)
`parenting` | [`3d/parenting.rs`](./3d/parenting.rs) | Demonstrates parent->child relationships and relative transformations
`pbr` | [`3d/pbr.rs`](./3d/pbr.rs) | Demonstrates use of Physically Based Rendering (PBR) properties
`render_to_texture` | [`3d/render_to_texture.rs`](./3d/render_to_texture.rs) | Shows how to render to a texture, useful for mirrors, UI, or exporting images
`shadow_caster_receiver` | [`3d/shadow_caster_receiver.rs`](./3d/shadow_caster_receiver.rs) | Demonstrates how to prevent meshes from casting/receiving shadows in a 3d scene
`shadow_biases` | [`3d/shadow_biases.rs`](./3d/shadow_biases.rs) | Demonstrates how shadow biases affect shadows in a 3d scene
`spherical_area_lights` | [`3d/spherical_area_lights.rs`](./3d/spherical_area_lights.rs) | Demonstrates how point light radius values affect light behavior.

View file

@ -2,7 +2,7 @@ use bevy::{
core_pipeline::{draw_3d_graph, node, AlphaMask3d, Opaque3d, Transparent3d},
prelude::*,
render::{
camera::{ActiveCameras, ExtractedCameraNames},
camera::{ActiveCameras, ExtractedCameraNames, RenderTarget},
render_graph::{Node, NodeRunError, RenderGraph, RenderGraphContext, SlotValue},
render_phase::RenderPhase,
renderer::RenderContext,
@ -65,7 +65,7 @@ fn create_new_window(
// second window camera
commands.spawn_bundle(PerspectiveCameraBundle {
camera: Camera {
window: window_id,
target: RenderTarget::Window(window_id),
name: Some(SECONDARY_CAMERA_NAME.into()),
..Default::default()
},