bevy/examples/shader/post_process_pass.rs
IceSentry 2c21d423fd
Make render graph slots optional for most cases (#8109)
# Objective

- Currently, the render graph slots are only used to pass the
view_entity around. This introduces significant boilerplate for very
little value. Instead of using slots for this, make the view_entity part
of the `RenderGraphContext`. This also means we won't need to have
`IN_VIEW` on every node, and we'll be able to use the default impl of
`Node::input()`.

## Solution

- Add `view_entity: Option<Entity>` to the `RenderGraphContext`
- Update all nodes to use this instead of entity slot input
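
Conceptually, the change looks something like the following sketch (the `view_entity` field and the `view_entity()` accessor come from the changelog and migration guide below; the internals shown here are illustrative, not the actual diff):

```rust
use bevy::ecs::entity::Entity;

// Sketch only, not Bevy's actual definition: the context now carries the
// view entity directly instead of threading it through an input slot.
pub struct RenderGraphContext {
    view_entity: Option<Entity>,
    // ...plus the existing graph/node/slot state...
}

impl RenderGraphContext {
    /// Returns the view entity this graph run is associated with.
    /// Panics if the graph was run without a view entity.
    pub fn view_entity(&self) -> Entity {
        self.view_entity.expect("graph was run without a view entity")
    }
}
```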

---

## Changelog

- Add optional `view_entity` to `RenderGraphContext`

## Migration Guide

You can now get the view_entity directly from the `RenderGraphContext`. 

When implementing the Node:

```rust
// 0.10
struct FooNode;
impl FooNode {
    const IN_VIEW: &'static str = "view";
}
impl Node for FooNode {
    fn input(&self) -> Vec<SlotInfo> {
        vec![SlotInfo::new(Self::IN_VIEW, SlotType::Entity)]
    }
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        // ... 
    ) -> Result<(), NodeRunError> {
        let view_entity = graph.get_input_entity(Self::IN_VIEW)?;
        // ...
        Ok(())
    }
}

// 0.11
struct FooNode;
impl Node for FooNode {
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        // ... 
    ) -> Result<(), NodeRunError> {
        let view_entity = graph.view_entity();
        // ...
        Ok(())
    }
}
```

When adding the node to the graph, you don't need to specify a slot_edge
for the view_entity.

```rust
// 0.10
let mut graph = RenderGraph::default();
graph.add_node(FooNode::NAME, node);
let input_node_id = graph.set_input(vec![SlotInfo::new(
    graph::input::VIEW_ENTITY,
    SlotType::Entity,
)]);
graph.add_slot_edge(
    input_node_id,
    graph::input::VIEW_ENTITY,
    FooNode::NAME,
    FooNode::IN_VIEW,
);
// add_node_edge ...

// 0.11
let mut graph = RenderGraph::default();
graph.add_node(FooNode::NAME, node);
// add_node_edge ...
```
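
If a node also runs a sub-graph that previously received the view entity through a slot, the view entity is now forwarded through the context as well. A minimal sketch, assuming a `run_sub_graph` signature that takes an optional view entity (an assumption, not copied from the diff):

```rust
// Inside `Node::run`, with `graph: &mut RenderGraphContext`:
// pass the view entity alongside the (now usually empty) slot inputs.
graph.run_sub_graph(core_3d::graph::NAME, vec![], Some(view_entity))?;
```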

## Notes

This PR, paired with #8007, will help reduce a lot of annoying boilerplate in
the render nodes. Depending on which one gets merged first, a bit of clean up
work will be required to make the two compatible.

I tagged this as a breaking change because code that uses the old system to
get the view_entity will break: the view entity is no longer a node input
slot.

## Notes for reviewers

A lot of the diff is just removing the slots from every node and graph
creation. The important part is mostly in the
graph_runner/CameraDriverNode.
2023-03-21 20:11:13 +00:00

//! This example shows how to create a custom render pass that runs after the main pass
//! and reads the texture generated by the main pass.
//!
//! The example shader is a very simple implementation of chromatic aberration.
//!
//! This is a fairly low level example and assumes some familiarity with rendering concepts and wgpu.

use bevy::{
    core_pipeline::{
        clear_color::ClearColorConfig, core_3d,
        fullscreen_vertex_shader::fullscreen_shader_vertex_state,
    },
    prelude::*,
    render::{
        extract_component::{
            ComponentUniforms, ExtractComponent, ExtractComponentPlugin, UniformComponentPlugin,
        },
        render_graph::{Node, NodeRunError, RenderGraph, RenderGraphContext},
        render_resource::{
            BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor,
            BindGroupLayoutEntry, BindingResource, BindingType, CachedRenderPipelineId,
            ColorTargetState, ColorWrites, FragmentState, MultisampleState, Operations,
            PipelineCache, PrimitiveState, RenderPassColorAttachment, RenderPassDescriptor,
            RenderPipelineDescriptor, Sampler, SamplerBindingType, SamplerDescriptor, ShaderStages,
            ShaderType, TextureFormat, TextureSampleType, TextureViewDimension,
        },
        renderer::{RenderContext, RenderDevice},
        texture::BevyDefault,
        view::{ExtractedView, ViewTarget},
        RenderApp,
    },
};

fn main() {
    App::new()
        .add_plugins(DefaultPlugins.set(AssetPlugin {
            // Watch for changes on disk so the shader can be hot reloaded
            watch_for_changes: true,
            ..default()
        }))
        .add_plugin(PostProcessPlugin)
        .add_systems(Startup, setup)
        .add_systems(Update, (rotate, update_settings))
        .run();
}

/// It is generally encouraged to set up post processing effects as a plugin
struct PostProcessPlugin;

impl Plugin for PostProcessPlugin {
    fn build(&self, app: &mut App) {
        app
            // The settings will be a component that lives in the main world but will
            // be extracted to the render world every frame.
            // This makes it possible to control the effect from the main world.
            // This plugin will take care of extracting it automatically.
            // It's important to derive [`ExtractComponent`] on [`PostProcessSettings`] for this plugin to work correctly.
            .add_plugin(ExtractComponentPlugin::<PostProcessSettings>::default())
            // The settings will also be the data used in the shader.
            // This plugin will prepare the component for the GPU by creating a uniform buffer
            // and writing the data to that buffer every frame.
            .add_plugin(UniformComponentPlugin::<PostProcessSettings>::default());

        // We need to get the render app from the main app
        let Ok(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // Initialize the pipeline
        render_app.init_resource::<PostProcessPipeline>();

        // Bevy's renderer uses a render graph which is a collection of nodes in a directed acyclic graph.
        // It currently runs on each view/camera and executes each node in the specified order.
        // It will make sure that any node that needs a dependency from another node only runs when that dependency is done.
        //
        // Each node can execute arbitrary work, but it generally runs at least one render pass.
        // A node only has access to the render world, so if you need data from the main world
        // you need to extract it manually or with the plugin like above.

        // Create the node with the render world
        let node = PostProcessNode::new(&mut render_app.world);

        // Get the render graph for the entire app
        let mut graph = render_app.world.resource_mut::<RenderGraph>();

        // Get the render graph for 3d cameras/views
        let core_3d_graph = graph.get_sub_graph_mut(core_3d::graph::NAME).unwrap();

        // Register the post process node in the 3d render graph
        core_3d_graph.add_node(PostProcessNode::NAME, node);

        // We now need to add an edge between our node and the nodes from bevy
        // to make sure our node is ordered correctly relative to other nodes.
        //
        // Here we want our effect to run after tonemapping and before the end of the main pass post processing
        core_3d_graph.add_node_edge(core_3d::graph::node::TONEMAPPING, PostProcessNode::NAME);
        core_3d_graph.add_node_edge(
            PostProcessNode::NAME,
            core_3d::graph::node::END_MAIN_PASS_POST_PROCESSING,
        );
    }
}

/// The post process node used for the render graph
struct PostProcessNode {
    // The node needs a query to gather data from the ECS in order to do its rendering,
    // but it's not a normal system so we need to define it manually.
    query: QueryState<&'static ViewTarget, With<ExtractedView>>,
}

impl PostProcessNode {
    pub const NAME: &str = "post_process";

    fn new(world: &mut World) -> Self {
        Self {
            query: QueryState::new(world),
        }
    }
}

impl Node for PostProcessNode {
    // This will run every frame before the run() method.
    // The important difference is that `self` is `mut` here.
    fn update(&mut self, world: &mut World) {
        // Since this is not a system we need to update the query manually.
        // This is mostly boilerplate. There are plans to remove this in the future.
        // For now, you can just copy it.
        self.query.update_archetypes(world);
    }

    // Runs the node logic.
    // This is where you encode draw commands.
    //
    // This will run on every view on which the graph is running. If you don't want your effect to run on every camera,
    // you'll need to make sure you have a marker component to identify which camera(s) should run the effect.
    fn run(
        &self,
        graph_context: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        // Get the entity of the view for the render graph where this node is running
        let view_entity = graph_context.view_entity();

        // We get the data we need from the world based on the view entity passed to the node.
        // The data is the query that was defined earlier in the [`PostProcessNode`].
        let Ok(view_target) = self.query.get_manual(world, view_entity) else {
            return Ok(());
        };

        // Get the pipeline resource that contains the global data we need to create the render pipeline
        let post_process_pipeline = world.resource::<PostProcessPipeline>();

        // The pipeline cache is a cache of all previously created pipelines.
        // It is required to avoid creating a new pipeline each frame, which is expensive due to shader compilation.
        let pipeline_cache = world.resource::<PipelineCache>();

        // Get the pipeline from the cache
        let Some(pipeline) = pipeline_cache.get_render_pipeline(post_process_pipeline.pipeline_id)
        else {
            return Ok(());
        };

        // Get the settings uniform binding
        let settings_uniforms = world.resource::<ComponentUniforms<PostProcessSettings>>();
        let Some(settings_binding) = settings_uniforms.uniforms().binding() else {
            return Ok(());
        };

        // This will start a new "post process write", obtaining two texture
        // views from the view target - a `source` and a `destination`.
        // `source` is the "current" main texture and you _must_ write into
        // `destination` because calling `post_process_write()` on the
        // [`ViewTarget`] will internally flip the [`ViewTarget`]'s main
        // texture to the `destination` texture. Failing to do so will cause
        // the current main texture information to be lost.
        let post_process = view_target.post_process_write();

        // The bind_group gets created each frame.
        //
        // Normally, you would create a bind_group in the Queue stage, but this doesn't work with the post_process_write().
        // The reason it doesn't work is that each post_process_write will alternate the source/destination.
        // The only way to have the correct source/destination for the bind_group is to make sure you get it during the node execution.
        let bind_group = render_context
            .render_device()
            .create_bind_group(&BindGroupDescriptor {
                label: Some("post_process_bind_group"),
                layout: &post_process_pipeline.layout,
                // It's important for this to match the BindGroupLayout defined in the PostProcessPipeline
                entries: &[
                    BindGroupEntry {
                        binding: 0,
                        // Make sure to use the source view
                        resource: BindingResource::TextureView(post_process.source),
                    },
                    BindGroupEntry {
                        binding: 1,
                        // Use the sampler created for the pipeline
                        resource: BindingResource::Sampler(&post_process_pipeline.sampler),
                    },
                    BindGroupEntry {
                        binding: 2,
                        // Set the settings binding
                        resource: settings_binding.clone(),
                    },
                ],
            });

        // Begin the render pass
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("post_process_pass"),
            color_attachments: &[Some(RenderPassColorAttachment {
                // We need to specify the post process destination view here
                // to make sure we write to the appropriate texture.
                view: post_process.destination,
                resolve_target: None,
                ops: Operations::default(),
            })],
            depth_stencil_attachment: None,
        });

        // This is mostly just wgpu boilerplate for drawing a fullscreen triangle,
        // using the pipeline/bind_group created above
        render_pass.set_render_pipeline(pipeline);
        render_pass.set_bind_group(0, &bind_group, &[]);
        render_pass.draw(0..3, 0..1);

        Ok(())
    }
}

// This contains global data used by the render pipeline. This will be created once on startup.
#[derive(Resource)]
struct PostProcessPipeline {
    layout: BindGroupLayout,
    sampler: Sampler,
    pipeline_id: CachedRenderPipelineId,
}

impl FromWorld for PostProcessPipeline {
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();

        // We need to define the bind group layout used for our pipeline
        let layout = render_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
            label: Some("post_process_bind_group_layout"),
            entries: &[
                // The screen texture
                BindGroupLayoutEntry {
                    binding: 0,
                    visibility: ShaderStages::FRAGMENT,
                    ty: BindingType::Texture {
                        sample_type: TextureSampleType::Float { filterable: true },
                        view_dimension: TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                // The sampler that will be used to sample the screen texture
                BindGroupLayoutEntry {
                    binding: 1,
                    visibility: ShaderStages::FRAGMENT,
                    ty: BindingType::Sampler(SamplerBindingType::Filtering),
                    count: None,
                },
                // The settings uniform that will control the effect
                BindGroupLayoutEntry {
                    binding: 2,
                    visibility: ShaderStages::FRAGMENT,
                    ty: BindingType::Buffer {
                        ty: bevy::render::render_resource::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                },
            ],
        });

        // We can create the sampler here since it won't change at runtime and doesn't depend on the view
        let sampler = render_device.create_sampler(&SamplerDescriptor::default());

        // Get the shader handle
        let shader = world
            .resource::<AssetServer>()
            .load("shaders/post_process_pass.wgsl");

        let pipeline_id = world
            .resource_mut::<PipelineCache>()
            // This will add the pipeline to the cache and queue its creation
            .queue_render_pipeline(RenderPipelineDescriptor {
                label: Some("post_process_pipeline".into()),
                layout: vec![layout.clone()],
                // This will set up a fullscreen triangle for the vertex state
                vertex: fullscreen_shader_vertex_state(),
                fragment: Some(FragmentState {
                    shader,
                    shader_defs: vec![],
                    // Make sure this matches the entry point of your shader.
                    // It can be anything as long as it matches here and in the shader.
                    entry_point: "fragment".into(),
                    targets: vec![Some(ColorTargetState {
                        format: TextureFormat::bevy_default(),
                        blend: None,
                        write_mask: ColorWrites::ALL,
                    })],
                }),
                // All of the following properties are not important for this effect, so just use the default values.
                // This struct doesn't have the `Default` trait implemented because not all fields can have a default value.
                primitive: PrimitiveState::default(),
                depth_stencil: None,
                multisample: MultisampleState::default(),
                push_constant_ranges: vec![],
            });

        Self {
            layout,
            sampler,
            pipeline_id,
        }
    }
}

// This is the component that will get passed to the shader
#[derive(Component, Default, Clone, Copy, ExtractComponent, ShaderType)]
struct PostProcessSettings {
    intensity: f32,
}

/// Set up a simple 3D scene
fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
) {
    // camera
    commands.spawn((
        Camera3dBundle {
            transform: Transform::from_translation(Vec3::new(0.0, 0.0, 5.0))
                .looking_at(Vec3::default(), Vec3::Y),
            camera_3d: Camera3d {
                clear_color: ClearColorConfig::Custom(Color::WHITE),
                ..default()
            },
            ..default()
        },
        // Add the setting to the camera.
        // This component is also used to determine on which camera to run the post processing effect.
        PostProcessSettings { intensity: 0.02 },
    ));

    // cube
    commands.spawn((
        PbrBundle {
            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
            material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
            transform: Transform::from_xyz(0.0, 0.5, 0.0),
            ..default()
        },
        Rotates,
    ));

    // light
    commands.spawn(PointLightBundle {
        transform: Transform::from_translation(Vec3::new(0.0, 0.0, 10.0)),
        ..default()
    });
}

#[derive(Component)]
struct Rotates;

/// Rotates any entity with the [`Rotates`] component around the x and z axes
fn rotate(time: Res<Time>, mut query: Query<&mut Transform, With<Rotates>>) {
    for mut transform in &mut query {
        transform.rotate_x(0.55 * time.delta_seconds());
        transform.rotate_z(0.15 * time.delta_seconds());
    }
}

// Change the intensity over time to show that the effect is controlled from the main world
fn update_settings(mut settings: Query<&mut PostProcessSettings>, time: Res<Time>) {
    for mut setting in &mut settings {
        let mut intensity = time.elapsed_seconds().sin();
        // Make it loop periodically
        intensity = intensity.sin();
        // Remap it to 0..1 because the intensity can't be negative
        intensity = intensity * 0.5 + 0.5;
        // Scale it to a more reasonable level
        intensity *= 0.015;

        // Set the intensity.
        // This will then be extracted to the render world and uploaded to the gpu automatically.
        setting.intensity = intensity;
    }
}