Mirror of https://github.com/bevyengine/bevy — synced 2024-11-21 20:23:28 +00:00
Normalise matrix naming (#13489)
# Objective

- Fixes #10909
- Fixes #8492

## Solution

- Name all matrices `x_from_y`, for example `world_from_view`.

## Testing

- I've tested most of the 3D examples. The `lighting` example in particular should hit a lot of the changes and appears to run fine.

---

## Changelog

- Renamed matrices across the engine to follow the `x_from_y` naming, making the space conversion more obvious.

## Migration Guide

- `Frustum`'s `from_view_projection`, `from_view_projection_custom_far` and `from_view_projection_no_far` were renamed to `from_clip_from_world`, `from_clip_from_world_custom_far` and `from_clip_from_world_no_far`.
- `ComputedCameraValues::projection_matrix` was renamed to `clip_from_view`.
- `CameraProjection::get_projection_matrix` was renamed to `get_clip_from_view` (this affects implementations on `Projection`, `PerspectiveProjection` and `OrthographicProjection`).
- `ViewRangefinder3d::from_view_matrix` was renamed to `from_world_from_view`.
- `PreviousViewData`'s members were renamed to `view_from_world` and `clip_from_world`.
- `ExtractedView`'s `projection`, `transform` and `view_projection` were renamed to `clip_from_view`, `world_from_view` and `clip_from_world`.
- `ViewUniform`'s `view_proj`, `unjittered_view_proj`, `inverse_view_proj`, `view`, `inverse_view`, `projection` and `inverse_projection` were renamed to `clip_from_world`, `unjittered_clip_from_world`, `world_from_clip`, `world_from_view`, `view_from_world`, `clip_from_view` and `view_from_clip`.
- `GpuDirectionalCascade::view_projection` was renamed to `clip_from_world`.
- `MeshTransforms`' `transform` and `previous_transform` were renamed to `world_from_local` and `previous_world_from_local`.
- `MeshUniform`'s `transform`, `previous_transform`, `inverse_transpose_model_a` and `inverse_transpose_model_b` were renamed to `world_from_local`, `previous_world_from_local`, `local_from_world_transpose_a` and `local_from_world_transpose_b` (the `Mesh` type in WGSL mirrors this; however, `transform` and `previous_transform` were named `model` and `previous_model`).
- `Mesh2dTransforms::transform` was renamed to `world_from_local`.
- `Mesh2dUniform`'s `transform`, `inverse_transpose_model_a` and `inverse_transpose_model_b` were renamed to `world_from_local`, `local_from_world_transpose_a` and `local_from_world_transpose_b` (the `Mesh2d` type in WGSL mirrors this).
- In WGSL, in `bevy_pbr::mesh_functions`, `get_model_matrix` and `get_previous_model_matrix` were renamed to `get_world_from_local` and `get_previous_world_from_local`.
- In WGSL, `bevy_sprite::mesh2d_functions::get_model_matrix` was renamed to `get_world_from_local`.

A sketch of how the new convention reads at a call site follows below.
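To make the convention concrete, here is a minimal, hypothetical Rust sketch (not part of this commit) of how an `x_from_y` name reads; the function name and its arguments are illustrative values you are assumed to already have:

```rust
use bevy::math::{Mat4, Vec3};

// Under the `x_from_y` convention the name states which space a matrix maps
// *from* (right) and *to* (left), so compositions read like cancellation:
// `world_from_view * view_from_local == world_from_local`.
fn world_position_of(world_from_local: Mat4, local_position: Vec3) -> Vec3 {
    // A `world_from_local` matrix (previously `transform` / `model`) applied
    // to a local-space point yields a world-space point.
    world_from_local.transform_point3(local_position)
}
```

Reading the spaces off the name avoids having to remember whether a field like `view` held the view matrix or its inverse.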
This commit is contained in:
parent 5ca7ba2c18
commit 9b9d3d81cb
59 changed files with 476 additions and 472 deletions
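Before the file-by-file diff, a hedged sketch (not part of the commit) of what the Rust-side migration looks like in downstream code, based on the migration guide above; `clip_from_view` and `world_from_view` are assumed to be matrices you already computed for a camera:

```rust
use bevy::math::Mat4;
use bevy::render::primitives::Frustum;

// 0.13 and earlier:
//     let view_projection = projection * view_transform.inverse();
//     let frustum = Frustum::from_view_projection(&view_projection);
//
// After this change the same matrix is built and named for the spaces it
// connects, and the constructor follows the renamed argument:
fn frustum_for_camera(clip_from_view: Mat4, world_from_view: Mat4) -> Frustum {
    // clip_from_world = clip_from_view * view_from_world
    let clip_from_world = clip_from_view * world_from_view.inverse();
    Frustum::from_clip_from_world(&clip_from_world)
}
```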
@@ -36,7 +36,7 @@ fn fragment(
 is_front,
 );
 
-pbr_input.is_orthographic = view.projection[3].w == 1.0;
+pbr_input.is_orthographic = view.clip_from_view[3].w == 1.0;
 
 pbr_input.N = normalize(pbr_input.world_normal);
 
@@ -1,6 +1,6 @@
 #import bevy_sprite::{
 mesh2d_view_bindings::globals,
-mesh2d_functions::{get_model_matrix, mesh2d_position_local_to_clip},
+mesh2d_functions::{get_world_from_local, mesh2d_position_local_to_clip},
 }
 
 struct Vertex {
@@ -19,8 +19,8 @@ struct VertexOutput {
 @vertex
 fn vertex(vertex: Vertex) -> VertexOutput {
 var out: VertexOutput;
-let model = get_model_matrix(vertex.instance_index);
-out.clip_position = mesh2d_position_local_to_clip(model, vec4<f32>(vertex.position, 1.0));
+let world_from_local = get_world_from_local(vertex.instance_index);
+out.clip_position = mesh2d_position_local_to_clip(world_from_local, vec4<f32>(vertex.position, 1.0));
 out.color = vertex.color;
 out.barycentric = vertex.barycentric;
 return out;
@@ -1,4 +1,4 @@
-#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip}
+#import bevy_pbr::mesh_functions::{get_world_from_local, mesh_position_local_to_clip}
 
 struct CustomMaterial {
 color: vec4<f32>,
@@ -20,7 +20,7 @@ struct VertexOutput {
 fn vertex(vertex: Vertex) -> VertexOutput {
 var out: VertexOutput;
 out.clip_position = mesh_position_local_to_clip(
-get_model_matrix(vertex.instance_index),
+get_world_from_local(vertex.instance_index),
 vec4<f32>(vertex.position, 1.0),
 );
 out.blend_color = vertex.blend_color;
@@ -1,4 +1,4 @@
-#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip}
+#import bevy_pbr::mesh_functions::{get_world_from_local, mesh_position_local_to_clip}
 
 struct Vertex {
 @location(0) position: vec3<f32>,
@@ -18,12 +18,12 @@ struct VertexOutput {
 fn vertex(vertex: Vertex) -> VertexOutput {
 let position = vertex.position * vertex.i_pos_scale.w + vertex.i_pos_scale.xyz;
 var out: VertexOutput;
-// NOTE: Passing 0 as the instance_index to get_model_matrix() is a hack
+// NOTE: Passing 0 as the instance_index to get_world_from_local() is a hack
 // for this example as the instance_index builtin would map to the wrong
 // index in the Mesh array. This index could be passed in via another
 // uniform instead but it's unnecessary for the example.
 out.clip_position = mesh_position_local_to_clip(
-get_model_matrix(0u),
+get_world_from_local(0u),
 vec4<f32>(position, 1.0)
 );
 out.color = vertex.i_color;
@@ -3,8 +3,8 @@
 #import bevy_pbr::mesh_view_bindings
 
 struct VoxelVisualizationIrradianceVolumeInfo {
-transform: mat4x4<f32>,
-inverse_transform: mat4x4<f32>,
+world_from_voxel: mat4x4<f32>,
+voxel_from_world: mat4x4<f32>,
 resolution: vec3<u32>,
 // A scale factor that's applied to the diffuse and specular light from the
 // light probe. This is in units of cd/m² (candela per square meter).
@@ -18,12 +18,12 @@ var<uniform> irradiance_volume_info: VoxelVisualizationIrradianceVolumeInfo;
 fn fragment(mesh: VertexOutput) -> @location(0) vec4<f32> {
 // Snap the world position we provide to `irradiance_volume_light()` to the
 // middle of the nearest texel.
-var unit_pos = (irradiance_volume_info.inverse_transform *
+var unit_pos = (irradiance_volume_info.voxel_from_world *
 vec4(mesh.world_position.xyz, 1.0f)).xyz;
 let resolution = vec3<f32>(irradiance_volume_info.resolution);
 let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f));
 let stp_rounded = round(stp - 0.5f) + 0.5f;
-let rounded_world_pos = (irradiance_volume_info.transform * vec4(stp_rounded, 1.0f)).xyz;
+let rounded_world_pos = (irradiance_volume_info.world_from_voxel * vec4(stp_rounded, 1.0f)).xyz;
 
 // `irradiance_volume_light()` multiplies by intensity, so cancel it out.
 // If we take intensity into account, the cubes will be way too bright.
@@ -71,8 +71,8 @@ pub struct DeferredPrepass;
 
 #[derive(Component, ShaderType, Clone)]
 pub struct PreviousViewData {
-pub inverse_view: Mat4,
-pub view_proj: Mat4,
+pub view_from_world: Mat4,
+pub clip_from_world: Mat4,
 }
 
 #[derive(Resource, Default)]
@@ -24,7 +24,7 @@ fn coords_to_ray_direction(position: vec2<f32>, viewport: vec4<f32>) -> vec3<f32
 // fragment position.
 // Use the position on the near clipping plane to avoid -inf world position
 // because the far plane of an infinite reverse projection is at infinity.
-let view_position_homogeneous = view.inverse_projection * vec4(
+let view_position_homogeneous = view.view_from_clip * vec4(
 coords_to_viewport_uv(position, viewport) * vec2(2.0, -2.0) + vec2(-1.0, 1.0),
 1.0,
 1.0,
@@ -34,7 +34,7 @@ fn coords_to_ray_direction(position: vec2<f32>, viewport: vec4<f32>) -> vec3<f32
 // direction to world space. Note that the w element is set to 0.0, as this is a
 // vector direction, not a position, That causes the matrix multiplication to ignore
 // the translations from the view matrix.
-let ray_direction = (view.view * vec4(view_ray_direction, 0.0)).xyz;
+let ray_direction = (view.world_from_view * vec4(view_ray_direction, 0.0)).xyz;
 
 return normalize(ray_direction);
 }
@@ -3,8 +3,8 @@
 #import bevy_pbr::view_transformations::uv_to_ndc
 
 struct PreviousViewUniforms {
-inverse_view: mat4x4<f32>,
-view_proj: mat4x4<f32>,
+view_from_world: mat4x4<f32>,
+clip_from_world: mat4x4<f32>,
 }
 
 @group(0) @binding(0) var<uniform> view: View;
@@ -13,8 +13,8 @@ struct PreviousViewUniforms {
 @fragment
 fn fragment(in: FullscreenVertexOutput) -> @location(1) vec4<f32> {
 let clip_pos = uv_to_ndc(in.uv); // Convert from uv to clip space
-let world_pos = view.inverse_view_proj * vec4(clip_pos, 0.0, 1.0);
-let prev_clip_pos = (previous_view.view_proj * world_pos).xy;
+let world_pos = view.world_from_clip * vec4(clip_pos, 0.0, 1.0);
+let prev_clip_pos = (previous_view.clip_from_world * world_pos).xy;
 let velocity = (clip_pos - prev_clip_pos) * vec2(0.5, -0.5); // Copied from mesh motion vectors
 
 return vec4(velocity.x, velocity.y, 0.0, 1.0);
@@ -39,9 +39,9 @@ fn vertex_bevel(vertex: VertexInput) -> VertexOutput {
 );
 var position = positions[vertex.index];
 
-var clip_a = view.view_proj * vec4(vertex.position_a, 1.);
-var clip_b = view.view_proj * vec4(vertex.position_b, 1.);
-var clip_c = view.view_proj * vec4(vertex.position_c, 1.);
+var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.);
+var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.);
+var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.);
 
 // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader.
 clip_a = clip_near_plane(clip_a, clip_c);
@@ -97,10 +97,10 @@ fn vertex_miter(vertex: VertexInput) -> VertexOutput {
 vec3(0, 0, 0.5),
 );
 var position = positions[vertex.index];
 
-var clip_a = view.view_proj * vec4(vertex.position_a, 1.);
-var clip_b = view.view_proj * vec4(vertex.position_b, 1.);
-var clip_c = view.view_proj * vec4(vertex.position_c, 1.);
+var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.);
+var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.);
+var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.);
 
 // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader.
 clip_a = clip_near_plane(clip_a, clip_c);
@@ -148,9 +148,9 @@ fn vertex_miter(vertex: VertexInput) -> VertexOutput {
 
 @vertex
 fn vertex_round(vertex: VertexInput) -> VertexOutput {
-var clip_a = view.view_proj * vec4(vertex.position_a, 1.);
-var clip_b = view.view_proj * vec4(vertex.position_b, 1.);
-var clip_c = view.view_proj * vec4(vertex.position_c, 1.);
+var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.);
+var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.);
+var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.);
 
 // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader.
 clip_a = clip_near_plane(clip_a, clip_c);
@@ -245,4 +245,4 @@ struct FragmentOutput {
 fn fragment(in: FragmentInput) -> FragmentOutput {
 // return FragmentOutput(vec4(1, 1, 1, 1));
 return FragmentOutput(in.color);
 }
@@ -44,8 +44,8 @@ fn vertex(vertex: VertexInput) -> VertexOutput {
 let position = positions[vertex.index];
 
 // algorithm based on https://wwwtyro.net/2019/11/18/instanced-lines.html
-var clip_a = view.view_proj * vec4(vertex.position_a, 1.);
-var clip_b = view.view_proj * vec4(vertex.position_b, 1.);
+var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.);
+var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.);
 
 // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader.
 clip_a = clip_near_plane(clip_a, clip_b);
@@ -69,13 +69,13 @@ fn vertex(vertex: VertexInput) -> VertexOutput {
 line_width /= clip.w;
 
 // get height of near clipping plane in world space
-let pos0 = view.inverse_projection * vec4(0, -1, 0, 1); // Bottom of the screen
-let pos1 = view.inverse_projection * vec4(0, 1, 0, 1); // Top of the screen
+let pos0 = view.view_from_clip * vec4(0, -1, 0, 1); // Bottom of the screen
+let pos1 = view.view_from_clip * vec4(0, 1, 0, 1); // Top of the screen
 let near_clipping_plane_height = length(pos0.xyz - pos1.xyz);
 
 // We can't use vertex.position_X because we may have changed the clip positions with clip_near_plane
-let position_a = view.inverse_view_proj * clip_a;
-let position_b = view.inverse_view_proj * clip_b;
+let position_a = view.inverse_clip_from_world * clip_a;
+let position_b = view.inverse_clip_from_world * clip_b;
 let world_distance = length(position_a.xyz - position_b.xyz);
 
 // Offset to compensate for moved clip positions. If removed dots on lines will slide when position a is ofscreen.
@@ -84,7 +84,7 @@ fn vertex(vertex: VertexInput) -> VertexOutput {
 uv = (clipped_offset + position.y * world_distance) * resolution.y / near_clipping_plane_height / line_gizmo.line_width;
 #else
 // Get the distance of b to the camera along camera axes
-let camera_b = view.inverse_projection * clip_b;
+let camera_b = view.view_from_clip * clip_b;
 
 // This differentiates between orthographic and perspective cameras.
 // For orthographic cameras no depth adaptment (depth_adaptment = 1) is needed.
@@ -615,7 +615,7 @@ async fn load_gltf<'a, 'b, 'c>(
 .skins()
 .map(|gltf_skin| {
 let reader = gltf_skin.reader(|buffer| Some(&buffer_data[buffer.index()]));
-let inverse_bindposes: Vec<Mat4> = reader
+let local_to_bone_bind_matrices: Vec<Mat4> = reader
 .read_inverse_bind_matrices()
 .unwrap()
 .map(|mat| Mat4::from_cols_array_2d(&mat))
@@ -623,7 +623,7 @@ async fn load_gltf<'a, 'b, 'c>(
 
 load_context.add_labeled_asset(
 skin_label(&gltf_skin),
-SkinnedMeshInverseBindposes::from(inverse_bindposes),
+SkinnedMeshInverseBindposes::from(local_to_bone_bind_matrices),
 )
 })
 .collect();
@@ -209,20 +209,20 @@ pub(crate) fn assign_lights_to_clusters(
 
 let mut requested_cluster_dimensions = config.dimensions_for_screen_size(screen_size);
 
-let view_transform = camera_transform.compute_matrix();
-let view_inv_scale = camera_transform.compute_transform().scale.recip();
-let view_inv_scale_max = view_inv_scale.abs().max_element();
-let inverse_view_transform = view_transform.inverse();
-let is_orthographic = camera.projection_matrix().w_axis.w == 1.0;
+let world_from_view = camera_transform.compute_matrix();
+let view_from_world_scale = camera_transform.compute_transform().scale.recip();
+let view_from_world_scale_max = view_from_world_scale.abs().max_element();
+let view_from_world = world_from_view.inverse();
+let is_orthographic = camera.clip_from_view().w_axis.w == 1.0;
 
 let far_z = match config.far_z_mode() {
 ClusterFarZMode::MaxLightRange => {
-let inverse_view_row_2 = inverse_view_transform.row(2);
+let view_from_world_row_2 = view_from_world.row(2);
 lights
 .iter()
 .map(|light| {
--inverse_view_row_2.dot(light.transform.translation().extend(1.0))
-+ light.range * view_inv_scale.z
+-view_from_world_row_2.dot(light.transform.translation().extend(1.0))
++ light.range * view_from_world_scale.z
 })
 .reduce(f32::max)
 .unwrap_or(0.0)
@@ -239,12 +239,12 @@ pub(crate) fn assign_lights_to_clusters(
 // 3,2 = r * far and 2,2 = r where r = 1.0 / (far - near)
 // rearranging r = 1.0 / (far - near), r * (far - near) = 1.0, r * far - 1.0 = r * near, near = (r * far - 1.0) / r
 // = (3,2 - 1.0) / 2,2
-(camera.projection_matrix().w_axis.z - 1.0) / camera.projection_matrix().z_axis.z
+(camera.clip_from_view().w_axis.z - 1.0) / camera.clip_from_view().z_axis.z
 }
 (false, 1) => config.first_slice_depth().max(far_z),
 _ => config.first_slice_depth(),
 };
-let first_slice_depth = first_slice_depth * view_inv_scale.z;
+let first_slice_depth = first_slice_depth * view_from_world_scale.z;
 
 // NOTE: Ensure the far_z is at least as far as the first_depth_slice to avoid clustering problems.
 let far_z = far_z.max(first_slice_depth);
@@ -269,9 +269,9 @@ pub(crate) fn assign_lights_to_clusters(
 // this overestimates index counts by at most 50% (and typically much less) when the whole light range is in view
 // it can overestimate more significantly when light ranges are only partially in view
 let (light_aabb_min, light_aabb_max) = cluster_space_light_aabb(
-inverse_view_transform,
-view_inv_scale,
-camera.projection_matrix(),
+view_from_world,
+view_from_world_scale,
+camera.clip_from_view(),
 &light_sphere,
 );
 
@@ -337,7 +337,7 @@ pub(crate) fn assign_lights_to_clusters(
 clusters.dimensions.x * clusters.dimensions.y * clusters.dimensions.z <= 4096
 );
 
-let inverse_projection = camera.projection_matrix().inverse();
+let view_from_clip = camera.clip_from_view().inverse();
 
 for lights in &mut clusters.lights {
 lights.entities.clear();
@@ -364,7 +364,7 @@ pub(crate) fn assign_lights_to_clusters(
 for x in 0..=clusters.dimensions.x {
 let x_proportion = x as f32 / x_slices;
 let x_pos = x_proportion * 2.0 - 1.0;
-let view_x = clip_to_view(inverse_projection, Vec4::new(x_pos, 0.0, 1.0, 1.0)).x;
+let view_x = clip_to_view(view_from_clip, Vec4::new(x_pos, 0.0, 1.0, 1.0)).x;
 let normal = Vec3::X;
 let d = view_x * normal.x;
 x_planes.push(HalfSpace::new(normal.extend(d)));
@@ -374,7 +374,7 @@ pub(crate) fn assign_lights_to_clusters(
 for y in 0..=clusters.dimensions.y {
 let y_proportion = 1.0 - y as f32 / y_slices;
 let y_pos = y_proportion * 2.0 - 1.0;
-let view_y = clip_to_view(inverse_projection, Vec4::new(0.0, y_pos, 1.0, 1.0)).y;
+let view_y = clip_to_view(view_from_clip, Vec4::new(0.0, y_pos, 1.0, 1.0)).y;
 let normal = Vec3::Y;
 let d = view_y * normal.y;
 y_planes.push(HalfSpace::new(normal.extend(d)));
@@ -384,8 +384,8 @@ pub(crate) fn assign_lights_to_clusters(
 for x in 0..=clusters.dimensions.x {
 let x_proportion = x as f32 / x_slices;
 let x_pos = x_proportion * 2.0 - 1.0;
-let nb = clip_to_view(inverse_projection, Vec4::new(x_pos, -1.0, 1.0, 1.0)).xyz();
-let nt = clip_to_view(inverse_projection, Vec4::new(x_pos, 1.0, 1.0, 1.0)).xyz();
+let nb = clip_to_view(view_from_clip, Vec4::new(x_pos, -1.0, 1.0, 1.0)).xyz();
+let nt = clip_to_view(view_from_clip, Vec4::new(x_pos, 1.0, 1.0, 1.0)).xyz();
 let normal = nb.cross(nt);
 let d = nb.dot(normal);
 x_planes.push(HalfSpace::new(normal.extend(d)));
@@ -395,8 +395,8 @@ pub(crate) fn assign_lights_to_clusters(
 for y in 0..=clusters.dimensions.y {
 let y_proportion = 1.0 - y as f32 / y_slices;
 let y_pos = y_proportion * 2.0 - 1.0;
-let nl = clip_to_view(inverse_projection, Vec4::new(-1.0, y_pos, 1.0, 1.0)).xyz();
-let nr = clip_to_view(inverse_projection, Vec4::new(1.0, y_pos, 1.0, 1.0)).xyz();
+let nl = clip_to_view(view_from_clip, Vec4::new(-1.0, y_pos, 1.0, 1.0)).xyz();
+let nr = clip_to_view(view_from_clip, Vec4::new(1.0, y_pos, 1.0, 1.0)).xyz();
 let normal = nr.cross(nl);
 let d = nr.dot(normal);
 y_planes.push(HalfSpace::new(normal.extend(d)));
@@ -432,9 +432,9 @@ pub(crate) fn assign_lights_to_clusters(
 // note: caching seems to be slower than calling twice for this aabb calculation
 let (light_aabb_xy_ndc_z_view_min, light_aabb_xy_ndc_z_view_max) =
 cluster_space_light_aabb(
-inverse_view_transform,
-view_inv_scale,
-camera.projection_matrix(),
+view_from_world,
+view_from_world_scale,
+camera.clip_from_view(),
 &light_sphere,
 );
 
@@ -463,13 +463,13 @@ pub(crate) fn assign_lights_to_clusters(
 // as they often assume that the widest part of the sphere under projection is the
 // center point on the axis of interest plus the radius, and that is not true!
 let view_light_sphere = Sphere {
-center: Vec3A::from(inverse_view_transform * light_sphere.center.extend(1.0)),
-radius: light_sphere.radius * view_inv_scale_max,
+center: Vec3A::from(view_from_world * light_sphere.center.extend(1.0)),
+radius: light_sphere.radius * view_from_world_scale_max,
 };
 let spot_light_dir_sin_cos = light.spot_light_angle.map(|angle| {
 let (angle_sin, angle_cos) = angle.sin_cos();
 (
-(inverse_view_transform * light.transform.back().extend(0.0))
+(view_from_world * light.transform.back().extend(0.0))
 .truncate()
 .normalize(),
 angle_sin,
@@ -477,7 +477,7 @@ pub(crate) fn assign_lights_to_clusters(
 )
 });
 let light_center_clip =
-camera.projection_matrix() * view_light_sphere.center.extend(1.0);
+camera.clip_from_view() * view_light_sphere.center.extend(1.0);
 let light_center_ndc = light_center_clip.xyz() / light_center_clip.w;
 let cluster_coordinates = ndc_position_to_cluster(
 clusters.dimensions,
@@ -586,7 +586,7 @@ pub(crate) fn assign_lights_to_clusters(
 far_z,
 clusters.tile_size.as_vec2(),
 screen_size.as_vec2(),
-inverse_projection,
+view_from_clip,
 is_orthographic,
 clusters.dimensions,
 UVec3::new(x, y, z),
@@ -613,7 +613,8 @@ pub(crate) fn assign_lights_to_clusters(
 distance_closest_point > cluster_aabb_sphere.radius;
 
 let front_cull = v1_len
-> cluster_aabb_sphere.radius + light.range * view_inv_scale_max;
+> cluster_aabb_sphere.radius
++ light.range * view_from_world_scale_max;
 let back_cull = v1_len < -cluster_aabb_sphere.radius;
 
 if !angle_cull && !front_cull && !back_cull {
@@ -657,7 +658,7 @@ fn compute_aabb_for_cluster(
 z_far: f32,
 tile_size: Vec2,
 screen_size: Vec2,
-inverse_projection: Mat4,
+view_from_clip: Mat4,
 is_orthographic: bool,
 cluster_dimensions: UVec3,
 ijk: UVec3,
@@ -675,8 +676,8 @@ fn compute_aabb_for_cluster(
 
 // Convert to view space at the cluster near and far planes
 // NOTE: 1.0 is the near plane due to using reverse z projections
-let mut p_min = screen_to_view(screen_size, inverse_projection, p_min, 0.0).xyz();
-let mut p_max = screen_to_view(screen_size, inverse_projection, p_max, 0.0).xyz();
+let mut p_min = screen_to_view(screen_size, view_from_clip, p_min, 0.0).xyz();
+let mut p_max = screen_to_view(screen_size, view_from_clip, p_max, 0.0).xyz();
 
 // calculate cluster depth using z_near and z_far
 p_min.z = -z_near + (z_near - z_far) * ijk.z / cluster_dimensions.z as f32;
@@ -687,8 +688,8 @@ fn compute_aabb_for_cluster(
 } else {
 // Convert to view space at the near plane
 // NOTE: 1.0 is the near plane due to using reverse z projections
-let p_min = screen_to_view(screen_size, inverse_projection, p_min, 1.0);
-let p_max = screen_to_view(screen_size, inverse_projection, p_max, 1.0);
+let p_min = screen_to_view(screen_size, view_from_clip, p_min, 1.0);
+let p_max = screen_to_view(screen_size, view_from_clip, p_max, 1.0);
 
 let z_far_over_z_near = -z_far / -z_near;
 let cluster_near = if ijk.z == 0.0 {
@@ -763,14 +764,14 @@ fn ndc_position_to_cluster(
 /// `X` and `Y` in normalized device coordinates with range `[-1, 1]`
 /// `Z` in view space, with range `[-inf, -f32::MIN_POSITIVE]`
 fn cluster_space_light_aabb(
-inverse_view_transform: Mat4,
-view_inv_scale: Vec3,
-projection_matrix: Mat4,
+view_from_world: Mat4,
+view_from_world_scale: Vec3,
+clip_from_view: Mat4,
 light_sphere: &Sphere,
 ) -> (Vec3, Vec3) {
 let light_aabb_view = Aabb {
-center: Vec3A::from(inverse_view_transform * light_sphere.center.extend(1.0)),
-half_extents: Vec3A::from(light_sphere.radius * view_inv_scale.abs()),
+center: Vec3A::from(view_from_world * light_sphere.center.extend(1.0)),
+half_extents: Vec3A::from(light_sphere.radius * view_from_world_scale.abs()),
 };
 let (mut light_aabb_view_min, mut light_aabb_view_max) =
 (light_aabb_view.min(), light_aabb_view.max());
@@ -806,10 +807,10 @@ fn cluster_space_light_aabb(
 light_aabb_clip_xymax_near,
 light_aabb_clip_xymax_far,
 ) = (
-projection_matrix * light_aabb_view_xymin_near.extend(1.0),
-projection_matrix * light_aabb_view_xymin_far.extend(1.0),
-projection_matrix * light_aabb_view_xymax_near.extend(1.0),
-projection_matrix * light_aabb_view_xymax_far.extend(1.0),
+clip_from_view * light_aabb_view_xymin_near.extend(1.0),
+clip_from_view * light_aabb_view_xymin_far.extend(1.0),
+clip_from_view * light_aabb_view_xymax_near.extend(1.0),
+clip_from_view * light_aabb_view_xymax_far.extend(1.0),
 );
 let (
 light_aabb_ndc_xymin_near,
@@ -872,12 +873,12 @@ fn view_z_to_z_slice(
 z_slice.min(z_slices - 1)
 }
 
-fn clip_to_view(inverse_projection: Mat4, clip: Vec4) -> Vec4 {
-let view = inverse_projection * clip;
+fn clip_to_view(view_from_clip: Mat4, clip: Vec4) -> Vec4 {
+let view = view_from_clip * clip;
 view / view.w
 }
 
-fn screen_to_view(screen_size: Vec2, inverse_projection: Mat4, screen: Vec2, ndc_z: f32) -> Vec4 {
+fn screen_to_view(screen_size: Vec2, view_from_clip: Mat4, screen: Vec2, ndc_z: f32) -> Vec4 {
 let tex_coord = screen / screen_size;
 let clip = Vec4::new(
 tex_coord.x * 2.0 - 1.0,
@@ -885,7 +886,7 @@ fn screen_to_view(screen_size: Vec2, inverse_projection: Mat4, screen: Vec2, ndc
 ndc_z,
 1.0,
 );
-clip_to_view(inverse_projection, clip)
+clip_to_view(view_from_clip, clip)
 }
 
 // NOTE: This exploits the fact that a x-plane normal has only x and z components
@@ -95,7 +95,7 @@ fn pbr_input_from_deferred_gbuffer(frag_coord: vec4<f32>, gbuffer: vec4<u32>) ->
 let N = octahedral_decode(octahedral_normal);
 
 let world_position = vec4(position_ndc_to_world(frag_coord_to_ndc(frag_coord)), 1.0);
-let is_orthographic = view.projection[3].w == 1.0;
+let is_orthographic = view.clip_from_view[3].w == 1.0;
 let V = pbr_functions::calculate_view(world_position, is_orthographic);
 
 pbr.frag_coord = frag_coord;
@@ -277,13 +277,13 @@ pub struct Cascades {
 #[derive(Clone, Debug, Default, Reflect)]
 pub struct Cascade {
 /// The transform of the light, i.e. the view to world matrix.
-pub(crate) view_transform: Mat4,
+pub(crate) world_from_cascade: Mat4,
 /// The orthographic projection for this cascade.
-pub(crate) projection: Mat4,
+pub(crate) clip_from_cascade: Mat4,
 /// The view-projection matrix for this cascade, converting world space into light clip space.
 /// Importantly, this is derived and stored separately from `view_transform` and `projection` to
 /// ensure shadow stability.
-pub(crate) view_projection: Mat4,
+pub(crate) clip_from_world: Mat4,
 /// Size of each shadow map texel in world units.
 pub(crate) texel_size: f32,
 }
@@ -329,8 +329,8 @@ pub fn build_directional_light_cascades<P: CameraProjection + Component>(
 // users to not change any other aspects of the transform - there's no guarantee
 // `transform.compute_matrix()` will give us a matrix with our desired properties.
 // Instead, we directly create a good matrix from just the rotation.
-let light_to_world = Mat4::from_quat(transform.compute_transform().rotation);
-let light_to_world_inverse = light_to_world.inverse();
+let world_from_light = Mat4::from_quat(transform.compute_transform().rotation);
+let light_to_world_inverse = world_from_light.inverse();
 
 for (view_entity, projection, view_to_world) in views.iter().copied() {
 let camera_to_light_view = light_to_world_inverse * view_to_world;
@@ -353,7 +353,7 @@ pub fn build_directional_light_cascades<P: CameraProjection + Component>(
 calculate_cascade(
 corners,
 directional_light_shadow_map.size as f32,
-light_to_world,
+world_from_light,
 camera_to_light_view,
 )
 })
@@ -369,13 +369,13 @@ pub fn build_directional_light_cascades<P: CameraProjection + Component>(
 fn calculate_cascade(
 frustum_corners: [Vec3A; 8],
 cascade_texture_size: f32,
-light_to_world: Mat4,
-camera_to_light: Mat4,
+world_from_light: Mat4,
+light_from_camera: Mat4,
 ) -> Cascade {
 let mut min = Vec3A::splat(f32::MAX);
 let mut max = Vec3A::splat(f32::MIN);
 for corner_camera_view in frustum_corners {
-let corner_light_view = camera_to_light.transform_point3a(corner_camera_view);
+let corner_light_view = light_from_camera.transform_point3a(corner_camera_view);
 min = min.min(corner_light_view);
 max = max.max(corner_light_view);
 }
@@ -408,8 +408,8 @@ fn calculate_cascade(
 // It is critical for `world_to_cascade` to be stable. So rather than forming `cascade_to_world`
 // and inverting it, which risks instability due to numerical precision, we directly form
 // `world_to_cascade` as the reference material suggests.
-let light_to_world_transpose = light_to_world.transpose();
-let world_to_cascade = Mat4::from_cols(
+let light_to_world_transpose = world_from_light.transpose();
+let cascade_from_world = Mat4::from_cols(
 light_to_world_transpose.x_axis,
 light_to_world_transpose.y_axis,
 light_to_world_transpose.z_axis,
@@ -419,18 +419,18 @@ fn calculate_cascade(
 // Right-handed orthographic projection, centered at `near_plane_center`.
 // NOTE: This is different from the reference material, as we use reverse Z.
 let r = (max.z - min.z).recip();
-let cascade_projection = Mat4::from_cols(
+let clip_from_cascade = Mat4::from_cols(
 Vec4::new(2.0 / cascade_diameter, 0.0, 0.0, 0.0),
 Vec4::new(0.0, 2.0 / cascade_diameter, 0.0, 0.0),
 Vec4::new(0.0, 0.0, r, 0.0),
 Vec4::new(0.0, 0.0, 1.0, 1.0),
 );
 
-let cascade_view_projection = cascade_projection * world_to_cascade;
+let clip_from_world = clip_from_cascade * cascade_from_world;
 Cascade {
-view_transform: world_to_cascade.inverse(),
-projection: cascade_projection,
-view_projection: cascade_view_projection,
+world_from_cascade: cascade_from_world.inverse(),
+clip_from_cascade,
+clip_from_world,
 texel_size: cascade_texel_size,
 }
 }
@@ -551,7 +551,7 @@ pub fn update_directional_light_frusta(
 *view,
 cascades
 .iter()
-.map(|c| Frustum::from_view_projection(&c.view_projection))
+.map(|c| Frustum::from_clip_from_world(&c.clip_from_world))
 .collect::<Vec<_>>(),
 )
 })
@@ -567,7 +567,7 @@ pub fn update_point_light_frusta(
 Or<(Changed<GlobalTransform>, Changed<PointLight>)>,
 >,
 ) {
-let projection =
+let clip_from_view =
 Mat4::perspective_infinite_reverse_rh(std::f32::consts::FRAC_PI_2, 1.0, POINT_LIGHT_NEAR_Z);
 let view_rotations = CUBE_MAP_FACES
 .iter()
@@ -591,11 +591,11 @@ pub fn update_point_light_frusta(
 let view_backward = transform.back();
 
 for (view_rotation, frustum) in view_rotations.iter().zip(cubemap_frusta.iter_mut()) {
-let view = view_translation * *view_rotation;
-let view_projection = projection * view.compute_matrix().inverse();
+let world_from_view = view_translation * *view_rotation;
+let clip_from_world = clip_from_view * world_from_view.compute_matrix().inverse();
 
-*frustum = Frustum::from_view_projection_custom_far(
-&view_projection,
+*frustum = Frustum::from_clip_from_world_custom_far(
+&clip_from_world,
 &transform.translation(),
 &view_backward,
 point_light.range,
@@ -625,12 +625,12 @@ pub fn update_spot_light_frusta(
 // by applying those as a view transform to shadow map rendering of objects
 let view_backward = transform.back();
 
-let spot_view = spot_light_view_matrix(transform);
-let spot_projection = spot_light_projection_matrix(spot_light.outer_angle);
-let view_projection = spot_projection * spot_view.inverse();
+let spot_world_from_view = spot_light_world_from_view(transform);
+let spot_clip_from_view = spot_light_clip_from_view(spot_light.outer_angle);
+let clip_from_world = spot_clip_from_view * spot_world_from_view.inverse();
 
-*frustum = Frustum::from_view_projection_custom_far(
-&view_projection,
+*frustum = Frustum::from_clip_from_world_custom_far(
+&clip_from_world,
 &transform.translation(),
 &view_backward,
 spot_light.range,
@@ -32,7 +32,7 @@ fn irradiance_volume_light(world_position: vec3<f32>, N: vec3<f32>) -> vec3<f32>
 let resolution = vec3<f32>(textureDimensions(irradiance_volume_texture) / vec3(1u, 2u, 3u));
 
 // Make sure to clamp to the edges to avoid texture bleed.
-var unit_pos = (query_result.inverse_transform * vec4(world_position, 1.0f)).xyz;
+var unit_pos = (query_result.light_from_world * vec4(world_position, 1.0f)).xyz;
 let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f));
 let uvw = stp / atlas_resolution;
 
@@ -13,7 +13,7 @@ struct LightProbeQueryResult {
 intensity: f32,
 // Transform from world space to the light probe model space. In light probe
 // model space, the light probe is a 1×1×1 cube centered on the origin.
-inverse_transform: mat4x4<f32>,
+light_from_world: mat4x4<f32>,
 };
 
 fn transpose_affine_matrix(matrix: mat3x4<f32>) -> mat4x4<f32> {
@@ -53,16 +53,16 @@ fn query_light_probe(
 }
 
 // Unpack the inverse transform.
-let inverse_transform =
-transpose_affine_matrix(light_probe.inverse_transpose_transform);
+let light_from_world =
+transpose_affine_matrix(light_probe.light_from_world_transposed);
 
 // Check to see if the transformed point is inside the unit cube
 // centered at the origin.
-let probe_space_pos = (inverse_transform * vec4<f32>(world_position, 1.0f)).xyz;
+let probe_space_pos = (light_from_world * vec4<f32>(world_position, 1.0f)).xyz;
 if (all(abs(probe_space_pos) <= vec3(0.5f))) {
 result.texture_index = light_probe.cubemap_index;
 result.intensity = light_probe.intensity;
-result.inverse_transform = inverse_transform;
+result.light_from_world = light_from_world;
 
 // TODO: Workaround for ICE in DXC https://github.com/microsoft/DirectXShaderCompiler/issues/6183
 // We can't use `break` here because of the ICE.
@@ -111,7 +111,7 @@ pub struct LightProbe;
 struct RenderLightProbe {
 /// The transform from the world space to the model space. This is used to
 /// efficiently check for bounding box intersection.
-inverse_transpose_transform: [Vec4; 3],
+light_from_world_transposed: [Vec4; 3],
 
 /// The index of the texture or textures in the appropriate binding array or
 /// arrays.
@@ -179,10 +179,10 @@ where
 C: LightProbeComponent,
 {
 // The transform from world space to light probe space.
-inverse_transform: Mat4,
+light_from_world: Mat4,
 
 // The transform from light probe space to world space.
-affine_transform: Affine3A,
+world_from_light: Affine3A,
 
 // Scale factor applied to the diffuse and specular light generated by this
 // reflection probe.
@@ -508,8 +508,8 @@ where
 image_assets: &RenderAssets<GpuImage>,
 ) -> Option<LightProbeInfo<C>> {
 environment_map.id(image_assets).map(|id| LightProbeInfo {
-affine_transform: light_probe_transform.affine(),
-inverse_transform: light_probe_transform.compute_matrix().inverse(),
+world_from_light: light_probe_transform.affine(),
+light_from_world: light_probe_transform.compute_matrix().inverse(),
 asset_id: id,
 intensity: environment_map.intensity(),
 })
@@ -523,7 +523,7 @@ where
 center: Vec3A::default(),
 half_extents: Vec3A::splat(0.5),
 },
-&self.affine_transform,
+&self.world_from_light,
 true,
 false,
 )
@@ -533,7 +533,7 @@ where
 /// suitable for distance sorting.
 fn camera_distance_sort_key(&self, view_transform: &GlobalTransform) -> FloatOrd {
 FloatOrd(
-(self.affine_transform.translation - view_transform.translation_vec3a())
+(self.world_from_light.translation - view_transform.translation_vec3a())
 .length_squared(),
 )
 }
@@ -598,14 +598,14 @@ where
 // Transpose the inverse transform to compress the structure on the
 // GPU (from 4 `Vec4`s to 3 `Vec4`s). The shader will transpose it
 // to recover the original inverse transform.
-let inverse_transpose_transform = light_probe.inverse_transform.transpose();
+let light_from_world_transposed = light_probe.light_from_world.transpose();
 
 // Write in the light probe data.
 self.render_light_probes.push(RenderLightProbe {
-inverse_transpose_transform: [
-inverse_transpose_transform.x_axis,
-inverse_transpose_transform.y_axis,
-inverse_transpose_transform.z_axis,
+light_from_world_transposed: [
+light_from_world_transposed.x_axis,
+light_from_world_transposed.y_axis,
+light_from_world_transposed.z_axis,
 ],
 texture_index: cubemap_index as i32,
 intensity: light_probe.intensity,
@@ -620,8 +620,8 @@ where
 {
 fn clone(&self) -> Self {
 Self {
-inverse_transform: self.inverse_transform,
-affine_transform: self.affine_transform,
+light_from_world: self.light_from_world,
+world_from_light: self.world_from_light,
 intensity: self.intensity,
 asset_id: self.asset_id.clone(),
 }
@@ -44,11 +44,11 @@ fn cull_meshlets(
 // Calculate world-space culling bounding sphere for the cluster
 let instance_uniform = meshlet_instance_uniforms[instance_id];
 let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id];
-let model = affine3_to_square(instance_uniform.model);
-let model_scale = max(length(model[0]), max(length(model[1]), length(model[2])));
+let world_from_local = affine3_to_square(instance_uniform.world_from_local);
+let world_scale = max(length(world_from_local[0]), max(length(world_from_local[1]), length(world_from_local[2])));
 let bounding_spheres = meshlet_bounding_spheres[meshlet_id];
-var culling_bounding_sphere_center = model * vec4(bounding_spheres.self_culling.center, 1.0);
-var culling_bounding_sphere_radius = model_scale * bounding_spheres.self_culling.radius;
+var culling_bounding_sphere_center = world_from_local * vec4(bounding_spheres.self_culling.center, 1.0);
+var culling_bounding_sphere_radius = world_scale * bounding_spheres.self_culling.radius;
 
 #ifdef MESHLET_FIRST_CULLING_PASS
 // Frustum culling
@@ -60,14 +60,14 @@ fn cull_meshlets(
 }
 
 // Calculate view-space LOD bounding sphere for the meshlet
-let lod_bounding_sphere_center = model * vec4(bounding_spheres.self_lod.center, 1.0);
-let lod_bounding_sphere_radius = model_scale * bounding_spheres.self_lod.radius;
-let lod_bounding_sphere_center_view_space = (view.inverse_view * vec4(lod_bounding_sphere_center.xyz, 1.0)).xyz;
+let lod_bounding_sphere_center = world_from_local * vec4(bounding_spheres.self_lod.center, 1.0);
+let lod_bounding_sphere_radius = world_scale * bounding_spheres.self_lod.radius;
+let lod_bounding_sphere_center_view_space = (view.view_from_world * vec4(lod_bounding_sphere_center.xyz, 1.0)).xyz;
 
 // Calculate view-space LOD bounding sphere for the meshlet's parent
-let parent_lod_bounding_sphere_center = model * vec4(bounding_spheres.parent_lod.center, 1.0);
-let parent_lod_bounding_sphere_radius = model_scale * bounding_spheres.parent_lod.radius;
-let parent_lod_bounding_sphere_center_view_space = (view.inverse_view * vec4(parent_lod_bounding_sphere_center.xyz, 1.0)).xyz;
+let parent_lod_bounding_sphere_center = world_from_local * vec4(bounding_spheres.parent_lod.center, 1.0);
+let parent_lod_bounding_sphere_radius = world_scale * bounding_spheres.parent_lod.radius;
+let parent_lod_bounding_sphere_center_view_space = (view.view_from_world * vec4(parent_lod_bounding_sphere_center.xyz, 1.0)).xyz;
 
 // Check LOD cut (meshlet error imperceptible, and parent error not imperceptible)
 let lod_is_ok = lod_error_is_imperceptible(lod_bounding_sphere_center_view_space, lod_bounding_sphere_radius);
@ -77,12 +77,12 @@ fn cull_meshlets(
|
||||||
|
|
||||||
// Project the culling bounding sphere to view-space for occlusion culling
|
// Project the culling bounding sphere to view-space for occlusion culling
|
||||||
#ifdef MESHLET_FIRST_CULLING_PASS
|
#ifdef MESHLET_FIRST_CULLING_PASS
|
||||||
let previous_model = affine3_to_square(instance_uniform.previous_model);
|
let previous_world_from_local = affine3_to_square(instance_uniform.previous_world_from_local);
|
||||||
let previous_model_scale = max(length(previous_model[0]), max(length(previous_model[1]), length(previous_model[2])));
|
let previous_world_from_local_scale = max(length(previous_world_from_local[0]), max(length(previous_world_from_local[1]), length(previous_world_from_local[2])));
|
||||||
culling_bounding_sphere_center = previous_model * vec4(bounding_spheres.self_culling.center, 1.0);
|
culling_bounding_sphere_center = previous_world_from_local * vec4(bounding_spheres.self_culling.center, 1.0);
|
||||||
culling_bounding_sphere_radius = previous_model_scale * bounding_spheres.self_culling.radius;
|
culling_bounding_sphere_radius = previous_world_from_local_scale * bounding_spheres.self_culling.radius;
|
||||||
#endif
|
#endif
|
||||||
let culling_bounding_sphere_center_view_space = (view.inverse_view * vec4(culling_bounding_sphere_center.xyz, 1.0)).xyz;
|
let culling_bounding_sphere_center_view_space = (view.view_from_world * vec4(culling_bounding_sphere_center.xyz, 1.0)).xyz;
|
||||||
|
|
||||||
let aabb = project_view_space_sphere_to_screen_space_aabb(culling_bounding_sphere_center_view_space, culling_bounding_sphere_radius);
|
let aabb = project_view_space_sphere_to_screen_space_aabb(culling_bounding_sphere_center_view_space, culling_bounding_sphere_radius);
|
||||||
// Halve the view-space AABB size as the depth pyramid is half the view size
|
// Halve the view-space AABB size as the depth pyramid is half the view size
|
||||||
|
@ -101,13 +101,13 @@ fn cull_meshlets(
|
||||||
|
|
||||||
// Check whether or not the cluster would be occluded if drawn
|
// Check whether or not the cluster would be occluded if drawn
|
||||||
var meshlet_visible: bool;
|
var meshlet_visible: bool;
|
||||||
if view.projection[3][3] == 1.0 {
|
if view.clip_from_view[3][3] == 1.0 {
|
||||||
// Orthographic
|
// Orthographic
|
||||||
let sphere_depth = view.projection[3][2] + (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius) * view.projection[2][2];
|
let sphere_depth = view.clip_from_view[3][2] + (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius) * view.clip_from_view[2][2];
|
||||||
meshlet_visible = sphere_depth >= occluder_depth;
|
meshlet_visible = sphere_depth >= occluder_depth;
|
||||||
} else {
|
} else {
|
||||||
// Perspective
|
// Perspective
|
||||||
let sphere_depth = -view.projection[3][2] / (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius);
|
let sphere_depth = -view.clip_from_view[3][2] / (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius);
|
||||||
meshlet_visible = sphere_depth >= occluder_depth;
|
meshlet_visible = sphere_depth >= occluder_depth;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -132,7 +132,7 @@ fn cull_meshlets(
|
||||||
fn lod_error_is_imperceptible(cp: vec3<f32>, r: f32) -> bool {
|
fn lod_error_is_imperceptible(cp: vec3<f32>, r: f32) -> bool {
|
||||||
let d2 = dot(cp, cp);
|
let d2 = dot(cp, cp);
|
||||||
let r2 = r * r;
|
let r2 = r * r;
|
||||||
let sphere_diameter_uv = view.projection[0][0] * r / sqrt(d2 - r2);
|
let sphere_diameter_uv = view.clip_from_view[0][0] * r / sqrt(d2 - r2);
|
||||||
let view_size = f32(max(view.viewport.z, view.viewport.w));
|
let view_size = f32(max(view.viewport.z, view.viewport.w));
|
||||||
let sphere_diameter_pixels = sphere_diameter_uv * view_size;
|
let sphere_diameter_pixels = sphere_diameter_uv * view_size;
|
||||||
return sphere_diameter_pixels < 1.0;
|
return sphere_diameter_pixels < 1.0;
|
||||||
|
@ -140,9 +140,9 @@ fn lod_error_is_imperceptible(cp: vec3<f32>, r: f32) -> bool {
|
||||||
|
|
||||||
// https://zeux.io/2023/01/12/approximate-projected-bounds
|
// https://zeux.io/2023/01/12/approximate-projected-bounds
|
||||||
fn project_view_space_sphere_to_screen_space_aabb(cp: vec3<f32>, r: f32) -> vec4<f32> {
|
fn project_view_space_sphere_to_screen_space_aabb(cp: vec3<f32>, r: f32) -> vec4<f32> {
|
||||||
let inv_width = view.projection[0][0] * 0.5;
|
let inv_width = view.clip_from_view[0][0] * 0.5;
|
||||||
let inv_height = view.projection[1][1] * 0.5;
|
let inv_height = view.clip_from_view[1][1] * 0.5;
|
||||||
if view.projection[3][3] == 1.0 {
|
if view.clip_from_view[3][3] == 1.0 {
|
||||||
// Orthographic
|
// Orthographic
|
||||||
let min_x = cp.x - r;
|
let min_x = cp.x - r;
|
||||||
let max_x = cp.x + r;
|
let max_x = cp.x + r;
|
||||||
|
|
|
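Several of the hunks above detect an orthographic projection by testing the last element of `clip_from_view` (row 3, column 3) against 1.0. A small sketch of why that works, using glam's projection constructors; the parameter values here are illustrative, not taken from the engine:

```rust
use glam::Mat4;

fn main() {
    // For a perspective projection, clip.w is derived from view-space depth, so the
    // bottom-right element of the column-major matrix (w_axis.w) is 0.0. For an
    // orthographic projection clip.w is always 1.0, so that element is 1.0.
    let clip_from_view_persp = Mat4::perspective_infinite_reverse_rh(1.0, 1.0, 0.1);
    let clip_from_view_ortho = Mat4::orthographic_rh(-1.0, 1.0, -1.0, 1.0, 0.0, 10.0);

    assert_eq!(clip_from_view_persp.w_axis.w, 0.0);
    assert_eq!(clip_from_view_ortho.w_axis.w, 1.0);
}
```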
@@ -131,8 +131,8 @@ pub fn extract_meshlet_meshes(
             flags |= MeshFlags::SIGN_DETERMINANT_MODEL_3X3;
         }
         let transforms = MeshTransforms {
-            transform: (&transform).into(),
-            previous_transform: (&previous_transform).into(),
+            world_from_local: (&transform).into(),
+            previous_world_from_local: (&previous_transform).into(),
             flags: flags.bits(),
         };
         gpu_scene
@@ -50,9 +50,9 @@ fn vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
     let instance_id = meshlet_cluster_instance_ids[cluster_id];
     let instance_uniform = meshlet_instance_uniforms[instance_id];

-    let model = affine3_to_square(instance_uniform.model);
-    let world_position = mesh_position_local_to_world(model, vec4(vertex.position, 1.0));
-    var clip_position = view.view_proj * vec4(world_position.xyz, 1.0);
+    let world_from_local = affine3_to_square(instance_uniform.world_from_local);
+    let world_position = mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0));
+    var clip_position = view.clip_from_world * vec4(world_position.xyz, 1.0);
 #ifdef DEPTH_CLAMP_ORTHO
     let unclamped_clip_depth = clip_position.z;
     clip_position.z = min(clip_position.z, 1.0);

@@ -109,11 +109,11 @@ fn resolve_vertex_output(frag_coord: vec4<f32>) -> VertexOutput {

     let instance_id = meshlet_cluster_instance_ids[cluster_id];
     let instance_uniform = meshlet_instance_uniforms[instance_id];
-    let model = affine3_to_square(instance_uniform.model);
+    let world_from_local = affine3_to_square(instance_uniform.world_from_local);

-    let world_position_1 = mesh_position_local_to_world(model, vec4(vertex_1.position, 1.0));
-    let world_position_2 = mesh_position_local_to_world(model, vec4(vertex_2.position, 1.0));
-    let world_position_3 = mesh_position_local_to_world(model, vec4(vertex_3.position, 1.0));
+    let world_position_1 = mesh_position_local_to_world(world_from_local, vec4(vertex_1.position, 1.0));
+    let world_position_2 = mesh_position_local_to_world(world_from_local, vec4(vertex_2.position, 1.0));
+    let world_position_3 = mesh_position_local_to_world(world_from_local, vec4(vertex_3.position, 1.0));

     let clip_position_1 = position_world_to_clip(world_position_1.xyz);
     let clip_position_2 = position_world_to_clip(world_position_2.xyz);

@@ -129,8 +129,8 @@ fn resolve_vertex_output(frag_coord: vec4<f32>) -> VertexOutput {
     let vertex_normal = mat3x3(vertex_1.normal, vertex_2.normal, vertex_3.normal) * partial_derivatives.barycentrics;
     let world_normal = normalize(
         mat2x4_f32_to_mat3x3_unpack(
-            instance_uniform.inverse_transpose_model_a,
-            instance_uniform.inverse_transpose_model_b,
+            instance_uniform.local_from_world_transpose_a,
+            instance_uniform.local_from_world_transpose_b,
         ) * vertex_normal
     );
     let uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.barycentrics;

@@ -140,9 +140,9 @@ fn resolve_vertex_output(frag_coord: vec4<f32>) -> VertexOutput {
     let world_tangent = vec4(
         normalize(
             mat3x3(
-                model[0].xyz,
-                model[1].xyz,
-                model[2].xyz
+                world_from_local[0].xyz,
+                world_from_local[1].xyz,
+                world_from_local[2].xyz
             ) * vertex_tangent.xyz
         ),
         vertex_tangent.w * (f32(bool(instance_uniform.flags & MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT)) * 2.0 - 1.0)

@@ -150,13 +150,13 @@ fn resolve_vertex_output(frag_coord: vec4<f32>) -> VertexOutput {

 #ifdef PREPASS_FRAGMENT
 #ifdef MOTION_VECTOR_PREPASS
-    let previous_model = affine3_to_square(instance_uniform.previous_model);
-    let previous_world_position_1 = mesh_position_local_to_world(previous_model, vec4(vertex_1.position, 1.0));
-    let previous_world_position_2 = mesh_position_local_to_world(previous_model, vec4(vertex_2.position, 1.0));
-    let previous_world_position_3 = mesh_position_local_to_world(previous_model, vec4(vertex_3.position, 1.0));
-    let previous_clip_position_1 = previous_view_uniforms.view_proj * vec4(previous_world_position_1.xyz, 1.0);
-    let previous_clip_position_2 = previous_view_uniforms.view_proj * vec4(previous_world_position_2.xyz, 1.0);
-    let previous_clip_position_3 = previous_view_uniforms.view_proj * vec4(previous_world_position_3.xyz, 1.0);
+    let previous_world_from_local = affine3_to_square(instance_uniform.previous_world_from_local);
+    let previous_world_position_1 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_1.position, 1.0));
+    let previous_world_position_2 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_2.position, 1.0));
+    let previous_world_position_3 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_3.position, 1.0));
+    let previous_clip_position_1 = previous_view_uniforms.clip_from_world * vec4(previous_world_position_1.xyz, 1.0);
+    let previous_clip_position_2 = previous_view_uniforms.clip_from_world * vec4(previous_world_position_2.xyz, 1.0);
+    let previous_clip_position_3 = previous_view_uniforms.clip_from_world * vec4(previous_world_position_3.xyz, 1.0);
     let previous_partial_derivatives = compute_partial_derivatives(
         array(previous_clip_position_1, previous_clip_position_2, previous_clip_position_3),
         frag_coord_ndc,
@@ -204,10 +204,10 @@ pub fn update_previous_view_data(
     query: Query<(Entity, &Camera, &GlobalTransform), PreviousViewFilter>,
 ) {
     for (entity, camera, camera_transform) in &query {
-        let inverse_view = camera_transform.compute_matrix().inverse();
+        let view_from_world = camera_transform.compute_matrix().inverse();
         commands.entity(entity).try_insert(PreviousViewData {
-            inverse_view,
-            view_proj: camera.projection_matrix() * inverse_view,
+            view_from_world,
+            clip_from_world: camera.clip_from_view() * view_from_world,
         });
     }
 }

@@ -608,19 +608,19 @@ pub fn prepare_previous_view_uniforms(
     };

     for (entity, camera, maybe_previous_view_uniforms) in views_iter {
-        let view_projection = match maybe_previous_view_uniforms {
+        let prev_view_data = match maybe_previous_view_uniforms {
             Some(previous_view) => previous_view.clone(),
             None => {
-                let inverse_view = camera.transform.compute_matrix().inverse();
+                let view_from_world = camera.world_from_view.compute_matrix().inverse();
                 PreviousViewData {
-                    inverse_view,
-                    view_proj: camera.projection * inverse_view,
+                    view_from_world,
+                    clip_from_world: camera.clip_from_view * view_from_world,
                 }
             }
         };

         commands.entity(entity).insert(PreviousViewUniformOffset {
-            offset: writer.write(&view_projection),
+            offset: writer.write(&prev_view_data),
         });
     }
 }
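The renamed fields make the composition in these two systems explicit: the camera's global transform is `world_from_view`, its inverse is `view_from_world`, and multiplying by the projection gives the cached `clip_from_world`. A rough standalone sketch under those assumed names, using glam rather than the engine types:

```rust
use glam::{Mat4, Vec3};

fn main() {
    // Stand-in for the camera's GlobalTransform turned into a matrix.
    let world_from_view = Mat4::from_translation(Vec3::new(0.0, 1.0, 5.0));
    let view_from_world = world_from_view.inverse();
    let clip_from_view = Mat4::perspective_infinite_reverse_rh(1.0, 16.0 / 9.0, 0.1);

    // This is the product cached per view and used next frame to reproject
    // world-space positions (e.g. for motion vectors).
    let clip_from_world = clip_from_view * view_from_world;
    let _ = clip_from_world;
}
```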
@@ -63,14 +63,14 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {
 #endif

 #ifdef SKINNED
-    var model = skinning::skin_model(vertex.joint_indices, vertex.joint_weights);
+    var world_from_local = skinning::skin_model(vertex.joint_indices, vertex.joint_weights);
 #else // SKINNED
     // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug.
     // See https://github.com/gfx-rs/naga/issues/2416
-    var model = mesh_functions::get_model_matrix(vertex_no_morph.instance_index);
+    var world_from_local = mesh_functions::get_world_from_local(vertex_no_morph.instance_index);
 #endif // SKINNED

-    out.world_position = mesh_functions::mesh_position_local_to_world(model, vec4<f32>(vertex.position, 1.0));
+    out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4<f32>(vertex.position, 1.0));
     out.position = position_world_to_clip(out.world_position.xyz);
 #ifdef DEPTH_CLAMP_ORTHO
     out.clip_position_unclamped = out.position;

@@ -87,7 +87,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {

 #ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS
 #ifdef SKINNED
-    out.world_normal = skinning::skin_normals(model, vertex.normal);
+    out.world_normal = skinning::skin_normals(world_from_local, vertex.normal);
 #else // SKINNED
     out.world_normal = mesh_functions::mesh_normal_local_to_world(
         vertex.normal,

@@ -99,7 +99,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {

 #ifdef VERTEX_TANGENTS
     out.world_tangent = mesh_functions::mesh_tangent_local_to_world(
-        model,
+        world_from_local,
         vertex.tangent,
         // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug.
         // See https://github.com/gfx-rs/naga/issues/2416

@@ -138,11 +138,11 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {
             prev_vertex.joint_weights,
         );
 #else // HAS_PREVIOUS_SKIN
-        let prev_model = mesh_functions::get_previous_model_matrix(prev_vertex.instance_index);
+        let prev_model = mesh_functions::get_previous_world_from_local(prev_vertex.instance_index);
 #endif // HAS_PREVIOUS_SKIN

 #else // SKINNED
-        let prev_model = mesh_functions::get_previous_model_matrix(prev_vertex.instance_index);
+        let prev_model = mesh_functions::get_previous_world_from_local(prev_vertex.instance_index);
 #endif // SKINNED

     out.previous_world_position = mesh_functions::mesh_position_local_to_world(

@@ -174,9 +174,9 @@ fn fragment(in: VertexOutput) -> FragmentOutput {
 #endif // DEPTH_CLAMP_ORTHO

 #ifdef MOTION_VECTOR_PREPASS
-    let clip_position_t = view.unjittered_view_proj * in.world_position;
+    let clip_position_t = view.unjittered_clip_from_world * in.world_position;
     let clip_position = clip_position_t.xy / clip_position_t.w;
-    let previous_clip_position_t = prepass_bindings::previous_view_uniforms.view_proj * in.previous_world_position;
+    let previous_clip_position_t = prepass_bindings::previous_view_uniforms.clip_from_world * in.previous_world_position;
     let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w;
     // These motion vectors are used as offsets to UV positions and are stored
     // in the range -1,1 to allow offsetting from the one corner to the
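For reference, the motion-vector math used by the prepass fragment shader above, transliterated into a hedged Rust sketch. The final scale and Y flip are an assumption about the screen-space convention rather than something stated in this hunk:

```rust
use glam::{Mat4, Vec2, Vec4};

fn motion_vector(
    unjittered_clip_from_world: Mat4,
    previous_clip_from_world: Mat4,
    world_position: Vec4,
    previous_world_position: Vec4,
) -> Vec2 {
    // Project both positions with their respective clip_from_world matrices.
    let clip = unjittered_clip_from_world * world_position;
    let prev_clip = previous_clip_from_world * previous_world_position;
    // Perspective divide, then take the screen-space delta. The 0.5 / -0.5 scale
    // (flipping Y) is an assumed convention, not taken from the diff.
    let ndc = Vec2::new(clip.x, clip.y) / clip.w;
    let prev_ndc = Vec2::new(prev_clip.x, prev_clip.y) / prev_clip.w;
    (ndc - prev_ndc) * Vec2::new(0.5, -0.5)
}

fn main() {
    let clip_from_world = Mat4::perspective_infinite_reverse_rh(1.0, 1.0, 0.1);
    let now = Vec4::new(0.1, 0.0, -5.0, 1.0);
    let before = Vec4::new(0.0, 0.0, -5.0, 1.0);
    println!("{:?}", motion_vector(clip_from_world, clip_from_world, now, before));
}
```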
@@ -1,8 +1,8 @@
 #define_import_path bevy_pbr::prepass_bindings

 struct PreviousViewUniforms {
-    inverse_view: mat4x4<f32>,
-    view_proj: mat4x4<f32>,
+    view_from_world: mat4x4<f32>,
+    clip_from_world: mat4x4<f32>,
 }

 #ifdef MOTION_VECTOR_PREPASS
@@ -68,7 +68,7 @@ bitflags::bitflags! {

 #[derive(Copy, Clone, ShaderType, Default, Debug)]
 pub struct GpuDirectionalCascade {
-    view_projection: Mat4,
+    clip_from_world: Mat4,
     texel_size: f32,
     far_bound: f32,
 }

@@ -479,7 +479,7 @@ pub fn calculate_cluster_factors(
 // we will also construct it in the fragment shader and need our implementations to match,
 // so we reproduce it here to avoid a mismatch if glam changes. we also switch the handedness
 // could move this onto transform but it's pretty niche
-pub(crate) fn spot_light_view_matrix(transform: &GlobalTransform) -> Mat4 {
+pub(crate) fn spot_light_world_from_view(transform: &GlobalTransform) -> Mat4 {
     // the matrix z_local (opposite of transform.forward())
     let fwd_dir = transform.back().extend(0.0);

@@ -502,7 +502,7 @@ pub(crate) fn spot_light_view_matrix(transform: &GlobalTransform) -> Mat4 {
     )
 }

-pub(crate) fn spot_light_projection_matrix(angle: f32) -> Mat4 {
+pub(crate) fn spot_light_clip_from_view(angle: f32) -> Mat4 {
     // spot light projection FOV is 2x the angle from spot light center to outer edge
     Mat4::perspective_infinite_reverse_rh(angle * 2.0, 1.0, POINT_LIGHT_NEAR_Z)
 }
@@ -828,7 +828,7 @@ pub fn prepare_lights(
         );
         let mut view_lights = Vec::new();

-        let is_orthographic = extracted_view.projection.w_axis.w == 1.0;
+        let is_orthographic = extracted_view.clip_from_view.w_axis.w == 1.0;
         let cluster_factors_zw = calculate_cluster_factors(
             clusters.near,
             clusters.far,

@@ -909,9 +909,9 @@ pub fn prepare_lights(
                        point_light_shadow_map.size as u32,
                        point_light_shadow_map.size as u32,
                    ),
-                    transform: view_translation * *view_rotation,
-                    view_projection: None,
-                    projection: cube_face_projection,
+                    world_from_view: view_translation * *view_rotation,
+                    clip_from_world: None,
+                    clip_from_view: cube_face_projection,
                    hdr: false,
                    color_grading: Default::default(),
                },

@@ -936,12 +936,12 @@ pub fn prepare_lights(
        .take(spot_light_shadow_maps_count)
        .enumerate()
    {
-        let spot_view_matrix = spot_light_view_matrix(&light.transform);
-        let spot_view_transform = spot_view_matrix.into();
+        let spot_world_from_view = spot_light_world_from_view(&light.transform);
+        let spot_world_from_view = spot_world_from_view.into();

        let angle = light.spot_light_angles.expect("lights should be sorted so that \
                [point_light_count..point_light_count + spot_light_shadow_maps_count] are spot lights").1;
-        let spot_projection = spot_light_projection_matrix(angle);
+        let spot_projection = spot_light_clip_from_view(angle);

        let depth_texture_view =
            directional_light_depth_texture

@@ -970,9 +970,9 @@ pub fn prepare_lights(
                        directional_light_shadow_map.size as u32,
                        directional_light_shadow_map.size as u32,
                    ),
-                    transform: spot_view_transform,
-                    projection: spot_projection,
-                    view_projection: None,
+                    world_from_view: spot_world_from_view,
+                    clip_from_view: spot_projection,
+                    clip_from_world: None,
                    hdr: false,
                    color_grading: Default::default(),
                },

@@ -1027,7 +1027,7 @@ pub fn prepare_lights(
            {
                gpu_lights.directional_lights[light_index].cascades[cascade_index] =
                    GpuDirectionalCascade {
-                        view_projection: cascade.view_projection,
+                        clip_from_world: cascade.clip_from_world,
                        texel_size: cascade.texel_size,
                        far_bound: *bound,
                    };

@@ -1066,9 +1066,9 @@ pub fn prepare_lights(
                        directional_light_shadow_map.size as u32,
                        directional_light_shadow_map.size as u32,
                    ),
-                    transform: GlobalTransform::from(cascade.view_transform),
-                    projection: cascade.projection,
-                    view_projection: Some(cascade.view_projection),
+                    world_from_view: GlobalTransform::from(cascade.world_from_cascade),
+                    clip_from_view: cascade.clip_from_cascade,
+                    clip_from_world: Some(cascade.clip_from_world),
                    hdr: false,
                    color_grading: Default::default(),
                },
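The spot-light shadow projection renamed in the hunks above is simply a square, infinite reverse-z perspective whose vertical FOV is twice the light's outer cone angle. A standalone sketch of that shape; the `POINT_LIGHT_NEAR_Z` value below is a placeholder, not the engine's constant:

```rust
use glam::Mat4;

// Placeholder near plane; the engine defines its own constant.
const POINT_LIGHT_NEAR_Z: f32 = 0.1;

// Mirrors the shape of `spot_light_clip_from_view` as it appears in this diff:
// FOV = 2 * outer angle, square aspect ratio, infinite reverse-z projection.
fn spot_light_clip_from_view(angle: f32) -> Mat4 {
    Mat4::perspective_infinite_reverse_rh(angle * 2.0, 1.0, POINT_LIGHT_NEAR_Z)
}

fn main() {
    let clip_from_view = spot_light_clip_from_view(std::f32::consts::FRAC_PI_4);
    // Reverse-z perspective: the bottom-right element is 0.0 (see the earlier note
    // on orthographic detection).
    assert_eq!(clip_from_view.w_axis.w, 0.0);
}
```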
@@ -250,22 +250,22 @@ impl Plugin for MeshRenderPlugin {

 #[derive(Component)]
 pub struct MeshTransforms {
-    pub transform: Affine3,
-    pub previous_transform: Affine3,
+    pub world_from_local: Affine3,
+    pub previous_world_from_local: Affine3,
     pub flags: u32,
 }

 #[derive(ShaderType, Clone)]
 pub struct MeshUniform {
     // Affine 4x3 matrices transposed to 3x4
-    pub transform: [Vec4; 3],
-    pub previous_transform: [Vec4; 3],
+    pub world_from_local: [Vec4; 3],
+    pub previous_world_from_local: [Vec4; 3],
     // 3x3 matrix packed in mat2x4 and f32 as:
     //   [0].xyz, [1].x,
     //   [1].yz, [2].xy
     //   [2].z
-    pub inverse_transpose_model_a: [Vec4; 2],
-    pub inverse_transpose_model_b: f32,
+    pub local_from_world_transpose_a: [Vec4; 2],
+    pub local_from_world_transpose_b: f32,
     pub flags: u32,
     // Four 16-bit unsigned normalized UV values packed into a `UVec2`:
     //

@@ -287,7 +287,7 @@ pub struct MeshUniform {
 #[repr(C)]
 pub struct MeshInputUniform {
     /// Affine 4x3 matrix transposed to 3x4.
-    pub transform: [Vec4; 3],
+    pub world_from_local: [Vec4; 3],
     /// Four 16-bit unsigned normalized UV values packed into a `UVec2`:
     ///
     /// ```text

@@ -334,14 +334,14 @@ pub struct MeshCullingDataBuffer(RawBufferVec<MeshCullingData>);

 impl MeshUniform {
     pub fn new(mesh_transforms: &MeshTransforms, maybe_lightmap_uv_rect: Option<Rect>) -> Self {
-        let (inverse_transpose_model_a, inverse_transpose_model_b) =
-            mesh_transforms.transform.inverse_transpose_3x3();
+        let (local_from_world_transpose_a, local_from_world_transpose_b) =
+            mesh_transforms.world_from_local.inverse_transpose_3x3();
         Self {
-            transform: mesh_transforms.transform.to_transpose(),
-            previous_transform: mesh_transforms.previous_transform.to_transpose(),
-            lightmap_uv_rect: lightmap::pack_lightmap_uv_rect(maybe_lightmap_uv_rect),
-            inverse_transpose_model_a,
-            inverse_transpose_model_b,
+            world_from_local: mesh_transforms.world_from_local.to_transpose(),
+            previous_world_from_local: mesh_transforms.previous_world_from_local.to_transpose(),
+            lightmap_uv_rect: pack_lightmap_uv_rect(maybe_lightmap_uv_rect),
+            local_from_world_transpose_a,
+            local_from_world_transpose_b,
             flags: mesh_transforms.flags,
         }
     }
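The `local_from_world_transpose_a`/`_b` pair squeezes a 3x3 inverse-transpose into two `Vec4`s plus one `f32`, following the comment layout `[0].xyz, [1].x / [1].yz, [2].xy / [2].z`. A hedged sketch of that packing and its inverse, assuming column-major `Mat3` from glam (the helper names are illustrative, not the engine's):

```rust
use glam::{Mat3, Vec3, Vec4};

// Pack columns of a 3x3 matrix into two Vec4s and a trailing f32.
fn pack_mat3(m: Mat3) -> ([Vec4; 2], f32) {
    let a = [
        Vec4::new(m.x_axis.x, m.x_axis.y, m.x_axis.z, m.y_axis.x),
        Vec4::new(m.y_axis.y, m.y_axis.z, m.z_axis.x, m.z_axis.y),
    ];
    (a, m.z_axis.z)
}

// Inverse of the packing above; mirrors what the WGSL unpack helper recovers.
fn unpack_mat3(a: [Vec4; 2], b: f32) -> Mat3 {
    Mat3::from_cols(
        Vec3::new(a[0].x, a[0].y, a[0].z),
        Vec3::new(a[0].w, a[1].x, a[1].y),
        Vec3::new(a[1].z, a[1].w, b),
    )
}

fn main() {
    let m = Mat3::from_cols(
        Vec3::new(1.0, 2.0, 3.0),
        Vec3::new(4.0, 5.0, 6.0),
        Vec3::new(7.0, 8.0, 9.0),
    );
    let (a, b) = pack_mat3(m);
    assert_eq!(unpack_mat3(a, b), m);
}
```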
@@ -475,7 +475,7 @@ pub struct RenderMeshInstanceGpuBuilder {
     /// Data that will be placed on the [`RenderMeshInstanceGpu`].
     pub shared: RenderMeshInstanceShared,
     /// The current transform.
-    pub transform: Affine3,
+    pub world_from_local: Affine3,
     /// Four 16-bit unsigned normalized UV values packed into a [`UVec2`]:
     ///
     /// ```text

@@ -631,7 +631,7 @@ impl RenderMeshInstancesCpu {
         self.get(&entity)
             .map(|render_mesh_instance| RenderMeshQueueData {
                 shared: &render_mesh_instance.shared,
-                translation: render_mesh_instance.transforms.transform.translation,
+                translation: render_mesh_instance.transforms.world_from_local.translation,
             })
     }

@@ -724,7 +724,7 @@ impl RenderMeshInstanceGpuBuilder {
     ) -> usize {
         // Push the mesh input uniform.
         let current_uniform_index = current_input_buffer.push(MeshInputUniform {
-            transform: self.transform.to_transpose(),
+            world_from_local: self.world_from_local.to_transpose(),
             lightmap_uv_rect: self.lightmap_uv_rect,
             flags: self.mesh_flags.bits(),
             previous_input_index: match self.previous_input_index {

@@ -737,7 +737,7 @@ impl RenderMeshInstanceGpuBuilder {
         render_mesh_instances.insert(
             entity,
             RenderMeshInstanceGpu {
-                translation: self.transform.translation,
+                translation: self.world_from_local.translation,
                 shared: self.shared,
                 current_uniform_index: (current_uniform_index as u32)
                     .try_into()

@@ -859,13 +859,15 @@ pub fn extract_meshes_for_cpu_building(
             no_automatic_batching,
         );

-        let transform = transform.affine();
+        let world_from_local = transform.affine();
         queue.push((
             entity,
             RenderMeshInstanceCpu {
                 transforms: MeshTransforms {
-                    transform: (&transform).into(),
-                    previous_transform: (&previous_transform.map(|t| t.0).unwrap_or(transform))
+                    world_from_local: (&world_from_local).into(),
+                    previous_world_from_local: (&previous_transform
+                        .map(|t| t.0)
+                        .unwrap_or(world_from_local))
                         .into(),
                     flags: mesh_flags.bits(),
                 },

@@ -995,7 +997,7 @@ pub fn extract_meshes_for_gpu_building(

         let gpu_mesh_instance_builder = RenderMeshInstanceGpuBuilder {
             shared,
-            transform: (&transform.affine()).into(),
+            world_from_local: (&transform.affine()).into(),
             lightmap_uv_rect,
             mesh_flags,
             previous_input_index,
@@ -38,16 +38,16 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {
 #endif

 #ifdef SKINNED
-    var model = skinning::skin_model(vertex.joint_indices, vertex.joint_weights);
+    var world_from_local = skinning::skin_model(vertex.joint_indices, vertex.joint_weights);
 #else
     // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug.
     // See https://github.com/gfx-rs/naga/issues/2416 .
-    var model = mesh_functions::get_model_matrix(vertex_no_morph.instance_index);
+    var world_from_local = mesh_functions::get_world_from_local(vertex_no_morph.instance_index);
 #endif

 #ifdef VERTEX_NORMALS
 #ifdef SKINNED
-    out.world_normal = skinning::skin_normals(model, vertex.normal);
+    out.world_normal = skinning::skin_normals(world_from_local, vertex.normal);
 #else
     out.world_normal = mesh_functions::mesh_normal_local_to_world(
         vertex.normal,

@@ -59,7 +59,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {
 #endif

 #ifdef VERTEX_POSITIONS
-    out.world_position = mesh_functions::mesh_position_local_to_world(model, vec4<f32>(vertex.position, 1.0));
+    out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4<f32>(vertex.position, 1.0));
     out.position = position_world_to_clip(out.world_position.xyz);
 #endif

@@ -72,7 +72,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {

 #ifdef VERTEX_TANGENTS
     out.world_tangent = mesh_functions::mesh_tangent_local_to_world(
-        model,
+        world_from_local,
         vertex.tangent,
         // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug.
         // See https://github.com/gfx-rs/naga/issues/2416

@@ -92,7 +92,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput {

 #ifdef VISIBILITY_RANGE_DITHER
     out.visibility_range_dither = mesh_functions::get_visibility_range_dither_level(
-        vertex_no_morph.instance_index, model[3]);
+        vertex_no_morph.instance_index, world_from_local[3]);
 #endif

     return out;
@@ -13,23 +13,23 @@
 #import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack}


-fn get_model_matrix(instance_index: u32) -> mat4x4<f32> {
-    return affine3_to_square(mesh[instance_index].model);
+fn get_world_from_local(instance_index: u32) -> mat4x4<f32> {
+    return affine3_to_square(mesh[instance_index].world_from_local);
 }

-fn get_previous_model_matrix(instance_index: u32) -> mat4x4<f32> {
-    return affine3_to_square(mesh[instance_index].previous_model);
+fn get_previous_world_from_local(instance_index: u32) -> mat4x4<f32> {
+    return affine3_to_square(mesh[instance_index].previous_world_from_local);
 }

-fn mesh_position_local_to_world(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
-    return model * vertex_position;
+fn mesh_position_local_to_world(world_from_local: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
+    return world_from_local * vertex_position;
 }

 // NOTE: The intermediate world_position assignment is important
 // for precision purposes when using the 'equals' depth comparison
 // function.
-fn mesh_position_local_to_clip(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
-    let world_position = mesh_position_local_to_world(model, vertex_position);
+fn mesh_position_local_to_clip(world_from_local: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
+    let world_position = mesh_position_local_to_world(world_from_local, vertex_position);
     return position_world_to_clip(world_position.xyz);
 }

@@ -44,8 +44,8 @@ fn mesh_normal_local_to_world(vertex_normal: vec3<f32>, instance_index: u32) ->
     if any(vertex_normal != vec3<f32>(0.0)) {
         return normalize(
             mat2x4_f32_to_mat3x3_unpack(
-                mesh[instance_index].inverse_transpose_model_a,
-                mesh[instance_index].inverse_transpose_model_b,
+                mesh[instance_index].local_from_world_transpose_a,
+                mesh[instance_index].local_from_world_transpose_b,
             ) * vertex_normal
         );
     } else {

@@ -62,7 +62,7 @@ fn sign_determinant_model_3x3m(instance_index: u32) -> f32 {
     return f32(bool(mesh[instance_index].flags & MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT)) * 2.0 - 1.0;
 }

-fn mesh_tangent_local_to_world(model: mat4x4<f32>, vertex_tangent: vec4<f32>, instance_index: u32) -> vec4<f32> {
+fn mesh_tangent_local_to_world(world_from_local: mat4x4<f32>, vertex_tangent: vec4<f32>, instance_index: u32) -> vec4<f32> {
     // NOTE: The mikktspace method of normal mapping requires that the world tangent is
     // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents
     // and normal maps so that the exact inverse process is applied when shading. Blender, Unity,

@@ -74,9 +74,9 @@ fn mesh_tangent_local_to_world(model: mat4x4<f32>, vertex_tangent: vec4<f32>, in
     return vec4<f32>(
         normalize(
             mat3x3<f32>(
-                model[0].xyz,
-                model[1].xyz,
-                model[2].xyz
+                world_from_local[0].xyz,
+                world_from_local[1].xyz,
+                world_from_local[2].xyz
             ) * vertex_tangent.xyz
         ),
         // NOTE: Multiplying by the sign of the determinant of the 3x3 model matrix accounts for
@@ -14,7 +14,7 @@
 // Per-frame data that the CPU supplies to the GPU.
 struct MeshInput {
     // The model transform.
-    model: mat3x4<f32>,
+    world_from_local: mat3x4<f32>,
     // The lightmap UV rect, packed into 64 bits.
     lightmap_uv_rect: vec2<u32>,
     // Various flags.

@@ -92,7 +92,7 @@ struct IndirectParameters {
 //
 // `aabb_center.w` should be 1.0.
 fn view_frustum_intersects_obb(
-    model: mat4x4<f32>,
+    world_from_local: mat4x4<f32>,
     aabb_center: vec4<f32>,
     aabb_half_extents: vec3<f32>,
 ) -> bool {

@@ -103,9 +103,9 @@ fn view_frustum_intersects_obb(
         let relative_radius = dot(
             abs(
                 vec3(
-                    dot(plane_normal, model[0]),
-                    dot(plane_normal, model[1]),
-                    dot(plane_normal, model[2]),
+                    dot(plane_normal, world_from_local[0]),
+                    dot(plane_normal, world_from_local[1]),
+                    dot(plane_normal, world_from_local[2]),
                 )
             ),
             aabb_half_extents
@@ -135,8 +135,8 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3<u32>) {
     // Unpack.
     let input_index = work_items[instance_index].input_index;
     let output_index = work_items[instance_index].output_index;
-    let model_affine_transpose = current_input[input_index].model;
-    let model = maths::affine3_to_square(model_affine_transpose);
+    let world_from_local_affine_transpose = current_input[input_index].world_from_local;
+    let world_from_local = maths::affine3_to_square(world_from_local_affine_transpose);

     // Cull if necessary.
 #ifdef FRUSTUM_CULLING

@@ -144,29 +144,29 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3<u32>) {
     let aabb_half_extents = mesh_culling_data[input_index].aabb_half_extents.xyz;

     // Do an OBB-based frustum cull.
-    let model_center = model * vec4(aabb_center, 1.0);
-    if (!view_frustum_intersects_obb(model, model_center, aabb_half_extents)) {
+    let model_center = world_from_local * vec4(aabb_center, 1.0);
+    if (!view_frustum_intersects_obb(world_from_local, model_center, aabb_half_extents)) {
         return;
     }
 #endif

     // Calculate inverse transpose.
-    let inverse_transpose_model = transpose(maths::inverse_affine3(transpose(
-        model_affine_transpose)));
+    let local_from_world_transpose = transpose(maths::inverse_affine3(transpose(
+        world_from_local_affine_transpose)));

     // Pack inverse transpose.
-    let inverse_transpose_model_a = mat2x4<f32>(
-        vec4<f32>(inverse_transpose_model[0].xyz, inverse_transpose_model[1].x),
-        vec4<f32>(inverse_transpose_model[1].yz, inverse_transpose_model[2].xy));
-    let inverse_transpose_model_b = inverse_transpose_model[2].z;
+    let local_from_world_transpose_a = mat2x4<f32>(
+        vec4<f32>(local_from_world_transpose[0].xyz, local_from_world_transpose[1].x),
+        vec4<f32>(local_from_world_transpose[1].yz, local_from_world_transpose[2].xy));
+    let local_from_world_transpose_b = local_from_world_transpose[2].z;

     // Look up the previous model matrix.
     let previous_input_index = current_input[input_index].previous_input_index;
-    var previous_model: mat3x4<f32>;
+    var previous_world_from_local: mat3x4<f32>;
     if (previous_input_index == 0xffffffff) {
-        previous_model = model_affine_transpose;
+        previous_world_from_local = world_from_local_affine_transpose;
     } else {
-        previous_model = previous_input[previous_input_index].model;
+        previous_world_from_local = previous_input[previous_input_index].world_from_local;
     }

     // Figure out the output index. In indirect mode, this involves bumping the

@@ -180,10 +180,10 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3<u32>) {
 #endif

     // Write the output.
-    output[mesh_output_index].model = model_affine_transpose;
-    output[mesh_output_index].previous_model = previous_model;
-    output[mesh_output_index].inverse_transpose_model_a = inverse_transpose_model_a;
-    output[mesh_output_index].inverse_transpose_model_b = inverse_transpose_model_b;
+    output[mesh_output_index].world_from_local = world_from_local_affine_transpose;
+    output[mesh_output_index].previous_world_from_local = previous_world_from_local;
+    output[mesh_output_index].local_from_world_transpose_a = local_from_world_transpose_a;
+    output[mesh_output_index].local_from_world_transpose_b = local_from_world_transpose_b;
     output[mesh_output_index].flags = current_input[input_index].flags;
     output[mesh_output_index].lightmap_uv_rect = current_input[input_index].lightmap_uv_rect;
 }
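Not stated in the diff, but the reason this preprocessing pass computes `local_from_world_transpose` at all: normals must be transformed by the inverse transpose of the local-to-world matrix so they stay perpendicular to surfaces under non-uniform scale. A one-line justification:

```latex
n \cdot t = 0, \qquad t' = M t, \qquad n' = (M^{-1})^{\top} n
\;\Rightarrow\; n' \cdot t' = \big((M^{-1})^{\top} n\big)^{\top} (M t) = n^{\top} M^{-1} M\, t = n \cdot t = 0.
```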
@@ -3,15 +3,15 @@
 struct Mesh {
     // Affine 4x3 matrices transposed to 3x4
     // Use bevy_render::maths::affine3_to_square to unpack
-    model: mat3x4<f32>,
-    previous_model: mat3x4<f32>,
+    world_from_local: mat3x4<f32>,
+    previous_world_from_local: mat3x4<f32>,
     // 3x3 matrix packed in mat2x4 and f32 as:
     //   [0].xyz, [1].x,
     //   [1].yz, [2].xy
     //   [2].z
     // Use bevy_pbr::mesh_functions::mat2x4_f32_to_mat3x3_unpack to unpack
-    inverse_transpose_model_a: mat2x4<f32>,
-    inverse_transpose_model_b: f32,
+    local_from_world_transpose_a: mat2x4<f32>,
+    local_from_world_transpose_b: f32,
     // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
     flags: u32,
     lightmap_uv_rect: vec2<u32>,
@@ -17,7 +17,7 @@ const POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
 const POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u;

 struct DirectionalCascade {
-    view_projection: mat4x4<f32>,
+    clip_from_world: mat4x4<f32>,
     texel_size: f32,
     far_bound: f32,
 }

@@ -115,7 +115,7 @@ struct ClusterOffsetsAndCounts {
 struct LightProbe {
     // This is stored as the transpose in order to save space in this structure.
     // It'll be transposed in the `environment_map_light` function.
-    inverse_transpose_transform: mat3x4<f32>,
+    light_from_world_transposed: mat3x4<f32>,
     cubemap_index: i32,
     intensity: f32,
 };
@@ -40,7 +40,7 @@ fn pbr_input_from_vertex_output(
     pbr_input.flags = mesh[in.instance_index].flags;
 #endif

-    pbr_input.is_orthographic = view.projection[3].w == 1.0;
+    pbr_input.is_orthographic = view.clip_from_view[3].w == 1.0;
     pbr_input.V = pbr_functions::calculate_view(in.world_position, pbr_input.is_orthographic);
     pbr_input.frag_coord = in.position;
     pbr_input.world_position = in.world_position;

@@ -297,7 +297,7 @@ fn pbr_input_from_standard_material(
         // TODO: Meshlet support
 #ifndef MESHLET_MESH_MATERIAL_PASS
         thickness *= length(
-            (transpose(mesh[in.instance_index].model) * vec4(pbr_input.N, 0.0)).xyz
+            (transpose(mesh[in.instance_index].world_from_local) * vec4(pbr_input.N, 0.0)).xyz
         );
 #endif
         pbr_input.material.thickness = thickness;
@@ -213,7 +213,7 @@ fn calculate_view(
     var V: vec3<f32>;
     if is_orthographic {
         // Orthographic view vector
-        V = normalize(vec3<f32>(view_bindings::view.view_proj[0].z, view_bindings::view.view_proj[1].z, view_bindings::view.view_proj[2].z));
+        V = normalize(vec3<f32>(view_bindings::view.clip_from_world[0].z, view_bindings::view.clip_from_world[1].z, view_bindings::view.clip_from_world[2].z));
     } else {
         // Only valid for a perspective projection
         V = normalize(view_bindings::view.world_position.xyz - world_position.xyz);

@@ -342,10 +342,10 @@ fn apply_pbr_lighting(
 #endif // STANDARD_MATERIAL_DIFFUSE_TRANSMISSION

     let view_z = dot(vec4<f32>(
-        view_bindings::view.inverse_view[0].z,
-        view_bindings::view.inverse_view[1].z,
-        view_bindings::view.inverse_view[2].z,
-        view_bindings::view.inverse_view[3].z
+        view_bindings::view.view_from_world[0].z,
+        view_bindings::view.view_from_world[1].z,
+        view_bindings::view.view_from_world[2].z,
+        view_bindings::view.view_from_world[3].z
     ), in.world_position);
     let cluster_index = clustering::fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic);
     let offset_and_counts = clustering::unpack_offset_and_counts(cluster_index);
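The `view_z` computation above avoids a full matrix-vector multiply by dotting the world position with the z row of `view_from_world`, which yields only the view-space z the clustering lookup needs. A quick Rust check of the equivalence (the values are arbitrary):

```rust
use glam::{Mat4, Vec3, Vec4};

fn main() {
    let view_from_world = Mat4::from_translation(Vec3::new(0.0, 0.0, -4.0));
    let world_position = Vec4::new(1.0, 2.0, 3.0, 1.0);

    // Gather the z components of each column: this is the z row of the matrix.
    let z_row = Vec4::new(
        view_from_world.x_axis.z,
        view_from_world.y_axis.z,
        view_from_world.z_axis.z,
        view_from_world.w_axis.z,
    );
    let view_z = z_row.dot(world_position);
    assert_eq!(view_z, (view_from_world * world_position).z);
}
```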
@@ -53,9 +53,9 @@ fn prepass_alpha_discard(in: VertexOutput) {

 #ifdef MOTION_VECTOR_PREPASS
 fn calculate_motion_vector(world_position: vec4<f32>, previous_world_position: vec4<f32>) -> vec2<f32> {
-    let clip_position_t = view.unjittered_view_proj * world_position;
+    let clip_position_t = view.unjittered_clip_from_world * world_position;
     let clip_position = clip_position_t.xy / clip_position_t.w;
-    let previous_clip_position_t = previous_view_uniforms.view_proj * previous_world_position;
+    let previous_clip_position_t = previous_view_uniforms.clip_from_world * previous_world_position;
     let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w;
     // These motion vectors are used as offsets to UV positions and are stored
     // in the range -1,1 to allow offsetting from the one corner to the
@@ -30,7 +30,7 @@ fn specular_transmissive_light(world_position: vec4<f32>, frag_coord: vec3<f32>,
     let exit_position = world_position.xyz + T * thickness;

     // Transform exit_position into clip space
-    let clip_exit_position = view_bindings::view.view_proj * vec4<f32>(exit_position, 1.0);
+    let clip_exit_position = view_bindings::view.clip_from_world * vec4<f32>(exit_position, 1.0);

     // Scale / offset position so that coordinate is in right space for sampling transmissive background texture
     let offset_position = (clip_exit_position.xy / clip_exit_position.w) * vec2<f32>(0.5, -0.5) + 0.5;
@@ -125,7 +125,7 @@ fn world_to_directional_light_local(
     let light = &view_bindings::lights.directional_lights[light_id];
     let cascade = &(*light).cascades[cascade_index];

-    let offset_position_clip = (*cascade).view_projection * offset_position;
+    let offset_position_clip = (*cascade).clip_from_world * offset_position;
     if (offset_position_clip.w <= 0.0) {
         return vec4(0.0);
     }
@@ -51,15 +51,15 @@ fn inverse_transpose_3x3m(in: mat3x3<f32>) -> mat3x3<f32> {
 }

 fn skin_normals(
-    model: mat4x4<f32>,
+    world_from_local: mat4x4<f32>,
     normal: vec3<f32>,
 ) -> vec3<f32> {
     return normalize(
         inverse_transpose_3x3m(
             mat3x3<f32>(
-                model[0].xyz,
+                world_from_local[0].xyz,
-                model[1].xyz,
+                world_from_local[1].xyz,
-                model[2].xyz
+                world_from_local[2].xyz
             )
         ) * normal
     );
@@ -31,31 +31,31 @@

 /// Convert a view space position to world space
 fn position_view_to_world(view_pos: vec3<f32>) -> vec3<f32> {
-    let world_pos = view_bindings::view.view * vec4(view_pos, 1.0);
+    let world_pos = view_bindings::view.world_from_view * vec4(view_pos, 1.0);
     return world_pos.xyz;
 }

 /// Convert a clip space position to world space
 fn position_clip_to_world(clip_pos: vec4<f32>) -> vec3<f32> {
-    let world_pos = view_bindings::view.inverse_view_proj * clip_pos;
+    let world_pos = view_bindings::view.world_from_clip * clip_pos;
     return world_pos.xyz;
 }

 /// Convert a ndc space position to world space
 fn position_ndc_to_world(ndc_pos: vec3<f32>) -> vec3<f32> {
-    let world_pos = view_bindings::view.inverse_view_proj * vec4(ndc_pos, 1.0);
+    let world_pos = view_bindings::view.world_from_clip * vec4(ndc_pos, 1.0);
     return world_pos.xyz / world_pos.w;
 }

 /// Convert a view space direction to world space
 fn direction_view_to_world(view_dir: vec3<f32>) -> vec3<f32> {
-    let world_dir = view_bindings::view.view * vec4(view_dir, 0.0);
+    let world_dir = view_bindings::view.world_from_view * vec4(view_dir, 0.0);
     return world_dir.xyz;
 }

 /// Convert a clip space direction to world space
 fn direction_clip_to_world(clip_dir: vec4<f32>) -> vec3<f32> {
-    let world_dir = view_bindings::view.inverse_view_proj * clip_dir;
+    let world_dir = view_bindings::view.world_from_clip * clip_dir;
     return world_dir.xyz;
 }

@@ -65,31 +65,31 @@ fn direction_clip_to_world(clip_dir: vec4<f32>) -> vec3<f32> {

 /// Convert a world space position to view space
 fn position_world_to_view(world_pos: vec3<f32>) -> vec3<f32> {
-    let view_pos = view_bindings::view.inverse_view * vec4(world_pos, 1.0);
+    let view_pos = view_bindings::view.view_from_world * vec4(world_pos, 1.0);
     return view_pos.xyz;
 }

 /// Convert a clip space position to view space
 fn position_clip_to_view(clip_pos: vec4<f32>) -> vec3<f32> {
-    let view_pos = view_bindings::view.inverse_projection * clip_pos;
+    let view_pos = view_bindings::view.view_from_clip * clip_pos;
     return view_pos.xyz;
 }

 /// Convert a ndc space position to view space
 fn position_ndc_to_view(ndc_pos: vec3<f32>) -> vec3<f32> {
-    let view_pos = view_bindings::view.inverse_projection * vec4(ndc_pos, 1.0);
+    let view_pos = view_bindings::view.view_from_clip * vec4(ndc_pos, 1.0);
     return view_pos.xyz / view_pos.w;
 }

 /// Convert a world space direction to view space
 fn direction_world_to_view(world_dir: vec3<f32>) -> vec3<f32> {
-    let view_dir = view_bindings::view.inverse_view * vec4(world_dir, 0.0);
+    let view_dir = view_bindings::view.view_from_world * vec4(world_dir, 0.0);
     return view_dir.xyz;
 }

 /// Convert a clip space direction to view space
 fn direction_clip_to_view(clip_dir: vec4<f32>) -> vec3<f32> {
-    let view_dir = view_bindings::view.inverse_projection * clip_dir;
+    let view_dir = view_bindings::view.view_from_clip * clip_dir;
     return view_dir.xyz;
 }

@@ -99,25 +99,25 @@ fn direction_clip_to_view(clip_dir: vec4<f32>) -> vec3<f32> {

 /// Convert a world space position to clip space
 fn position_world_to_clip(world_pos: vec3<f32>) -> vec4<f32> {
-    let clip_pos = view_bindings::view.view_proj * vec4(world_pos, 1.0);
+    let clip_pos = view_bindings::view.clip_from_world * vec4(world_pos, 1.0);
     return clip_pos;
 }

 /// Convert a view space position to clip space
 fn position_view_to_clip(view_pos: vec3<f32>) -> vec4<f32> {
-    let clip_pos = view_bindings::view.projection * vec4(view_pos, 1.0);
+    let clip_pos = view_bindings::view.clip_from_view * vec4(view_pos, 1.0);
     return clip_pos;
 }

 /// Convert a world space direction to clip space
 fn direction_world_to_clip(world_dir: vec3<f32>) -> vec4<f32> {
-    let clip_dir = view_bindings::view.view_proj * vec4(world_dir, 0.0);
+    let clip_dir = view_bindings::view.clip_from_world * vec4(world_dir, 0.0);
     return clip_dir;
 }

 /// Convert a view space direction to clip space
 fn direction_view_to_clip(view_dir: vec3<f32>) -> vec4<f32> {
-    let clip_dir = view_bindings::view.projection * vec4(view_dir, 0.0);
+    let clip_dir = view_bindings::view.clip_from_view * vec4(view_dir, 0.0);
     return clip_dir;
 }

@@ -127,13 +127,13 @@ fn direction_view_to_clip(view_dir: vec3<f32>) -> vec4<f32> {

 /// Convert a world space position to ndc space
 fn position_world_to_ndc(world_pos: vec3<f32>) -> vec3<f32> {
-    let ndc_pos = view_bindings::view.view_proj * vec4(world_pos, 1.0);
+    let ndc_pos = view_bindings::view.clip_from_world * vec4(world_pos, 1.0);
     return ndc_pos.xyz / ndc_pos.w;
 }

 /// Convert a view space position to ndc space
 fn position_view_to_ndc(view_pos: vec3<f32>) -> vec3<f32> {
-    let ndc_pos = view_bindings::view.projection * vec4(view_pos, 1.0);
+    let ndc_pos = view_bindings::view.clip_from_view * vec4(view_pos, 1.0);
     return ndc_pos.xyz / ndc_pos.w;
 }

@@ -143,7 +143,7 @@ fn position_view_to_ndc(view_pos: vec3<f32>) -> vec3<f32> {

 /// Retrieve the perspective camera near clipping plane
 fn perspective_camera_near() -> f32 {
-    return view_bindings::view.projection[3][2];
+    return view_bindings::view.clip_from_view[3][2];
 }

 /// Convert ndc depth to linear view z.
@@ -152,9 +152,9 @@ fn depth_ndc_to_view_z(ndc_depth: f32) -> f32 {
 #ifdef VIEW_PROJECTION_PERSPECTIVE
     return -perspective_camera_near() / ndc_depth;
 #else ifdef VIEW_PROJECTION_ORTHOGRAPHIC
-    return -(view_bindings::view.projection[3][2] - ndc_depth) / view_bindings::view.projection[2][2];
+    return -(view_bindings::view.clip_from_view[3][2] - ndc_depth) / view_bindings::view.clip_from_view[2][2];
 #else
-    let view_pos = view_bindings::view.inverse_projection * vec4(0.0, 0.0, ndc_depth, 1.0);
+    let view_pos = view_bindings::view.view_from_clip * vec4(0.0, 0.0, ndc_depth, 1.0);
     return view_pos.z / view_pos.w;
 #endif
 }
@@ -165,9 +165,9 @@ fn view_z_to_depth_ndc(view_z: f32) -> f32 {
 #ifdef VIEW_PROJECTION_PERSPECTIVE
     return -perspective_camera_near() / view_z;
 #else ifdef VIEW_PROJECTION_ORTHOGRAPHIC
-    return view_bindings::view.projection[3][2] + view_z * view_bindings::view.projection[2][2];
+    return view_bindings::view.clip_from_view[3][2] + view_z * view_bindings::view.clip_from_view[2][2];
 #else
-    let ndc_pos = view_bindings::view.projection * vec4(0.0, 0.0, view_z, 1.0);
+    let ndc_pos = view_bindings::view.clip_from_view * vec4(0.0, 0.0, view_z, 1.0);
     return ndc_pos.z / ndc_pos.w;
 #endif
 }
@@ -65,17 +65,17 @@ fn calculate_neighboring_depth_differences(pixel_coordinates: vec2<i32>) -> f32
 fn load_normal_view_space(uv: vec2<f32>) -> vec3<f32> {
     var world_normal = textureSampleLevel(normals, point_clamp_sampler, uv, 0.0).xyz;
     world_normal = (world_normal * 2.0) - 1.0;
-    let inverse_view = mat3x3<f32>(
+    let view_from_world = mat3x3<f32>(
-        view.inverse_view[0].xyz,
+        view.view_from_world[0].xyz,
-        view.inverse_view[1].xyz,
+        view.view_from_world[1].xyz,
-        view.inverse_view[2].xyz,
+        view.view_from_world[2].xyz,
     );
-    return inverse_view * world_normal;
+    return view_from_world * world_normal;
 }

 fn reconstruct_view_space_position(depth: f32, uv: vec2<f32>) -> vec3<f32> {
     let clip_xy = vec2<f32>(uv.x * 2.0 - 1.0, 1.0 - 2.0 * uv.y);
-    let t = view.inverse_projection * vec4<f32>(clip_xy, depth, 1.0);
+    let t = view.view_from_clip * vec4<f32>(clip_xy, depth, 1.0);
     let view_xyz = t.xyz / t.w;
     return view_xyz;
 }
@@ -107,7 +107,7 @@ fn gtao(@builtin(global_invocation_id) global_id: vec3<u32>) {
     let view_vec = normalize(-pixel_position);

     let noise = load_noise(pixel_coordinates);
-    let sample_scale = (-0.5 * effect_radius * view.projection[0][0]) / pixel_position.z;
+    let sample_scale = (-0.5 * effect_radius * view.clip_from_view[0][0]) / pixel_position.z;

     var visibility = 0.0;
     for (var slice_t = 0.0; slice_t < slice_count; slice_t += 1.0) {
@@ -82,7 +82,7 @@ pub struct RenderTargetInfo {
 /// Holds internally computed [`Camera`] values.
 #[derive(Default, Debug, Clone)]
 pub struct ComputedCameraValues {
-    projection_matrix: Mat4,
+    clip_from_view: Mat4,
     target_info: Option<RenderTargetInfo>,
     // size of the `Viewport`
     old_viewport_size: Option<UVec2>,
@@ -340,8 +340,8 @@ impl Camera {

     /// The projection matrix computed using this camera's [`CameraProjection`].
     #[inline]
-    pub fn projection_matrix(&self) -> Mat4 {
+    pub fn clip_from_view(&self) -> Mat4 {
-        self.computed.projection_matrix
+        self.computed.clip_from_view
     }

     /// Given a position in world space, use the camera to compute the viewport-space coordinates.
@@ -398,7 +398,7 @@ impl Camera {
         let ndc = viewport_position * 2. / target_size - Vec2::ONE;

         let ndc_to_world =
-            camera_transform.compute_matrix() * self.computed.projection_matrix.inverse();
+            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();
         let world_near_plane = ndc_to_world.project_point3(ndc.extend(1.));
         // Using EPSILON because an ndc with Z = 0 returns NaNs.
         let world_far_plane = ndc_to_world.project_point3(ndc.extend(f32::EPSILON));
@@ -453,9 +453,9 @@ impl Camera {
         world_position: Vec3,
     ) -> Option<Vec3> {
         // Build a transformation matrix to convert from world space to NDC using camera data
-        let world_to_ndc: Mat4 =
+        let clip_from_world: Mat4 =
-            self.computed.projection_matrix * camera_transform.compute_matrix().inverse();
+            self.computed.clip_from_view * camera_transform.compute_matrix().inverse();
-        let ndc_space_coords: Vec3 = world_to_ndc.project_point3(world_position);
+        let ndc_space_coords: Vec3 = clip_from_world.project_point3(world_position);

         (!ndc_space_coords.is_nan()).then_some(ndc_space_coords)
     }
@@ -473,7 +473,7 @@ impl Camera {
     pub fn ndc_to_world(&self, camera_transform: &GlobalTransform, ndc: Vec3) -> Option<Vec3> {
         // Build a transformation matrix to convert from NDC to world space using camera data
         let ndc_to_world =
-            camera_transform.compute_matrix() * self.computed.projection_matrix.inverse();
+            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();

         let world_space_coords = ndc_to_world.project_point3(ndc);

@@ -786,7 +786,7 @@ pub fn camera_system<T: CameraProjection + Component>(
             camera.computed.target_info = new_computed_target_info;
             if let Some(size) = camera.logical_viewport_size() {
                 camera_projection.update(size.x, size.y);
-                camera.computed.projection_matrix = camera_projection.get_projection_matrix();
+                camera.computed.clip_from_view = camera_projection.get_clip_from_view();
             }
         }
     }
@@ -905,9 +905,9 @@ pub fn extract_cameras(
                         .unwrap_or_else(|| Exposure::default().exposure()),
                 },
                 ExtractedView {
-                    projection: camera.projection_matrix(),
+                    clip_from_view: camera.clip_from_view(),
-                    transform: *transform,
+                    world_from_view: *transform,
-                    view_projection: None,
+                    clip_from_world: None,
                     hdr: camera.hdr,
                     viewport: UVec4::new(
                         viewport_origin.x,
@@ -1021,8 +1021,8 @@ pub struct TemporalJitter {
 }

 impl TemporalJitter {
-    pub fn jitter_projection(&self, projection: &mut Mat4, view_size: Vec2) {
+    pub fn jitter_projection(&self, clip_from_view: &mut Mat4, view_size: Vec2) {
-        if projection.w_axis.w == 1.0 {
+        if clip_from_view.w_axis.w == 1.0 {
             warn!(
                 "TemporalJitter not supported with OrthographicProjection. Use PerspectiveProjection instead."
             );
@@ -1032,8 +1032,8 @@ impl TemporalJitter {
         // https://github.com/GPUOpen-LibrariesAndSDKs/FidelityFX-SDK/blob/d7531ae47d8b36a5d4025663e731a47a38be882f/docs/techniques/media/super-resolution-temporal/jitter-space.svg
         let jitter = (self.offset * vec2(2.0, -2.0)) / view_size;

-        projection.z_axis.x += jitter.x;
+        clip_from_view.z_axis.x += jitter.x;
-        projection.z_axis.y += jitter.y;
+        clip_from_view.z_axis.y += jitter.y;
     }
 }

@@ -75,7 +75,7 @@ pub struct CameraUpdateSystem;
 ///
 /// [`Camera`]: crate::camera::Camera
 pub trait CameraProjection {
-    fn get_projection_matrix(&self) -> Mat4;
+    fn get_clip_from_view(&self) -> Mat4;
     fn update(&mut self, width: f32, height: f32);
     fn far(&self) -> f32;
     fn get_frustum_corners(&self, z_near: f32, z_far: f32) -> [Vec3A; 8];
@@ -85,10 +85,10 @@ pub trait CameraProjection {
     /// This code is called by [`update_frusta`](crate::view::visibility::update_frusta) system
     /// for each camera to update its frustum.
     fn compute_frustum(&self, camera_transform: &GlobalTransform) -> Frustum {
-        let view_projection =
+        let clip_from_world =
-            self.get_projection_matrix() * camera_transform.compute_matrix().inverse();
+            self.get_clip_from_view() * camera_transform.compute_matrix().inverse();
-        Frustum::from_view_projection_custom_far(
+        Frustum::from_clip_from_world_custom_far(
-            &view_projection,
+            &clip_from_world,
             &camera_transform.translation(),
             &camera_transform.back(),
             self.far(),
@@ -117,10 +117,10 @@ impl From<OrthographicProjection> for Projection {
 }

 impl CameraProjection for Projection {
-    fn get_projection_matrix(&self) -> Mat4 {
+    fn get_clip_from_view(&self) -> Mat4 {
         match self {
-            Projection::Perspective(projection) => projection.get_projection_matrix(),
+            Projection::Perspective(projection) => projection.get_clip_from_view(),
-            Projection::Orthographic(projection) => projection.get_projection_matrix(),
+            Projection::Orthographic(projection) => projection.get_clip_from_view(),
         }
     }

@@ -185,7 +185,7 @@ pub struct PerspectiveProjection {
 }

 impl CameraProjection for PerspectiveProjection {
-    fn get_projection_matrix(&self) -> Mat4 {
+    fn get_clip_from_view(&self) -> Mat4 {
         Mat4::perspective_infinite_reverse_rh(self.fov, self.aspect_ratio, self.near)
     }

@@ -391,7 +391,7 @@ pub struct OrthographicProjection {
 }

 impl CameraProjection for OrthographicProjection {
-    fn get_projection_matrix(&self) -> Mat4 {
+    fn get_clip_from_view(&self) -> Mat4 {
         Mat4::orthographic_rh(
             self.area.min.x,
             self.area.max.x,
@@ -76,13 +76,13 @@ impl Aabb {

     /// Calculate the relative radius of the AABB with respect to a plane
     #[inline]
-    pub fn relative_radius(&self, p_normal: &Vec3A, model: &Mat3A) -> f32 {
+    pub fn relative_radius(&self, p_normal: &Vec3A, world_from_local: &Mat3A) -> f32 {
         // NOTE: dot products on Vec3A use SIMD and even with the overhead of conversion are net faster than Vec3
         let half_extents = self.half_extents;
         Vec3A::new(
-            p_normal.dot(model.x_axis),
+            p_normal.dot(world_from_local.x_axis),
-            p_normal.dot(model.y_axis),
+            p_normal.dot(world_from_local.y_axis),
-            p_normal.dot(model.z_axis),
+            p_normal.dot(world_from_local.z_axis),
         )
         .abs()
         .dot(half_extents)
@@ -117,11 +117,11 @@ pub struct Sphere {

 impl Sphere {
     #[inline]
-    pub fn intersects_obb(&self, aabb: &Aabb, local_to_world: &Affine3A) -> bool {
+    pub fn intersects_obb(&self, aabb: &Aabb, world_from_local: &Affine3A) -> bool {
-        let aabb_center_world = local_to_world.transform_point3a(aabb.center);
+        let aabb_center_world = world_from_local.transform_point3a(aabb.center);
         let v = aabb_center_world - self.center;
         let d = v.length();
-        let relative_radius = aabb.relative_radius(&(v / d), &local_to_world.matrix3);
+        let relative_radius = aabb.relative_radius(&(v / d), &world_from_local.matrix3);
         d < self.radius + relative_radius
     }
 }
@@ -219,24 +219,24 @@ pub struct Frustum {
 }

 impl Frustum {
-    /// Returns a frustum derived from `view_projection`.
+    /// Returns a frustum derived from `clip_from_world`.
     #[inline]
-    pub fn from_view_projection(view_projection: &Mat4) -> Self {
+    pub fn from_clip_from_world(clip_from_world: &Mat4) -> Self {
-        let mut frustum = Frustum::from_view_projection_no_far(view_projection);
+        let mut frustum = Frustum::from_clip_from_world_no_far(clip_from_world);
-        frustum.half_spaces[5] = HalfSpace::new(view_projection.row(2));
+        frustum.half_spaces[5] = HalfSpace::new(clip_from_world.row(2));
         frustum
     }

-    /// Returns a frustum derived from `view_projection`,
+    /// Returns a frustum derived from `clip_from_world`,
     /// but with a custom far plane.
     #[inline]
-    pub fn from_view_projection_custom_far(
+    pub fn from_clip_from_world_custom_far(
-        view_projection: &Mat4,
+        clip_from_world: &Mat4,
         view_translation: &Vec3,
         view_backward: &Vec3,
         far: f32,
     ) -> Self {
-        let mut frustum = Frustum::from_view_projection_no_far(view_projection);
+        let mut frustum = Frustum::from_clip_from_world_no_far(clip_from_world);
         let far_center = *view_translation - far * *view_backward;
         frustum.half_spaces[5] =
             HalfSpace::new(view_backward.extend(-view_backward.dot(far_center)));
@@ -248,11 +248,11 @@ impl Frustum {
     // Rendering by Lengyel.
     /// Returns a frustum derived from `view_projection`,
     /// without a far plane.
-    fn from_view_projection_no_far(view_projection: &Mat4) -> Self {
+    fn from_clip_from_world_no_far(clip_from_world: &Mat4) -> Self {
-        let row3 = view_projection.row(3);
+        let row3 = clip_from_world.row(3);
         let mut half_spaces = [HalfSpace::default(); 6];
         for (i, half_space) in half_spaces.iter_mut().enumerate().take(5) {
-            let row = view_projection.row(i / 2);
+            let row = clip_from_world.row(i / 2);
             *half_space = HalfSpace::new(if (i & 1) == 0 && i != 4 {
                 row3 + row
             } else {
@@ -280,11 +280,11 @@ impl Frustum {
     pub fn intersects_obb(
         &self,
         aabb: &Aabb,
-        model_to_world: &Affine3A,
+        world_from_local: &Affine3A,
         intersect_near: bool,
         intersect_far: bool,
     ) -> bool {
-        let aabb_center_world = model_to_world.transform_point3a(aabb.center).extend(1.0);
+        let aabb_center_world = world_from_local.transform_point3a(aabb.center).extend(1.0);
         for (idx, half_space) in self.half_spaces.into_iter().enumerate() {
             if idx == 4 && !intersect_near {
                 continue;
@@ -293,7 +293,7 @@ impl Frustum {
                 continue;
             }
             let p_normal = half_space.normal();
-            let relative_radius = aabb.relative_radius(&p_normal, &model_to_world.matrix3);
+            let relative_radius = aabb.relative_radius(&p_normal, &world_from_local.matrix3);
             if half_space.normal_d().dot(aabb_center_world) + relative_radius <= 0.0 {
                 return false;
             }
@@ -2,16 +2,16 @@ use bevy_math::{Mat4, Vec3, Vec4};

 /// A distance calculator for the draw order of [`PhaseItem`](crate::render_phase::PhaseItem)s.
 pub struct ViewRangefinder3d {
-    inverse_view_row_2: Vec4,
+    view_from_world_row_2: Vec4,
 }

 impl ViewRangefinder3d {
     /// Creates a 3D rangefinder for a view matrix.
-    pub fn from_view_matrix(view_matrix: &Mat4) -> ViewRangefinder3d {
+    pub fn from_world_from_view(world_from_view: &Mat4) -> ViewRangefinder3d {
-        let inverse_view_matrix = view_matrix.inverse();
+        let view_from_world = world_from_view.inverse();

         ViewRangefinder3d {
-            inverse_view_row_2: inverse_view_matrix.row(2),
+            view_from_world_row_2: view_from_world.row(2),
         }
     }

@@ -20,7 +20,7 @@ impl ViewRangefinder3d {
     pub fn distance_translation(&self, translation: &Vec3) -> f32 {
         // NOTE: row 2 of the inverse view matrix dotted with the translation from the model matrix
         // gives the z component of translation of the mesh in view-space
-        self.inverse_view_row_2.dot(translation.extend(1.0))
+        self.view_from_world_row_2.dot(translation.extend(1.0))
     }

     /// Calculates the distance, or view-space `Z` value, for the given `transform`.
@@ -28,7 +28,7 @@ impl ViewRangefinder3d {
     pub fn distance(&self, transform: &Mat4) -> f32 {
         // NOTE: row 2 of the inverse view matrix dotted with column 3 of the model matrix
         // gives the z component of translation of the mesh in view-space
-        self.inverse_view_row_2.dot(transform.col(3))
+        self.view_from_world_row_2.dot(transform.col(3))
     }
 }

@@ -40,7 +40,7 @@ mod tests {
     #[test]
     fn distance() {
         let view_matrix = Mat4::from_translation(Vec3::new(0.0, 0.0, -1.0));
-        let rangefinder = ViewRangefinder3d::from_view_matrix(&view_matrix);
+        let rangefinder = ViewRangefinder3d::from_world_from_view(&view_matrix);
         assert_eq!(rangefinder.distance(&Mat4::IDENTITY), 1.0);
         assert_eq!(
             rangefinder.distance(&Mat4::from_translation(Vec3::new(0.0, 0.0, 1.0))),
@@ -172,12 +172,12 @@ impl Msaa {

 #[derive(Component)]
 pub struct ExtractedView {
-    pub projection: Mat4,
+    pub clip_from_view: Mat4,
-    pub transform: GlobalTransform,
+    pub world_from_view: GlobalTransform,
     // The view-projection matrix. When provided it is used instead of deriving it from
     // `projection` and `transform` fields, which can be helpful in cases where numerical
     // stability matters and there is a more direct way to derive the view-projection matrix.
-    pub view_projection: Option<Mat4>,
+    pub clip_from_world: Option<Mat4>,
     pub hdr: bool,
     // uvec4(origin.x, origin.y, width, height)
     pub viewport: UVec4,
@@ -187,7 +187,7 @@ pub struct ExtractedView {
 impl ExtractedView {
     /// Creates a 3D rangefinder for a view
     pub fn rangefinder3d(&self) -> ViewRangefinder3d {
-        ViewRangefinder3d::from_view_matrix(&self.transform.compute_matrix())
+        ViewRangefinder3d::from_world_from_view(&self.world_from_view.compute_matrix())
     }
 }

@@ -404,13 +404,13 @@ impl ColorGrading {

 #[derive(Clone, ShaderType)]
 pub struct ViewUniform {
-    view_proj: Mat4,
+    clip_from_world: Mat4,
-    unjittered_view_proj: Mat4,
+    unjittered_clip_from_world: Mat4,
-    inverse_view_proj: Mat4,
+    world_from_clip: Mat4,
-    view: Mat4,
+    world_from_view: Mat4,
-    inverse_view: Mat4,
+    view_from_world: Mat4,
-    projection: Mat4,
+    clip_from_view: Mat4,
-    inverse_projection: Mat4,
+    view_from_clip: Mat4,
     world_position: Vec3,
     exposure: f32,
     // viewport(x_origin, y_origin, width, height)
@@ -727,23 +727,23 @@ pub fn prepare_view_uniforms(
     };
     for (entity, extracted_camera, extracted_view, frustum, temporal_jitter, mip_bias) in &views {
         let viewport = extracted_view.viewport.as_vec4();
-        let unjittered_projection = extracted_view.projection;
+        let unjittered_projection = extracted_view.clip_from_view;
-        let mut projection = unjittered_projection;
+        let mut clip_from_view = unjittered_projection;

         if let Some(temporal_jitter) = temporal_jitter {
-            temporal_jitter.jitter_projection(&mut projection, viewport.zw());
+            temporal_jitter.jitter_projection(&mut clip_from_view, viewport.zw());
         }

-        let inverse_projection = projection.inverse();
+        let view_from_clip = clip_from_view.inverse();
-        let view = extracted_view.transform.compute_matrix();
+        let world_from_view = extracted_view.world_from_view.compute_matrix();
-        let inverse_view = view.inverse();
+        let view_from_world = world_from_view.inverse();

-        let view_proj = if temporal_jitter.is_some() {
+        let clip_from_world = if temporal_jitter.is_some() {
-            projection * inverse_view
+            clip_from_view * view_from_world
         } else {
             extracted_view
-                .view_projection
+                .clip_from_world
-                .unwrap_or_else(|| projection * inverse_view)
+                .unwrap_or_else(|| clip_from_view * view_from_world)
         };

         // Map Frustum type to shader array<vec4<f32>, 6>
@@ -753,14 +753,14 @@ pub fn prepare_view_uniforms(

         let view_uniforms = ViewUniformOffset {
             offset: writer.write(&ViewUniform {
-                view_proj,
+                clip_from_world,
-                unjittered_view_proj: unjittered_projection * inverse_view,
+                unjittered_clip_from_world: unjittered_projection * view_from_world,
-                inverse_view_proj: view * inverse_projection,
+                world_from_clip: world_from_view * view_from_clip,
-                view,
+                world_from_view,
-                inverse_view,
+                view_from_world,
-                projection,
+                clip_from_view,
-                inverse_projection,
+                view_from_clip,
-                world_position: extracted_view.transform.translation(),
+                world_position: extracted_view.world_from_view.translation(),
                 exposure: extracted_camera
                     .map(|c| c.exposure)
                     .unwrap_or_else(|| Exposure::default().exposure()),
@@ -14,13 +14,13 @@ struct ColorGrading {
 }

 struct View {
-    view_proj: mat4x4<f32>,
+    clip_from_world: mat4x4<f32>,
-    unjittered_view_proj: mat4x4<f32>,
+    unjittered_clip_from_world: mat4x4<f32>,
-    inverse_view_proj: mat4x4<f32>,
+    world_from_clip: mat4x4<f32>,
-    view: mat4x4<f32>,
+    world_from_view: mat4x4<f32>,
-    inverse_view: mat4x4<f32>,
+    view_from_world: mat4x4<f32>,
-    projection: mat4x4<f32>,
+    clip_from_view: mat4x4<f32>,
-    inverse_projection: mat4x4<f32>,
+    view_from_clip: mat4x4<f32>,
     world_position: vec3<f32>,
     exposure: f32,
     // viewport(x_origin, y_origin, width, height)
@@ -468,9 +468,9 @@ pub fn check_visibility<QF>(
             // If we have an aabb, do frustum culling
             if !no_frustum_culling && !no_cpu_culling {
                 if let Some(model_aabb) = maybe_model_aabb {
-                    let model = transform.affine();
+                    let world_from_local = transform.affine();
                     let model_sphere = Sphere {
-                        center: model.transform_point3a(model_aabb.center),
+                        center: world_from_local.transform_point3a(model_aabb.center),
                         radius: transform.radius_vec3a(model_aabb.half_extents),
                     };
                     // Do quick sphere-based frustum culling
@@ -478,7 +478,7 @@ pub fn check_visibility<QF>(
                         return;
                     }
                     // Do aabb-based frustum culling
-                    if !frustum.intersects_obb(model_aabb, &model, true, false) {
+                    if !frustum.intersects_obb(model_aabb, &world_from_local, true, false) {
                         return;
                     }
                 }
@@ -444,7 +444,7 @@ pub fn queue_material2d_meshes<M: Material2d>(

             mesh_instance.material_bind_group_id = material_2d.get_bind_group_id();

-            let mesh_z = mesh_instance.transforms.transform.translation.z;
+            let mesh_z = mesh_instance.transforms.world_from_local.translation.z;
             transparent_phase.add(Transparent2d {
                 entity: *visible_entity,
                 draw_function: draw_transparent_2d,
@@ -156,31 +156,31 @@ impl Plugin for Mesh2dRenderPlugin {

 #[derive(Component)]
 pub struct Mesh2dTransforms {
-    pub transform: Affine3,
+    pub world_from_local: Affine3,
     pub flags: u32,
 }

 #[derive(ShaderType, Clone)]
 pub struct Mesh2dUniform {
     // Affine 4x3 matrix transposed to 3x4
-    pub transform: [Vec4; 3],
+    pub world_from_local: [Vec4; 3],
     // 3x3 matrix packed in mat2x4 and f32 as:
     // [0].xyz, [1].x,
     // [1].yz, [2].xy
     // [2].z
-    pub inverse_transpose_model_a: [Vec4; 2],
+    pub local_from_world_transpose_a: [Vec4; 2],
-    pub inverse_transpose_model_b: f32,
+    pub local_from_world_transpose_b: f32,
     pub flags: u32,
 }

 impl From<&Mesh2dTransforms> for Mesh2dUniform {
     fn from(mesh_transforms: &Mesh2dTransforms) -> Self {
-        let (inverse_transpose_model_a, inverse_transpose_model_b) =
+        let (local_from_world_transpose_a, local_from_world_transpose_b) =
-            mesh_transforms.transform.inverse_transpose_3x3();
+            mesh_transforms.world_from_local.inverse_transpose_3x3();
         Self {
-            transform: mesh_transforms.transform.to_transpose(),
+            world_from_local: mesh_transforms.world_from_local.to_transpose(),
-            inverse_transpose_model_a,
+            local_from_world_transpose_a,
-            inverse_transpose_model_b,
+            local_from_world_transpose_b,
             flags: mesh_transforms.flags,
         }
     }
@@ -236,7 +236,7 @@ pub fn extract_mesh2d(
             entity,
             RenderMesh2dInstance {
                 transforms: Mesh2dTransforms {
-                    transform: (&transform.affine()).into(),
+                    world_from_local: (&transform.affine()).into(),
                     flags: MeshFlags::empty().bits(),
                 },
                 mesh_asset_id: handle.0.id(),
@@ -35,9 +35,9 @@ fn vertex(vertex: Vertex) -> VertexOutput {
 #endif

 #ifdef VERTEX_POSITIONS
-    var model = mesh_functions::get_model_matrix(vertex.instance_index);
+    var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index);
     out.world_position = mesh_functions::mesh2d_position_local_to_world(
-        model,
+        world_from_local,
         vec4<f32>(vertex.position, 1.0)
     );
     out.position = mesh_functions::mesh2d_position_world_to_clip(out.world_position);
@@ -49,7 +49,7 @@ fn vertex(vertex: Vertex) -> VertexOutput {

 #ifdef VERTEX_TANGENTS
     out.world_tangent = mesh_functions::mesh2d_tangent_local_to_world(
-        model,
+        world_from_local,
         vertex.tangent
     );
 #endif
@@ -6,39 +6,39 @@
 }
 #import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack}

-fn get_model_matrix(instance_index: u32) -> mat4x4<f32> {
+fn get_world_from_local(instance_index: u32) -> mat4x4<f32> {
-    return affine3_to_square(mesh[instance_index].model);
+    return affine3_to_square(mesh[instance_index].world_from_local);
 }

-fn mesh2d_position_local_to_world(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
+fn mesh2d_position_local_to_world(world_from_local: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
-    return model * vertex_position;
+    return world_from_local * vertex_position;
 }

 fn mesh2d_position_world_to_clip(world_position: vec4<f32>) -> vec4<f32> {
-    return view.view_proj * world_position;
+    return view.clip_from_world * world_position;
 }

 // NOTE: The intermediate world_position assignment is important
 // for precision purposes when using the 'equals' depth comparison
 // function.
-fn mesh2d_position_local_to_clip(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
+fn mesh2d_position_local_to_clip(world_from_local: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
-    let world_position = mesh2d_position_local_to_world(model, vertex_position);
+    let world_position = mesh2d_position_local_to_world(world_from_local, vertex_position);
     return mesh2d_position_world_to_clip(world_position);
 }

 fn mesh2d_normal_local_to_world(vertex_normal: vec3<f32>, instance_index: u32) -> vec3<f32> {
     return mat2x4_f32_to_mat3x3_unpack(
-        mesh[instance_index].inverse_transpose_model_a,
+        mesh[instance_index].local_from_world_transpose_a,
-        mesh[instance_index].inverse_transpose_model_b,
+        mesh[instance_index].local_from_world_transpose_b,
     ) * vertex_normal;
 }

-fn mesh2d_tangent_local_to_world(model: mat4x4<f32>, vertex_tangent: vec4<f32>) -> vec4<f32> {
+fn mesh2d_tangent_local_to_world(world_from_local: mat4x4<f32>, vertex_tangent: vec4<f32>) -> vec4<f32> {
     return vec4<f32>(
         mat3x3<f32>(
-            model[0].xyz,
+            world_from_local[0].xyz,
-            model[1].xyz,
+            world_from_local[1].xyz,
-            model[2].xyz
+            world_from_local[2].xyz
         ) * vertex_tangent.xyz,
         vertex_tangent.w
     );
@@ -3,14 +3,14 @@
 struct Mesh2d {
     // Affine 4x3 matrix transposed to 3x4
     // Use bevy_render::maths::affine3_to_square to unpack
-    model: mat3x4<f32>,
+    world_from_local: mat3x4<f32>,
     // 3x3 matrix packed in mat2x4 and f32 as:
     // [0].xyz, [1].x,
     // [1].yz, [2].xy
     // [2].z
     // Use bevy_render::maths::mat2x4_f32_to_mat3x3_unpack to unpack
-    inverse_transpose_model_a: mat2x4<f32>,
+    local_from_world_transpose_a: mat2x4<f32>,
-    inverse_transpose_model_b: f32,
+    local_from_world_transpose_b: f32,
     // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
     flags: u32,
 };
@@ -37,7 +37,7 @@ fn vertex(in: VertexInput) -> VertexOutput {
         0.0
     );

-    out.clip_position = view.view_proj * affine3_to_square(mat3x4<f32>(
+    out.clip_position = view.clip_from_world * affine3_to_square(mat3x4<f32>(
         in.i_model_transpose_col0,
         in.i_model_transpose_col1,
         in.i_model_transpose_col2,
@@ -224,8 +224,8 @@ impl From<Affine3A> for GlobalTransform {
 }

 impl From<Mat4> for GlobalTransform {
-    fn from(matrix: Mat4) -> Self {
+    fn from(world_from_local: Mat4) -> Self {
-        Self(Affine3A::from_mat4(matrix))
+        Self(Affine3A::from_mat4(world_from_local))
     }
 }

@@ -80,8 +80,8 @@ impl Transform {
     /// Extracts the translation, rotation, and scale from `matrix`. It must be a 3d affine
     /// transformation matrix.
     #[inline]
-    pub fn from_matrix(matrix: Mat4) -> Self {
+    pub fn from_matrix(world_from_local: Mat4) -> Self {
-        let (scale, rotation, translation) = matrix.to_scale_rotation_translation();
+        let (scale, rotation, translation) = world_from_local.to_scale_rotation_translation();

         Transform {
             translation,
@@ -671,7 +671,7 @@ pub fn extract_uinode_outlines(
             ),
         ];

-        let transform = global_transform.compute_matrix();
+        let world_from_local = global_transform.compute_matrix();
         for edge in outline_edges {
             if edge.min.x < edge.max.x && edge.min.y < edge.max.y {
                 extracted_uinodes.uinodes.insert(
@@ -679,7 +679,8 @@ pub fn extract_uinode_outlines(
                     ExtractedUiNode {
                         stack_index: node.stack_index,
                         // This translates the uinode's transform to the center of the current border rectangle
-                        transform: transform * Mat4::from_translation(edge.center().extend(0.)),
+                        transform: world_from_local
+                            * Mat4::from_translation(edge.center().extend(0.)),
                         color: outline.color.into(),
                         rect: Rect {
                             max: edge.size(),
@@ -755,13 +756,13 @@ pub fn extract_default_ui_camera_view(
        );
        let default_camera_view = commands
            .spawn(ExtractedView {
-                projection: projection_matrix,
+                clip_from_view: projection_matrix,
-                transform: GlobalTransform::from_xyz(
+                world_from_view: GlobalTransform::from_xyz(
                    0.0,
                    0.0,
                    UI_CAMERA_FAR + UI_CAMERA_TRANSFORM_OFFSET,
                ),
-                view_projection: None,
+                clip_from_world: None,
                hdr: camera.hdr,
                viewport: UVec4::new(
                    physical_origin.x,
@@ -41,7 +41,7 @@ fn vertex(
 ) -> VertexOutput {
     var out: VertexOutput;
     out.uv = vertex_uv;
-    out.position = view.view_proj * vec4(vertex_position, 1.0);
+    out.position = view.clip_from_world * vec4(vertex_position, 1.0);
     out.color = vertex_color;
     out.flags = flags;
     out.radius = radius;
@@ -18,7 +18,7 @@ fn vertex(
 ) -> UiVertexOutput {
     var out: UiVertexOutput;
     out.uv = vertex_uv;
-    out.position = view.view_proj * vec4<f32>(vertex_position, 1.0);
+    out.position = view.clip_from_world * vec4<f32>(vertex_position, 1.0);
     out.size = size;
     out.border_widths = border_widths;
     return out;
@@ -246,7 +246,7 @@ struct VertexOutput {
 fn vertex(vertex: Vertex) -> VertexOutput {
     var out: VertexOutput;
     // Project the world position of the mesh into screen position
-    let model = mesh2d_functions::get_model_matrix(vertex.instance_index);
+    let model = mesh2d_functions::get_world_from_local(vertex.instance_index);
     out.clip_position = mesh2d_functions::mesh2d_position_local_to_clip(model, vec4<f32>(vertex.position, 1.0));
     // Unpack the `u32` from the vertex buffer into the `vec4<f32>` used by the fragment shader
     out.color = vec4<f32>((vec4<u32>(vertex.color) >> vec4<u32>(0u, 8u, 16u, 24u)) & vec4<u32>(255u)) / 255.0;
@@ -325,7 +325,7 @@ pub fn extract_colored_mesh2d(
        }

        let transforms = Mesh2dTransforms {
-            transform: (&transform.affine()).into(),
+            world_from_local: (&transform.affine()).into(),
            flags: MeshFlags::empty().bits(),
        };

@@ -386,7 +386,7 @@ pub fn queue_colored_mesh2d(
            let pipeline_id =
                pipelines.specialize(&pipeline_cache, &colored_mesh2d_pipeline, mesh2d_key);

-            let mesh_z = mesh2d_transforms.transform.translation.z;
+            let mesh_z = mesh2d_transforms.world_from_local.translation.z;
            transparent_phase.add(Transparent2d {
                entity: *visible_entity,
                draw_function: draw_colored_mesh2d,
@@ -50,7 +50,7 @@ static CLICK_TO_MOVE_HELP_TEXT: &str = "Left click: Move the object";

 static GIZMO_COLOR: Color = Color::Srgba(YELLOW);

-static VOXEL_TRANSFORM: Mat4 = Mat4::from_cols_array_2d(&[
+static VOXEL_FROM_WORLD: Mat4 = Mat4::from_cols_array_2d(&[
     [-42.317566, 0.0, 0.0, 0.0],
     [0.0, 0.0, 44.601563, 0.0],
     [0.0, 16.73776, 0.0, 0.0],
@@ -132,8 +132,8 @@ struct VoxelVisualizationExtension {

 #[derive(ShaderType, Debug, Clone)]
 struct VoxelVisualizationIrradianceVolumeInfo {
-    transform: Mat4,
+    world_from_voxel: Mat4,
-    inverse_transform: Mat4,
+    voxel_from_world: Mat4,
     resolution: UVec3,
     intensity: f32,
 }
@@ -242,7 +242,7 @@ fn spawn_camera(commands: &mut Commands, assets: &ExampleAssets) {
 fn spawn_irradiance_volume(commands: &mut Commands, assets: &ExampleAssets) {
     commands
         .spawn(SpatialBundle {
-            transform: Transform::from_matrix(VOXEL_TRANSFORM),
+            transform: Transform::from_matrix(VOXEL_FROM_WORLD),
             ..SpatialBundle::default()
         })
         .insert(IrradianceVolume {
@@ -571,8 +571,8 @@ fn create_cubes(
                base: StandardMaterial::from(Color::from(RED)),
                extension: VoxelVisualizationExtension {
                    irradiance_volume_info: VoxelVisualizationIrradianceVolumeInfo {
-                        transform: VOXEL_TRANSFORM.inverse(),
+                        world_from_voxel: VOXEL_FROM_WORLD.inverse(),
-                        inverse_transform: VOXEL_TRANSFORM,
+                        voxel_from_world: VOXEL_FROM_WORLD,
                        resolution: uvec3(
                            resolution.width,
                            resolution.height,