Callable PBR functions (#4939)
# Objective
- Builds on top of #4938
- Make clustered-forward PBR lighting/shadows functionality callable
- See #3969 for details
## Solution
- Add a `PbrInput` struct type containing the `StandardMaterial`, occlusion, world_position, world_normal, frag_coord, the prepared unit view vector `V` and (normal-mapped) normal `N`, and whether the projection is orthographic
- Split the functionality for calculating the unit view vector and the normal-mapped normal into `bevy_pbr::pbr_functions`
- Split the high-level shading flow into a `pbr(in: PbrInput)` function in `bevy_pbr::pbr_functions`
- Rework `pbr.wgsl` fragment stage entry point to make use of the new functions
- This has been benchmarked on an M1 Max using `many_cubes -- sphere`. `main` had a median frame time of 15.88ms and this PR 15.99ms, a 0.69% frame time increase, which is within noise in my opinion.
---
## Changelog
- Added: PBR shading code is now callable. Import `bevy_pbr::pbr_functions` and its dependencies, create a `PbrInput`, calculate the unit view and normal-mapped normal vectors and whether the projection is orthographic, and call `pbr()`! A sketch of this flow follows below.
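Below is a minimal, hypothetical sketch of that call sequence in a custom fragment shader. It only calls functions defined in this module; the `material` and `view` bindings, the input names (`frag_coord`, `world_position`, `world_normal`, `world_tangent`, `uv`, `is_front`), and the `view.projection[3].w == 1.0` orthographic check are assumptions borrowed from how `pbr.wgsl` wires things up, not guarantees made by this module.

```wgsl
// Hypothetical usage sketch; input and binding names follow pbr.wgsl.
#import bevy_pbr::pbr_functions
// ...plus the binding/type/lighting imports that pbr_functions depends on.

// Inside a fragment entry point:
var pbr_input: PbrInput;
pbr_input.material = material;
pbr_input.occlusion = 1.0; // no occlusion texture sampled in this sketch
pbr_input.frag_coord = frag_coord;
pbr_input.world_position = world_position;
pbr_input.world_normal = world_normal;
// Assumption: an orthographic projection has w == 1.0 in its last column.
pbr_input.is_orthographic = view.projection[3].w == 1.0;
pbr_input.N = prepare_normal(
    world_normal,
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
    world_tangent,
#endif
#endif
    uv,
    is_front
);
pbr_input.V = calculate_view(world_position, pbr_input.is_orthographic);
let output_color = tone_mapping(pbr(pbr_input));
// ...then return output_color from the entry point.
```

Keeping `N`, `V`, and `is_orthographic` inside `PbrInput` keeps `pbr()` down to a single argument, which is what the reworked `pbr.wgsl` entry point uses. The full `bevy_pbr::pbr_functions` source follows.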

#define_import_path bevy_pbr::pbr_functions

// NOTE: This ensures that the world_normal is normalized and that, if vertex tangents
// and a normal map are available, normal mapping may be applied.
fn prepare_normal(
    world_normal: vec3<f32>,
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
    world_tangent: vec4<f32>,
#endif
#endif
    uv: vec2<f32>,
    is_front: bool,
) -> vec3<f32> {
    var N: vec3<f32> = normalize(world_normal);

#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
    // NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be
    // normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the
    // vertex tangent! Do not change this code unless you really know what you are doing.
    // http://www.mikktspace.com/
    var T: vec3<f32> = world_tangent.xyz;
    var B: vec3<f32> = world_tangent.w * cross(N, T);
#endif
#endif

    if ((material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) {
        if (!is_front) {
            N = -N;
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
            T = -T;
            B = -B;
#endif
#endif
        }
    }

#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
    // Nt is the tangent-space normal.
    var Nt: vec3<f32>;
    if ((material.flags & STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u) {
        // Only use the xy components and derive z for 2-component normal maps.
        Nt = vec3<f32>(textureSample(normal_map_texture, normal_map_sampler, uv).rg * 2.0 - 1.0, 0.0);
        Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y);
    } else {
        Nt = textureSample(normal_map_texture, normal_map_sampler, uv).rgb * 2.0 - 1.0;
    }
    // Normal maps authored for DirectX require flipping the y component
    if ((material.flags & STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u) {
        Nt.y = -Nt.y;
    }
    // NOTE: The mikktspace method of normal mapping applies the tangent-space normal from
    // the normal map texture in this way to be an EXACT inverse of how the normal map baker
    // calculates the normal maps so there is no error introduced. Do not change this code
    // unless you really know what you are doing.
    // http://www.mikktspace.com/
    N = normalize(Nt.x * T + Nt.y * B + Nt.z * N);
#endif
#endif

    return N;
}

// NOTE: Correctly calculates the view vector depending on whether
// the projection is orthographic or perspective.
fn calculate_view(
    world_position: vec4<f32>,
    is_orthographic: bool,
) -> vec3<f32> {
    var V: vec3<f32>;
    if (is_orthographic) {
        // Orthographic view vector
        V = normalize(vec3<f32>(view.view_proj[0].z, view.view_proj[1].z, view.view_proj[2].z));
    } else {
        // Only valid for a perspective projection
        V = normalize(view.world_position.xyz - world_position.xyz);
    }
    return V;
}
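
// PbrInput collects everything pbr() needs for shading. N and V are expected to be
// unit length, e.g. as produced by prepare_normal() and calculate_view() above.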
struct PbrInput {
    material: StandardMaterial;
    occlusion: f32;
    frag_coord: vec4<f32>;
    world_position: vec4<f32>;
    world_normal: vec3<f32>;
    N: vec3<f32>;
    V: vec3<f32>;
    is_orthographic: bool;
};

fn pbr(
    in: PbrInput,
) -> vec4<f32> {
    var output_color: vec4<f32> = in.material.base_color;

    // TODO use .a for exposure compensation in HDR
    let emissive = in.material.emissive;

    // calculate non-linear roughness from linear perceptualRoughness
    let metallic = in.material.metallic;
    let perceptual_roughness = in.material.perceptual_roughness;
    let roughness = perceptualRoughnessToRoughness(perceptual_roughness);

    let occlusion = in.occlusion;

    if ((in.material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) {
        // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0
        output_color.a = 1.0;
    } else if ((in.material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) {
        if (output_color.a >= in.material.alpha_cutoff) {
            // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque
            output_color.a = 1.0;
        } else {
            // NOTE: output_color.a < in.material.alpha_cutoff should not be rendered
            // NOTE: This and any other discards mean that early-z testing cannot be done!
            discard;
        }
    }

    // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886"
    let NdotV = max(dot(in.N, in.V), 0.0001);

    // Remapping [0,1] reflectance to F0
    // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping
    let reflectance = in.material.reflectance;
    let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic;

    // Diffuse strength inversely related to metallicity
    let diffuse_color = output_color.rgb * (1.0 - metallic);

    let R = reflect(-in.V, in.N);

    // accumulate color
    var light_accum: vec3<f32> = vec3<f32>(0.0);
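
    // Clustered forward lighting: reconstruct the fragment's view-space z, look up the
    // cluster it falls in, and accumulate the point lights assigned to that cluster.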
    let view_z = dot(vec4<f32>(
        view.inverse_view[0].z,
        view.inverse_view[1].z,
        view.inverse_view[2].z,
        view.inverse_view[3].z
    ), in.world_position);
    let cluster_index = fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic);
    let offset_and_count = unpack_offset_and_count(cluster_index);
    for (var i: u32 = offset_and_count[0]; i < offset_and_count[0] + offset_and_count[1]; i = i + 1u) {
        let light_id = get_light_id(i);
        let light = point_lights.data[light_id];
        var shadow: f32 = 1.0;
        if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
                && (light.flags & POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
            shadow = fetch_point_shadow(light_id, in.world_position, in.world_normal);
        }
        let light_contrib = point_light(in.world_position.xyz, light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
        light_accum = light_accum + light_contrib * shadow;
    }
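
    // Directional lights are not clustered, so all of them are evaluated.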
    let n_directional_lights = lights.n_directional_lights;
    for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) {
        let light = lights.directional_lights[i];
        var shadow: f32 = 1.0;
        if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
                && (light.flags & DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
            shadow = fetch_directional_shadow(i, in.world_position, in.world_normal);
        }
        let light_contrib = directional_light(light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
        light_accum = light_accum + light_contrib * shadow;
    }
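
    // Ambient/environment contribution: approximate the environment BRDF for the
    // diffuse and specular terms, modulated by the ambient color and occlusion below.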
    let diffuse_ambient = EnvBRDFApprox(diffuse_color, 1.0, NdotV);
    let specular_ambient = EnvBRDFApprox(F0, perceptual_roughness, NdotV);

    output_color = vec4<f32>(
        light_accum +
            (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion +
            emissive.rgb * output_color.a,
        output_color.a);

    output_color = cluster_debug_visualization(
        output_color,
        view_z,
        in.is_orthographic,
        offset_and_count,
        cluster_index,
    );

    return output_color;
}
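
// Luminance-based Reinhard tone mapping of the final shaded color.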
fn tone_mapping(in: vec4<f32>) -> vec4<f32> {
    // tone_mapping
    return vec4<f32>(reinhard_luminance(in.rgb), in.a);

    // Gamma correction.
    // Not needed with sRGB buffer
    // output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2));
}