diff --git a/Cargo.toml b/Cargo.toml index ef6cb40a7da7b..d76fe4739b163 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -444,6 +444,10 @@ name = "scene" path = "examples/scene/scene.rs" # Shaders +[[example]] +name = "array_texture" +path = "examples/shader/array_texture.rs" + [[example]] name = "shader_defs" path = "examples/shader/shader_defs.rs" diff --git a/assets/shaders/array_texture.wgsl b/assets/shaders/array_texture.wgsl new file mode 100644 index 0000000000000..2860f3fd4d6b7 --- /dev/null +++ b/assets/shaders/array_texture.wgsl @@ -0,0 +1,55 @@ +#import bevy_pbr::mesh_view_types +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_bindings +// NOTE: Bindings must come before functions that use them! +#import bevy_pbr::mesh_functions + +[[group(1), binding(0)]] +var my_array_texture: texture_2d_array; +[[group(1), binding(1)]] +var my_array_texture_sampler: sampler; + +struct Vertex { + [[location(0)]] position: vec3; + [[location(1)]] normal: vec3; + [[location(2)]] uv: vec2; +}; + +struct VertexOutput { + [[builtin(position)]] clip_position: vec4; + [[location(0)]] position: vec4; +}; + +[[stage(vertex)]] +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + out.clip_position = mesh_model_position_to_clip(vec4(vertex.position, 1.0)); + out.position = out.clip_position; + return out; +} + +struct FragmentInput { + [[location(0)]] clip_position: vec4; +}; + +[[stage(fragment)]] +fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { + // Screen-space coordinates determine which layer of the array texture we sample. + let ss = in.clip_position.xy / in.clip_position.w; + var layer: f32 = 0.0; + if (ss.x > 0.0 && ss.y > 0.0) { + layer = 0.0; + } else if (ss.x < 0.0 && ss.y > 0.0) { + layer = 1.0; + } else if (ss.x > 0.0 && ss.y < 0.0) { + layer = 2.0; + } else { + layer = 3.0; + } + + // Convert to texture coordinates. 
+ let uv = (ss + vec2(1.0)) / 2.0; + + return textureSampleLevel(my_array_texture, my_array_texture_sampler, uv, i32(layer), 0.0); +} diff --git a/assets/shaders/shader_defs.wgsl b/assets/shaders/shader_defs.wgsl index 0d1c93d37e5ea..9a291d3ac06d6 100644 --- a/assets/shaders/shader_defs.wgsl +++ b/assets/shaders/shader_defs.wgsl @@ -1,9 +1,13 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_view_types +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_types [[group(1), binding(0)]] var mesh: Mesh; +// NOTE: Bindings must come before functions that use them! +#import bevy_pbr::mesh_functions + struct Vertex { [[location(0)]] position: vec3; [[location(1)]] normal: vec3; @@ -16,10 +20,8 @@ struct VertexOutput { [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { - let world_position = mesh.model * vec4(vertex.position, 1.0); - var out: VertexOutput; - out.clip_position = view.view_proj * world_position; + out.clip_position = mesh_model_position_to_clip(vec4(vertex.position, 1.0)); return out; } diff --git a/crates/bevy_pbr/src/lib.rs b/crates/bevy_pbr/src/lib.rs index 8325030519b98..5d281ba460b8c 100644 --- a/crates/bevy_pbr/src/lib.rs +++ b/crates/bevy_pbr/src/lib.rs @@ -48,6 +48,12 @@ use bevy_transform::TransformSystem; pub const PBR_SHADER_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 4805239651767701046); +pub const PBR_TYPES_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 14465578778686805602); +pub const PBR_BINDINGS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 1501114814264999179); +pub const PBR_FUNCTIONS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 1167493567156271479); pub const SHADOW_SHADER_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 1836745567947005696); @@ -62,6 +68,21 @@ impl Plugin for PbrPlugin { PBR_SHADER_HANDLE, 
Shader::from_wgsl(include_str!("render/pbr.wgsl")), ); + shaders.set_untracked( + PBR_TYPES_HANDLE, + Shader::from_wgsl(include_str!("render/pbr_types.wgsl")) + .with_import_path("bevy_pbr::pbr_types"), + ); + shaders.set_untracked( + PBR_BINDINGS_HANDLE, + Shader::from_wgsl(include_str!("render/pbr_bindings.wgsl")) + .with_import_path("bevy_pbr::pbr_bindings"), + ); + shaders.set_untracked( + PBR_FUNCTIONS_HANDLE, + Shader::from_wgsl(include_str!("render/pbr_functions.wgsl")) + .with_import_path("bevy_pbr::pbr_functions"), + ); shaders.set_untracked( SHADOW_SHADER_HANDLE, Shader::from_wgsl(include_str!("render/depth.wgsl")), diff --git a/crates/bevy_pbr/src/render/depth.wgsl b/crates/bevy_pbr/src/render/depth.wgsl index 857ece24728b0..b7cc15acb5100 100644 --- a/crates/bevy_pbr/src/render/depth.wgsl +++ b/crates/bevy_pbr/src/render/depth.wgsl @@ -1,17 +1,15 @@ -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_view_types +#import bevy_pbr::mesh_types -// NOTE: Keep in sync with pbr.wgsl -struct View { - view_proj: mat4x4; - projection: mat4x4; - world_position: vec3; -}; [[group(0), binding(0)]] var view: View; [[group(1), binding(0)]] var mesh: Mesh; +// NOTE: Bindings must come before functions that use them! 
+#import bevy_pbr::mesh_functions + struct Vertex { [[location(0)]] position: vec3; }; @@ -23,6 +21,6 @@ struct VertexOutput { [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { var out: VertexOutput; - out.clip_position = view.view_proj * mesh.model * vec4(vertex.position, 1.0); + out.clip_position = mesh_model_position_to_clip(vec4(vertex.position, 1.0)); return out; } diff --git a/crates/bevy_pbr/src/render/mesh.rs b/crates/bevy_pbr/src/render/mesh.rs index 3e8965aa28ee1..f99855e0eaeaf 100644 --- a/crates/bevy_pbr/src/render/mesh.rs +++ b/crates/bevy_pbr/src/render/mesh.rs @@ -26,10 +26,16 @@ use bevy_transform::components::GlobalTransform; #[derive(Default)] pub struct MeshRenderPlugin; -pub const MESH_VIEW_BIND_GROUP_HANDLE: HandleUntyped = +pub const MESH_VIEW_TYPES_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 6944437233335238185); +pub const MESH_VIEW_BINDINGS_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 9076678235888822571); -pub const MESH_STRUCT_HANDLE: HandleUntyped = +pub const MESH_TYPES_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 2506024101911992377); +pub const MESH_BINDINGS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 17763658410392053870); +pub const MESH_FUNCTIONS_HANDLE: HandleUntyped = + HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 8157763673499264335); pub const MESH_SHADER_HANDLE: HandleUntyped = HandleUntyped::weak_from_u64(Shader::TYPE_UUID, 3252377289100772450); @@ -41,14 +47,29 @@ impl Plugin for MeshRenderPlugin { Shader::from_wgsl(include_str!("mesh.wgsl")), ); shaders.set_untracked( - MESH_STRUCT_HANDLE, - Shader::from_wgsl(include_str!("mesh_struct.wgsl")) - .with_import_path("bevy_pbr::mesh_struct"), + MESH_VIEW_TYPES_HANDLE, + Shader::from_wgsl(include_str!("mesh_view_types.wgsl")) + .with_import_path("bevy_pbr::mesh_view_types"), ); shaders.set_untracked( - MESH_VIEW_BIND_GROUP_HANDLE, - 
Shader::from_wgsl(include_str!("mesh_view_bind_group.wgsl")) - .with_import_path("bevy_pbr::mesh_view_bind_group"), + MESH_VIEW_BINDINGS_HANDLE, + Shader::from_wgsl(include_str!("mesh_view_bindings.wgsl")) + .with_import_path("bevy_pbr::mesh_view_bindings"), + ); + shaders.set_untracked( + MESH_TYPES_HANDLE, + Shader::from_wgsl(include_str!("mesh_types.wgsl")) + .with_import_path("bevy_pbr::mesh_types"), + ); + shaders.set_untracked( + MESH_BINDINGS_HANDLE, + Shader::from_wgsl(include_str!("mesh_bindings.wgsl")) + .with_import_path("bevy_pbr::mesh_bindings"), + ); + shaders.set_untracked( + MESH_FUNCTIONS_HANDLE, + Shader::from_wgsl(include_str!("mesh_functions.wgsl")) + .with_import_path("bevy_pbr::mesh_functions"), ); app.add_plugin(UniformComponentPlugin::::default()); diff --git a/crates/bevy_pbr/src/render/mesh.wgsl b/crates/bevy_pbr/src/render/mesh.wgsl index 0bb20aeea0e62..547302c109ef9 100644 --- a/crates/bevy_pbr/src/render/mesh.wgsl +++ b/crates/bevy_pbr/src/render/mesh.wgsl @@ -1,5 +1,9 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_view_types +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_bindings +// NOTE: Bindings must come before functions that use them! 
+#import bevy_pbr::mesh_functions struct Vertex { [[location(0)]] position: vec3; @@ -20,31 +24,17 @@ struct VertexOutput { #endif }; -[[group(2), binding(0)]] -var mesh: Mesh; - [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { - let world_position = mesh.model * vec4(vertex.position, 1.0); + let world_position = mesh_model_position_to_world(vec4(vertex.position, 1.0)); var out: VertexOutput; out.uv = vertex.uv; out.world_position = world_position; - out.clip_position = view.view_proj * world_position; - out.world_normal = mat3x3( - mesh.inverse_transpose_model[0].xyz, - mesh.inverse_transpose_model[1].xyz, - mesh.inverse_transpose_model[2].xyz - ) * vertex.normal; + out.clip_position = mesh_world_position_to_clip(world_position); + out.world_normal = mesh_model_normal_to_world(vertex.normal); #ifdef VERTEX_TANGENTS - out.world_tangent = vec4( - mat3x3( - mesh.model[0].xyz, - mesh.model[1].xyz, - mesh.model[2].xyz - ) * vertex.tangent.xyz, - vertex.tangent.w - ); + out.world_tangent = mesh_model_tangent_to_world(vertex.tangent); #endif return out; } @@ -62,4 +52,4 @@ struct FragmentInput { [[stage(fragment)]] fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { return vec4(1.0, 0.0, 1.0, 1.0); -} \ No newline at end of file +} diff --git a/crates/bevy_pbr/src/render/mesh_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_bindings.wgsl new file mode 100644 index 0000000000000..dfea952250dde --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_bindings.wgsl @@ -0,0 +1,2 @@ +[[group(2), binding(0)]] +var mesh: Mesh; diff --git a/crates/bevy_pbr/src/render/mesh_functions.wgsl b/crates/bevy_pbr/src/render/mesh_functions.wgsl new file mode 100644 index 0000000000000..8467ca7084dda --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_functions.wgsl @@ -0,0 +1,34 @@ +fn mesh_model_position_to_world(vertex_position: vec4) -> vec4 { + return mesh.model * vertex_position; +} + +fn mesh_world_position_to_clip(world_position: vec4) -> vec4 { + return view.view_proj 
* world_position; +} + +// NOTE: The intermediate world_position assignment is important +// for precision purposes when using the 'equals' depth comparison +// function. +fn mesh_model_position_to_clip(vertex_position: vec4) -> vec4 { + let world_position = mesh_model_position_to_world(vertex_position); + return mesh_world_position_to_clip(world_position); +} + +fn mesh_model_normal_to_world(vertex_normal: vec3) -> vec3 { + return mat3x3( + mesh.inverse_transpose_model[0].xyz, + mesh.inverse_transpose_model[1].xyz, + mesh.inverse_transpose_model[2].xyz + ) * vertex_normal; +} + +fn mesh_model_tangent_to_world(vertex_tangent: vec4) -> vec4 { + return vec4( + mat3x3( + mesh.model[0].xyz, + mesh.model[1].xyz, + mesh.model[2].xyz + ) * vertex_tangent.xyz, + vertex_tangent.w + ); +} diff --git a/crates/bevy_pbr/src/render/mesh_struct.wgsl b/crates/bevy_pbr/src/render/mesh_types.wgsl similarity index 100% rename from crates/bevy_pbr/src/render/mesh_struct.wgsl rename to crates/bevy_pbr/src/render/mesh_types.wgsl diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl new file mode 100644 index 0000000000000..680b39a6a6d91 --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl @@ -0,0 +1,28 @@ +[[group(0), binding(0)]] +var view: View; +[[group(0), binding(1)]] +var lights: Lights; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +[[group(0), binding(2)]] +var point_shadow_textures: texture_depth_cube; +#else +[[group(0), binding(2)]] +var point_shadow_textures: texture_depth_cube_array; +#endif +[[group(0), binding(3)]] +var point_shadow_textures_sampler: sampler_comparison; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +[[group(0), binding(4)]] +var directional_shadow_textures: texture_depth_2d; +#else +[[group(0), binding(4)]] +var directional_shadow_textures: texture_depth_2d_array; +#endif +[[group(0), binding(5)]] +var directional_shadow_textures_sampler: sampler_comparison; +[[group(0), binding(6)]] +var 
point_lights: PointLights; +[[group(0), binding(7)]] +var cluster_light_index_lists: ClusterLightIndexLists; +[[group(0), binding(8)]] +var cluster_offsets_and_counts: ClusterOffsetsAndCounts; diff --git a/crates/bevy_pbr/src/render/mesh_view_bind_group.wgsl b/crates/bevy_pbr/src/render/mesh_view_types.wgsl similarity index 70% rename from crates/bevy_pbr/src/render/mesh_view_bind_group.wgsl rename to crates/bevy_pbr/src/render/mesh_view_types.wgsl index fe8be1b0e4357..07897491baaff 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bind_group.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_types.wgsl @@ -69,32 +69,3 @@ struct ClusterOffsetsAndCounts { // and an 8-bit count of the number of lights in the low 8 bits data: array, 1024u>; }; - -[[group(0), binding(0)]] -var view: View; -[[group(0), binding(1)]] -var lights: Lights; -#ifdef NO_ARRAY_TEXTURES_SUPPORT -[[group(0), binding(2)]] -var point_shadow_textures: texture_depth_cube; -#else -[[group(0), binding(2)]] -var point_shadow_textures: texture_depth_cube_array; -#endif -[[group(0), binding(3)]] -var point_shadow_textures_sampler: sampler_comparison; -#ifdef NO_ARRAY_TEXTURES_SUPPORT -[[group(0), binding(4)]] -var directional_shadow_textures: texture_depth_2d; -#else -[[group(0), binding(4)]] -var directional_shadow_textures: texture_depth_2d_array; -#endif -[[group(0), binding(5)]] -var directional_shadow_textures_sampler: sampler_comparison; -[[group(0), binding(6)]] -var point_lights: PointLights; -[[group(0), binding(7)]] -var cluster_light_index_lists: ClusterLightIndexLists; -[[group(0), binding(8)]] -var cluster_offsets_and_counts: ClusterOffsetsAndCounts; diff --git a/crates/bevy_pbr/src/render/pbr.wgsl b/crates/bevy_pbr/src/render/pbr.wgsl index abf9dfaae7cbb..9688de1206083 100644 --- a/crates/bevy_pbr/src/render/pbr.wgsl +++ b/crates/bevy_pbr/src/render/pbr.wgsl @@ -1,451 +1,10 @@ -// From the Filament design doc -// https://google.github.io/filament/Filament.html#table_symbols -// Symbol 
Definition -// v View unit vector -// l Incident light unit vector -// n Surface normal unit vector -// h Half unit vector between l and v -// f BRDF -// f_d Diffuse component of a BRDF -// f_r Specular component of a BRDF -// α Roughness, remapped from using input perceptualRoughness -// σ Diffuse reflectance -// Ω Spherical domain -// f0 Reflectance at normal incidence -// f90 Reflectance at grazing angle -// χ+(a) Heaviside function (1 if a>0 and 0 otherwise) -// nior Index of refraction (IOR) of an interface -// ⟨n⋅l⟩ Dot product clamped to [0..1] -// ⟨a⟩ Saturated value (clamped to [0..1]) - -// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material -// and consists of two components, the diffuse component (f_d) and the specular component (f_r): -// f(v,l) = f_d(v,l) + f_r(v,l) -// -// The form of the microfacet model is the same for diffuse and specular -// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm -// -// In which: -// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets -// G models the visibility (or occlusion or shadow-masking) of the microfacets -// f_m is the microfacet BRDF and differs between specular and diffuse components -// -// The above integration needs to be approximated. - -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct - -[[group(2), binding(0)]] -var mesh: Mesh; - -struct StandardMaterial { - base_color: vec4; - emissive: vec4; - perceptual_roughness: f32; - metallic: f32; - reflectance: f32; - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
- flags: u32; - alpha_cutoff: f32; -}; - -let STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; -let STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; -let STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; -let STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; -let STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; -let STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u; -let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u; -let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u; -let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u; - -[[group(1), binding(0)]] -var material: StandardMaterial; -[[group(1), binding(1)]] -var base_color_texture: texture_2d; -[[group(1), binding(2)]] -var base_color_sampler: sampler; -[[group(1), binding(3)]] -var emissive_texture: texture_2d; -[[group(1), binding(4)]] -var emissive_sampler: sampler; -[[group(1), binding(5)]] -var metallic_roughness_texture: texture_2d; -[[group(1), binding(6)]] -var metallic_roughness_sampler: sampler; -[[group(1), binding(7)]] -var occlusion_texture: texture_2d; -[[group(1), binding(8)]] -var occlusion_sampler: sampler; -[[group(1), binding(9)]] -var normal_map_texture: texture_2d; -[[group(1), binding(10)]] -var normal_map_sampler: sampler; - -let PI: f32 = 3.141592653589793; - -fn saturate(value: f32) -> f32 { - return clamp(value, 0.0, 1.0); -} - -// distanceAttenuation is simply the square falloff of light intensity -// combined with a smooth attenuation at the edge of the light radius -// -// light radius is a non-physical construct for efficiency purposes, -// because otherwise every light affects every fragment in the scene -fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { - let factor = distanceSquare * inverseRangeSquared; - let smoothFactor = saturate(1.0 - factor * factor); - let attenuation = smoothFactor * smoothFactor; - return attenuation * 1.0 / max(distanceSquare, 0.0001); -} - -// Normal distribution 
function (specular D) -// Based on https://google.github.io/filament/Filament.html#citation-walter07 - -// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } - -// Simple implementation, has precision problems when using fp16 instead of fp32 -// see https://google.github.io/filament/Filament.html#listing_speculardfp16 -fn D_GGX(roughness: f32, NoH: f32, h: vec3) -> f32 { - let oneMinusNoHSquared = 1.0 - NoH * NoH; - let a = NoH * roughness; - let k = roughness / (oneMinusNoHSquared + a * a); - let d = k * k * (1.0 / PI); - return d; -} - -// Visibility function (Specular G) -// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } -// such that f_r becomes -// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) -// where -// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } -// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv -fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 { - let a2 = roughness * roughness; - let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2); - let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2); - let v = 0.5 / (lambdaV + lambdaL); - return v; -} - -// Fresnel function -// see https://google.github.io/filament/Filament.html#citation-schlick94 -// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 -fn F_Schlick_vec(f0: vec3, f90: f32, VoH: f32) -> vec3 { - // not using mix to keep the vec3 and float versions identical - return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); -} - -fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 { - // not using mix to keep the vec3 and float versions identical - return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); -} - -fn fresnel(f0: vec3, LoH: f32) -> vec3 { - // f_90 suitable for ambient occlusion - // see https://google.github.io/filament/Filament.html#lighting/occlusion - let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); - return F_Schlick_vec(f0, f90, LoH); -} - -// Specular BRDF -// 
https://google.github.io/filament/Filament.html#materialsystem/specularbrdf - -// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m -// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } -fn specular(f0: vec3, roughness: f32, h: vec3, NoV: f32, NoL: f32, - NoH: f32, LoH: f32, specularIntensity: f32) -> vec3 { - let D = D_GGX(roughness, NoH, h); - let V = V_SmithGGXCorrelated(roughness, NoV, NoL); - let F = fresnel(f0, LoH); - - return (specularIntensity * D * V) * F; -} - -// Diffuse BRDF -// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf -// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm -// -// simplest approximation -// float Fd_Lambert() { -// return 1.0 / PI; -// } -// -// vec3 Fd = diffuseColor * Fd_Lambert(); -// -// Disney approximation -// See https://google.github.io/filament/Filament.html#citation-burley12 -// minimal quality difference -fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 { - let f90 = 0.5 + 2.0 * roughness * LoH * LoH; - let lightScatter = F_Schlick(1.0, f90, NoL); - let viewScatter = F_Schlick(1.0, f90, NoV); - return lightScatter * viewScatter * (1.0 / PI); -} - -// From https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile -fn EnvBRDFApprox(f0: vec3, perceptual_roughness: f32, NoV: f32) -> vec3 { - let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); - let c1 = vec4(1.0, 0.0425, 1.04, -0.04); - let r = perceptual_roughness * c0 + c1; - let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y; - let AB = vec2(-1.04, 1.04) * a004 + r.zw; - return f0 * AB.x + AB.y; -} - -fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { - // clamp perceptual roughness to prevent precision problems - // According to Filament design 0.089 is recommended for mobile - // Filament uses 0.045 for non-mobile - let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); - return 
clampedPerceptualRoughness * clampedPerceptualRoughness; -} - -// from https://64.github.io/tonemapping/ -// reinhard on RGB oversaturates colors -fn reinhard(color: vec3) -> vec3 { - return color / (1.0 + color); -} - -fn reinhard_extended(color: vec3, max_white: f32) -> vec3 { - let numerator = color * (1.0 + (color / vec3(max_white * max_white))); - return numerator / (1.0 + color); -} - -// luminance coefficients from Rec. 709. -// https://en.wikipedia.org/wiki/Rec._709 -fn luminance(v: vec3) -> f32 { - return dot(v, vec3(0.2126, 0.7152, 0.0722)); -} - -fn change_luminance(c_in: vec3, l_out: f32) -> vec3 { - let l_in = luminance(c_in); - return c_in * (l_out / l_in); -} - -fn reinhard_luminance(color: vec3) -> vec3 { - let l_old = luminance(color); - let l_new = l_old / (1.0 + l_old); - return change_luminance(color, l_new); -} - -fn reinhard_extended_luminance(color: vec3, max_white_l: f32) -> vec3 { - let l_old = luminance(color); - let numerator = l_old * (1.0 + (l_old / (max_white_l * max_white_l))); - let l_new = numerator / (1.0 + l_old); - return change_luminance(color, l_new); -} - -fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { - if (is_orthographic) { - // NOTE: view_z is correct in the orthographic case - return u32(floor((view_z - lights.cluster_factors.z) * lights.cluster_factors.w)); - } else { - // NOTE: had to use -view_z to make it positive else log(negative) is nan - return min( - u32(log(-view_z) * lights.cluster_factors.z - lights.cluster_factors.w + 1.0), - lights.cluster_dimensions.z - 1u - ); - } -} - -fn fragment_cluster_index(frag_coord: vec2, view_z: f32, is_orthographic: bool) -> u32 { - let xy = vec2(floor(frag_coord * lights.cluster_factors.xy)); - let z_slice = view_z_to_z_slice(view_z, is_orthographic); - // NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer - // arrays based on the cluster index. 
- return min( - (xy.y * lights.cluster_dimensions.x + xy.x) * lights.cluster_dimensions.z + z_slice, - lights.cluster_dimensions.w - 1u - ); -} - -struct ClusterOffsetAndCount { - offset: u32; - count: u32; -}; - -fn unpack_offset_and_count(cluster_index: u32) -> ClusterOffsetAndCount { - let offset_and_count = cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)]; - var output: ClusterOffsetAndCount; - // The offset is stored in the upper 24 bits - output.offset = (offset_and_count >> 8u) & ((1u << 24u) - 1u); - // The count is stored in the lower 8 bits - output.count = offset_and_count & ((1u << 8u) - 1u); - return output; -} - -fn get_light_id(index: u32) -> u32 { - // The index is correct but in cluster_light_index_lists we pack 4 u8s into a u32 - // This means the index into cluster_light_index_lists is index / 4 - let indices = cluster_light_index_lists.data[index >> 4u][(index >> 2u) & ((1u << 2u) - 1u)]; - // And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index - return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u); -} - -fn point_light( - world_position: vec3, light: PointLight, roughness: f32, NdotV: f32, N: vec3, V: vec3, - R: vec3, F0: vec3, diffuseColor: vec3 -) -> vec3 { - let light_to_frag = light.position_radius.xyz - world_position.xyz; - let distance_square = dot(light_to_frag, light_to_frag); - let rangeAttenuation = - getDistanceAttenuation(distance_square, light.color_inverse_square_range.w); - - // Specular. - // Representative Point Area Lights. 
- // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 - let a = roughness; - let centerToRay = dot(light_to_frag, R) * R - light_to_frag; - let closestPoint = light_to_frag + centerToRay * saturate(light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay))); - let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); - let normalizationFactor = a / saturate(a + (light.position_radius.w * 0.5 * LspecLengthInverse)); - let specularIntensity = normalizationFactor * normalizationFactor; - - var L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? - var H: vec3 = normalize(L + V); - var NoL: f32 = saturate(dot(N, L)); - var NoH: f32 = saturate(dot(N, H)); - var LoH: f32 = saturate(dot(L, H)); - - let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity); - - // Diffuse. - // Comes after specular since its NoL is used in the lighting equation. - L = normalize(light_to_frag); - H = normalize(L + V); - NoL = saturate(dot(N, L)); - NoH = saturate(dot(N, H)); - LoH = saturate(dot(L, H)); - - let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); - - // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation - // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩ - // where - // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color - // Φ is luminous power in lumens - // our rangeAttentuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius - - // For a point light, luminous intensity, I, in lumens per steradian is given by: - // I = Φ / 4 π - // The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower - - // NOTE: light.color.rgb is premultiplied with light.intensity / 4 π (which would be the luminous intensity) on the CPU - - // TODO compensate for energy loss 
https://google.github.io/filament/Filament.html#materialsystem/improvingthebrdfs/energylossinspecularreflectance - - return ((diffuse + specular_light) * light.color_inverse_square_range.rgb) * (rangeAttenuation * NoL); -} - -fn directional_light(light: DirectionalLight, roughness: f32, NdotV: f32, normal: vec3, view: vec3, R: vec3, F0: vec3, diffuseColor: vec3) -> vec3 { - let incident_light = light.direction_to_light.xyz; - - let half_vector = normalize(incident_light + view); - let NoL = saturate(dot(normal, incident_light)); - let NoH = saturate(dot(normal, half_vector)); - let LoH = saturate(dot(incident_light, half_vector)); - - let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); - let specularIntensity = 1.0; - let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity); - - return (specular_light + diffuse) * light.color.rgb * NoL; -} - -fn fetch_point_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = point_lights.data[light_id]; - - // because the shadow maps align with the axes and the frustum planes are at 45 degrees - // we can get the worldspace depth by taking the largest absolute axis - let surface_to_light = light.position_radius.xyz - frag_position.xyz; - let surface_to_light_abs = abs(surface_to_light); - let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); - - // The normal bias here is already scaled by the texel size at 1 world unit from the light. - // The texel size increases proportionally with distance from the light so multiplying by - // distance to light scales the normal bias to the texel size at the fragment distance. 
- let normal_offset = light.shadow_normal_bias * distance_to_light * surface_normal.xyz; - let depth_offset = light.shadow_depth_bias * normalize(surface_to_light.xyz); - let offset_position = frag_position.xyz + normal_offset + depth_offset; - - // similar largest-absolute-axis trick as above, but now with the offset fragment position - let frag_ls = light.position_radius.xyz - offset_position.xyz; - let abs_position_ls = abs(frag_ls); - let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); - - // NOTE: These simplifications come from multiplying: - // projection * vec4(0, 0, -major_axis_magnitude, 1.0) - // and keeping only the terms that have any impact on the depth. - // Projection-agnostic approach: - let zw = -major_axis_magnitude * light.projection_lr.xy + light.projection_lr.zw; - let depth = zw.x / zw.y; - - // do the lookup, using HW PCF and comparison - // NOTE: Due to the non-uniform control flow above, we must use the Level variant of - // textureSampleCompare to avoid undefined behaviour due to some of the fragments in - // a quad (2x2 fragments) being processed not being sampled, and this messing with - // mip-mapping functionality. The shadow maps have no mipmaps so Level just samples - // from LOD 0. -#ifdef NO_ARRAY_TEXTURES_SUPPORT - return textureSampleCompare(point_shadow_textures, point_shadow_textures_sampler, frag_ls, depth); -#else - return textureSampleCompareLevel(point_shadow_textures, point_shadow_textures_sampler, frag_ls, i32(light_id), depth); -#endif -} - -fn fetch_directional_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = lights.directional_lights[light_id]; - - // The normal bias is scaled to the texel size. 
- let normal_offset = light.shadow_normal_bias * surface_normal.xyz; - let depth_offset = light.shadow_depth_bias * light.direction_to_light.xyz; - let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); - - let offset_position_clip = light.view_projection * offset_position; - if (offset_position_clip.w <= 0.0) { - return 1.0; - } - let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; - // No shadow outside the orthographic projection volume - if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 - || any(offset_position_ndc > vec3(1.0))) { - return 1.0; - } - - // compute texture coordinates for shadow lookup, compensating for the Y-flip difference - // between the NDC and texture coordinates - let flip_correction = vec2(0.5, -0.5); - let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); - - let depth = offset_position_ndc.z; - // do the lookup, using HW PCF and comparison - // NOTE: Due to non-uniform control flow above, we must use the level variant of the texture - // sampler to avoid use of implicit derivatives causing possible undefined behavior. 
-#ifdef NO_ARRAY_TEXTURES_SUPPORT - return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, depth); -#else - return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, i32(light_id), depth); -#endif -} - -fn hsv2rgb(hue: f32, saturation: f32, value: f32) -> vec3 { - let rgb = clamp( - abs( - ((hue * 6.0 + vec3(0.0, 4.0, 2.0)) % 6.0) - 3.0 - ) - 1.0, - vec3(0.0), - vec3(1.0) - ); - - return value * mix( vec3(1.0), rgb, vec3(saturation)); -} - -fn random1D(s: f32) -> f32 { - return fract(sin(s * 12.9898) * 43758.5453123); -} +#import bevy_pbr::mesh_view_types +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_bindings +#import bevy_pbr::pbr_types +#import bevy_pbr::pbr_bindings +#import bevy_pbr::pbr_functions struct FragmentInput { [[builtin(front_facing)]] is_front: bool; @@ -465,13 +24,21 @@ fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { output_color = output_color * textureSample(base_color_texture, base_color_sampler, in.uv); } - // // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit if ((material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { + // Prepare a 'processed' StandardMaterial by sampling all textures to resolve + // the material members + var pbr_material: PbrMaterial; + + pbr_material.material.flags = material.flags; + pbr_material.material.base_color = output_color; + // TODO use .a for exposure compensation in HDR var emissive: vec4 = material.emissive; if ((material.flags & STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { emissive = vec4(emissive.rgb * textureSample(emissive_texture, emissive_sampler, in.uv).rgb, 1.0); } + pbr_material.material.emissive = emissive; // calculate non-linear roughness from linear perceptualRoughness var metallic: f32 = material.metallic; @@ -482,163 
+49,36 @@ fn fragment(in: FragmentInput) -> [[location(0)]] vec4 { metallic = metallic * metallic_roughness.b; perceptual_roughness = perceptual_roughness * metallic_roughness.g; } - let roughness = perceptualRoughnessToRoughness(perceptual_roughness); + pbr_material.material.metallic = metallic; + pbr_material.material.perceptual_roughness = perceptual_roughness; var occlusion: f32 = 1.0; if ((material.flags & STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { occlusion = textureSample(occlusion_texture, occlusion_sampler, in.uv).r; } + pbr_material.occlusion = occlusion; - var N: vec3 = normalize(in.world_normal); - -#ifdef VERTEX_TANGENTS -#ifdef STANDARDMATERIAL_NORMAL_MAP - var T: vec3 = normalize(in.world_tangent.xyz - N * dot(in.world_tangent.xyz, N)); - var B: vec3 = cross(N, T) * in.world_tangent.w; -#endif -#endif - - if ((material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) { - if (!in.is_front) { - N = -N; -#ifdef VERTEX_TANGENTS -#ifdef STANDARDMATERIAL_NORMAL_MAP - T = -T; - B = -B; -#endif -#endif - } - } - + let N = prepare_normal( + in.world_normal, #ifdef VERTEX_TANGENTS #ifdef STANDARDMATERIAL_NORMAL_MAP - let TBN = mat3x3(T, B, N); - N = TBN * normalize(textureSample(normal_map_texture, normal_map_sampler, in.uv).rgb * 2.0 - 1.0); + in.world_tangent, #endif #endif + in.uv, + in.is_front, + ); - if ((material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) { - // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0 - output_color.a = 1.0; - } else if ((material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) { - if (output_color.a >= material.alpha_cutoff) { - // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque - output_color.a = 1.0; - } else { - // NOTE: output_color.a < material.alpha_cutoff should not is not rendered - // NOTE: This and any other discards mean that early-z testing cannot be done! 
- discard; - } - } - - var V: vec3; - // If the projection is not orthographic - let is_orthographic = view.projection[3].w == 1.0; - if (is_orthographic) { - // Orthographic view vector - V = normalize(vec3(view.view_proj[0].z, view.view_proj[1].z, view.view_proj[2].z)); - } else { - // Only valid for a perpective projection - V = normalize(view.world_position.xyz - in.world_position.xyz); - } - - // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" - let NdotV = max(dot(N, V), 0.0001); - - // Remapping [0,1] reflectance to F0 - // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping - let reflectance = material.reflectance; - let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic; - - // Diffuse strength inversely related to metallicity - let diffuse_color = output_color.rgb * (1.0 - metallic); - - let R = reflect(-V, N); - - // accumulate color - var light_accum: vec3 = vec3(0.0); - - let view_z = dot(vec4( - view.inverse_view[0].z, - view.inverse_view[1].z, - view.inverse_view[2].z, - view.inverse_view[3].z - ), in.world_position); - let cluster_index = fragment_cluster_index(in.frag_coord.xy, view_z, is_orthographic); - let offset_and_count = unpack_offset_and_count(cluster_index); - for (var i: u32 = offset_and_count.offset; i < offset_and_count.offset + offset_and_count.count; i = i + 1u) { - let light_id = get_light_id(i); - let light = point_lights.data[light_id]; - var shadow: f32 = 1.0; - if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (light.flags & POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = fetch_point_shadow(light_id, in.world_position, in.world_normal); - } - let light_contrib = point_light(in.world_position.xyz, light, roughness, NdotV, N, V, R, F0, diffuse_color); - light_accum = light_accum + light_contrib * shadow; - } - - let n_directional_lights = lights.n_directional_lights; - for (var i: u32 = 0u; i < 
n_directional_lights; i = i + 1u) { - let light = lights.directional_lights[i]; - var shadow: f32 = 1.0; - if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (light.flags & DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = fetch_directional_shadow(i, in.world_position, in.world_normal); - } - let light_contrib = directional_light(light, roughness, NdotV, N, V, R, F0, diffuse_color); - light_accum = light_accum + light_contrib * shadow; - } - - let diffuse_ambient = EnvBRDFApprox(diffuse_color, 1.0, NdotV); - let specular_ambient = EnvBRDFApprox(F0, perceptual_roughness, NdotV); + let V = calculate_view(in.world_position); - output_color = vec4( - light_accum + - (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion + - emissive.rgb * output_color.a, - output_color.a); + pbr_material.material.reflectance = material.reflectance; - // Cluster allocation debug (using 'over' alpha blending) -#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES - // NOTE: This debug mode visualises the z-slices - let cluster_overlay_alpha = 0.1; - var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic); - // A hack to make the colors alternate a bit more - if ((z_slice & 1u) == 1u) { - z_slice = z_slice + lights.cluster_dimensions.z / 2u; - } - let slice_color = hsv2rgb(f32(z_slice) / f32(lights.cluster_dimensions.z + 1u), 1.0, 0.5); - output_color = vec4( - (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color, - output_color.a - ); -#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES -#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY - // NOTE: This debug mode visualises the number of lights within the cluster that contains - // the fragment. It shows a sort of lighting complexity measure. 
- let cluster_overlay_alpha = 0.1; - let max_light_complexity_per_cluster = 64.0; - output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r - + cluster_overlay_alpha * smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count.count)); - output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g - + cluster_overlay_alpha * (1.0 - smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count.count))); -#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY -#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY - // NOTE: Visualizes the cluster to which the fragment belongs - let cluster_overlay_alpha = 0.1; - let cluster_color = hsv2rgb(random1D(f32(cluster_index)), 1.0, 0.5); - output_color = vec4( - (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color, - output_color.a - ); -#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + var pbr_in: PbrInput; + pbr_in.frag_coord = in.frag_coord; + pbr_in.world_position = in.world_position; + pbr_in.world_normal = in.world_normal; - // tone_mapping - output_color = vec4(reinhard_luminance(output_color.rgb), output_color.a); - // Gamma correction. 
- // Not needed with sRGB buffer - // output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2)); + output_color = pbr(pbr_in, pbr_material, N, V); } return output_color; diff --git a/crates/bevy_pbr/src/render/pbr_bindings.wgsl b/crates/bevy_pbr/src/render/pbr_bindings.wgsl new file mode 100644 index 0000000000000..d13edc63dfada --- /dev/null +++ b/crates/bevy_pbr/src/render/pbr_bindings.wgsl @@ -0,0 +1,22 @@ +[[group(1), binding(0)]] +var material: StandardMaterial; +[[group(1), binding(1)]] +var base_color_texture: texture_2d; +[[group(1), binding(2)]] +var base_color_sampler: sampler; +[[group(1), binding(3)]] +var emissive_texture: texture_2d; +[[group(1), binding(4)]] +var emissive_sampler: sampler; +[[group(1), binding(5)]] +var metallic_roughness_texture: texture_2d; +[[group(1), binding(6)]] +var metallic_roughness_sampler: sampler; +[[group(1), binding(7)]] +var occlusion_texture: texture_2d; +[[group(1), binding(8)]] +var occlusion_sampler: sampler; +[[group(1), binding(9)]] +var normal_map_texture: texture_2d; +[[group(1), binding(10)]] +var normal_map_sampler: sampler; diff --git a/crates/bevy_pbr/src/render/pbr_functions.wgsl b/crates/bevy_pbr/src/render/pbr_functions.wgsl new file mode 100644 index 0000000000000..8516af786c70e --- /dev/null +++ b/crates/bevy_pbr/src/render/pbr_functions.wgsl @@ -0,0 +1,602 @@ +// From the Filament design doc +// https://google.github.io/filament/Filament.html#table_symbols +// Symbol Definition +// v View unit vector +// l Incident light unit vector +// n Surface normal unit vector +// h Half unit vector between l and v +// f BRDF +// f_d Diffuse component of a BRDF +// f_r Specular component of a BRDF +// α Roughness, remapped from using input perceptualRoughness +// σ Diffuse reflectance +// Ω Spherical domain +// f0 Reflectance at normal incidence +// f90 Reflectance at grazing angle +// χ+(a) Heaviside function (1 if a>0 and 0 otherwise) +// nior Index of refraction (IOR) of an interface +// ⟨n⋅l⟩ Dot product 
clamped to [0..1] +// ⟨a⟩ Saturated value (clamped to [0..1]) + +// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material +// and consists of two components, the diffuse component (f_d) and the specular component (f_r): +// f(v,l) = f_d(v,l) + f_r(v,l) +// +// The form of the microfacet model is the same for diffuse and specular +// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm +// +// In which: +// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets +// G models the visibility (or occlusion or shadow-masking) of the microfacets +// f_m is the microfacet BRDF and differs between specular and diffuse components +// +// The above integration needs to be approximated. + +let PI: f32 = 3.141592653589793; + +fn saturate(value: f32) -> f32 { + return clamp(value, 0.0, 1.0); +} + +// distanceAttenuation is simply the square falloff of light intensity +// combined with a smooth attenuation at the edge of the light radius +// +// light radius is a non-physical construct for efficiency purposes, +// because otherwise every light affects every fragment in the scene +fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { + let factor = distanceSquare * inverseRangeSquared; + let smoothFactor = saturate(1.0 - factor * factor); + let attenuation = smoothFactor * smoothFactor; + return attenuation * 1.0 / max(distanceSquare, 0.0001); +} + +// Normal distribution function (specular D) +// Based on https://google.github.io/filament/Filament.html#citation-walter07 + +// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } + +// Simple implementation, has precision problems when using fp16 instead of fp32 +// see https://google.github.io/filament/Filament.html#listing_speculardfp16 +fn D_GGX(roughness: f32, NoH: f32, h: vec3) -> f32 { + let oneMinusNoHSquared = 1.0 - NoH * NoH; + let a = NoH * roughness; + let k = 
roughness / (oneMinusNoHSquared + a * a); + let d = k * k * (1.0 / PI); + return d; +} + +// Visibility function (Specular G) +// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } +// such that f_r becomes +// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) +// where +// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } +// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv +fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 { + let a2 = roughness * roughness; + let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2); + let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2); + let v = 0.5 / (lambdaV + lambdaL); + return v; +} + +// Fresnel function +// see https://google.github.io/filament/Filament.html#citation-schlick94 +// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 +fn F_Schlick_vec(f0: vec3, f90: f32, VoH: f32) -> vec3 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); +} + +fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); +} + +fn fresnel(f0: vec3, LoH: f32) -> vec3 { + // f_90 suitable for ambient occlusion + // see https://google.github.io/filament/Filament.html#lighting/occlusion + let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); + return F_Schlick_vec(f0, f90, LoH); +} + +// Specular BRDF +// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf + +// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m +// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } +fn specular(f0: vec3, roughness: f32, h: vec3, NoV: f32, NoL: f32, + NoH: f32, LoH: f32, specularIntensity: f32) -> vec3 { + let D = D_GGX(roughness, NoH, h); + let V = V_SmithGGXCorrelated(roughness, NoV, NoL); + let F = fresnel(f0, 
LoH); + + return (specularIntensity * D * V) * F; +} + +// Diffuse BRDF +// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf +// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm +// +// simplest approximation +// float Fd_Lambert() { +// return 1.0 / PI; +// } +// +// vec3 Fd = diffuseColor * Fd_Lambert(); +// +// Disney approximation +// See https://google.github.io/filament/Filament.html#citation-burley12 +// minimal quality difference +fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 { + let f90 = 0.5 + 2.0 * roughness * LoH * LoH; + let lightScatter = F_Schlick(1.0, f90, NoL); + let viewScatter = F_Schlick(1.0, f90, NoV); + return lightScatter * viewScatter * (1.0 / PI); +} + +// From https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile +fn EnvBRDFApprox(f0: vec3, perceptual_roughness: f32, NoV: f32) -> vec3 { + let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); + let c1 = vec4(1.0, 0.0425, 1.04, -0.04); + let r = perceptual_roughness * c0 + c1; + let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y; + let AB = vec2(-1.04, 1.04) * a004 + r.zw; + return f0 * AB.x + AB.y; +} + +fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { + // clamp perceptual roughness to prevent precision problems + // According to Filament design 0.089 is recommended for mobile + // Filament uses 0.045 for non-mobile + let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); + return clampedPerceptualRoughness * clampedPerceptualRoughness; +} + +// from https://64.github.io/tonemapping/ +// reinhard on RGB oversaturates colors +fn reinhard(color: vec3) -> vec3 { + return color / (1.0 + color); +} + +fn reinhard_extended(color: vec3, max_white: f32) -> vec3 { + let numerator = color * (1.0 + (color / vec3(max_white * max_white))); + return numerator / (1.0 + color); +} + +// luminance coefficients from Rec. 709. 
+// https://en.wikipedia.org/wiki/Rec._709 +fn luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} + +fn change_luminance(c_in: vec3, l_out: f32) -> vec3 { + let l_in = luminance(c_in); + return c_in * (l_out / l_in); +} + +fn reinhard_luminance(color: vec3) -> vec3 { + let l_old = luminance(color); + let l_new = l_old / (1.0 + l_old); + return change_luminance(color, l_new); +} + +fn reinhard_extended_luminance(color: vec3, max_white_l: f32) -> vec3 { + let l_old = luminance(color); + let numerator = l_old * (1.0 + (l_old / (max_white_l * max_white_l))); + let l_new = numerator / (1.0 + l_old); + return change_luminance(color, l_new); +} + +fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { + if (is_orthographic) { + // NOTE: view_z is correct in the orthographic case + return u32(floor((view_z - lights.cluster_factors.z) * lights.cluster_factors.w)); + } else { + // NOTE: had to use -view_z to make it positive else log(negative) is nan + return min( + u32(log(-view_z) * lights.cluster_factors.z - lights.cluster_factors.w + 1.0), + lights.cluster_dimensions.z - 1u + ); + } +} + +fn fragment_cluster_index(frag_coord: vec2, view_z: f32, is_orthographic: bool) -> u32 { + let xy = vec2(floor(frag_coord * lights.cluster_factors.xy)); + let z_slice = view_z_to_z_slice(view_z, is_orthographic); + // NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer + // arrays based on the cluster index. 
+ return min( + (xy.y * lights.cluster_dimensions.x + xy.x) * lights.cluster_dimensions.z + z_slice, + lights.cluster_dimensions.w - 1u + ); +} + +struct ClusterOffsetAndCount { + offset: u32; + count: u32; +}; + +fn unpack_offset_and_count(cluster_index: u32) -> ClusterOffsetAndCount { + let offset_and_count = cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)]; + var output: ClusterOffsetAndCount; + // The offset is stored in the upper 24 bits + output.offset = (offset_and_count >> 8u) & ((1u << 24u) - 1u); + // The count is stored in the lower 8 bits + output.count = offset_and_count & ((1u << 8u) - 1u); + return output; +} + +fn get_light_id(index: u32) -> u32 { + // The index is correct but in cluster_light_index_lists we pack 4 u8s into a u32 + // This means the index into cluster_light_index_lists is index / 4 + let indices = cluster_light_index_lists.data[index >> 4u][(index >> 2u) & ((1u << 2u) - 1u)]; + // And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index + return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u); +} + +fn point_light( + world_position: vec3, light: PointLight, roughness: f32, NdotV: f32, N: vec3, V: vec3, + R: vec3, F0: vec3, diffuseColor: vec3 +) -> vec3 { + let light_to_frag = light.position_radius.xyz - world_position.xyz; + let distance_square = dot(light_to_frag, light_to_frag); + let rangeAttenuation = + getDistanceAttenuation(distance_square, light.color_inverse_square_range.w); + + // Specular. + // Representative Point Area Lights. 
+ // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 + let a = roughness; + let centerToRay = dot(light_to_frag, R) * R - light_to_frag; + let closestPoint = light_to_frag + centerToRay * saturate(light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay))); + let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); + let normalizationFactor = a / saturate(a + (light.position_radius.w * 0.5 * LspecLengthInverse)); + let specularIntensity = normalizationFactor * normalizationFactor; + + var L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? + var H: vec3 = normalize(L + V); + var NoL: f32 = saturate(dot(N, L)); + var NoH: f32 = saturate(dot(N, H)); + var LoH: f32 = saturate(dot(L, H)); + + let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity); + + // Diffuse. + // Comes after specular since its NoL is used in the lighting equation. + L = normalize(light_to_frag); + H = normalize(L + V); + NoL = saturate(dot(N, L)); + NoH = saturate(dot(N, H)); + LoH = saturate(dot(L, H)); + + let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); + + // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation + // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩ + // where + // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color + // Φ is luminous power in lumens + // our rangeAttentuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius + + // For a point light, luminous intensity, I, in lumens per steradian is given by: + // I = Φ / 4 π + // The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower + + // NOTE: light.color.rgb is premultiplied with light.intensity / 4 π (which would be the luminous intensity) on the CPU + + // TODO compensate for energy loss 
https://google.github.io/filament/Filament.html#materialsystem/improvingthebrdfs/energylossinspecularreflectance + + return ((diffuse + specular_light) * light.color_inverse_square_range.rgb) * (rangeAttenuation * NoL); +} + +fn directional_light(light: DirectionalLight, roughness: f32, NdotV: f32, normal: vec3, view: vec3, R: vec3, F0: vec3, diffuseColor: vec3) -> vec3 { + let incident_light = light.direction_to_light.xyz; + + let half_vector = normalize(incident_light + view); + let NoL = saturate(dot(normal, incident_light)); + let NoH = saturate(dot(normal, half_vector)); + let LoH = saturate(dot(incident_light, half_vector)); + + let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); + let specularIntensity = 1.0; + let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity); + + return (specular_light + diffuse) * light.color.rgb * NoL; +} + +fn fetch_point_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = point_lights.data[light_id]; + + // because the shadow maps align with the axes and the frustum planes are at 45 degrees + // we can get the worldspace depth by taking the largest absolute axis + let surface_to_light = light.position_radius.xyz - frag_position.xyz; + let surface_to_light_abs = abs(surface_to_light); + let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); + + // The normal bias here is already scaled by the texel size at 1 world unit from the light. + // The texel size increases proportionally with distance from the light so multiplying by + // distance to light scales the normal bias to the texel size at the fragment distance. 
+ let normal_offset = light.shadow_normal_bias * distance_to_light * surface_normal.xyz; + let depth_offset = light.shadow_depth_bias * normalize(surface_to_light.xyz); + let offset_position = frag_position.xyz + normal_offset + depth_offset; + + // similar largest-absolute-axis trick as above, but now with the offset fragment position + let frag_ls = light.position_radius.xyz - offset_position.xyz; + let abs_position_ls = abs(frag_ls); + let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); + + // NOTE: These simplifications come from multiplying: + // projection * vec4(0, 0, -major_axis_magnitude, 1.0) + // and keeping only the terms that have any impact on the depth. + // Projection-agnostic approach: + let zw = -major_axis_magnitude * light.projection_lr.xy + light.projection_lr.zw; + let depth = zw.x / zw.y; + + // do the lookup, using HW PCF and comparison + // NOTE: Due to the non-uniform control flow above, we must use the Level variant of + // textureSampleCompare to avoid undefined behaviour due to some of the fragments in + // a quad (2x2 fragments) being processed not being sampled, and this messing with + // mip-mapping functionality. The shadow maps have no mipmaps so Level just samples + // from LOD 0. +#ifdef NO_ARRAY_TEXTURES_SUPPORT + return textureSampleCompare(point_shadow_textures, point_shadow_textures_sampler, frag_ls, depth); +#else + return textureSampleCompareLevel(point_shadow_textures, point_shadow_textures_sampler, frag_ls, i32(light_id), depth); +#endif +} + +fn fetch_directional_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = lights.directional_lights[light_id]; + + // The normal bias is scaled to the texel size. 
+ let normal_offset = light.shadow_normal_bias * surface_normal.xyz; + let depth_offset = light.shadow_depth_bias * light.direction_to_light.xyz; + let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); + + let offset_position_clip = light.view_projection * offset_position; + if (offset_position_clip.w <= 0.0) { + return 1.0; + } + let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; + // No shadow outside the orthographic projection volume + if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 + || any(offset_position_ndc > vec3(1.0))) { + return 1.0; + } + + // compute texture coordinates for shadow lookup, compensating for the Y-flip difference + // between the NDC and texture coordinates + let flip_correction = vec2(0.5, -0.5); + let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); + + let depth = offset_position_ndc.z; + // do the lookup, using HW PCF and comparison + // NOTE: Due to non-uniform control flow above, we must use the level variant of the texture + // sampler to avoid use of implicit derivatives causing possible undefined behavior. +#ifdef NO_ARRAY_TEXTURES_SUPPORT + return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, depth); +#else + return textureSampleCompareLevel(directional_shadow_textures, directional_shadow_textures_sampler, light_local, i32(light_id), depth); +#endif +} + +fn hsv2rgb(hue: f32, saturation: f32, value: f32) -> vec3 { + let rgb = clamp( + abs( + ((hue * 6.0 + vec3(0.0, 4.0, 2.0)) % 6.0) - 3.0 + ) - 1.0, + vec3(0.0), + vec3(1.0) + ); + + return value * mix( vec3(1.0), rgb, vec3(saturation)); +} + +fn random1D(s: f32) -> f32 { + return fract(sin(s * 12.9898) * 43758.5453123); +} + +// NOTE: This ensures that the world_normal is normalized and if +// vertex tangents and normal maps then normal mapping may be applied. 
+fn prepare_normal( + world_normal: vec3, +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + world_tangent: vec4, +#endif +#endif + uv: vec2, + is_front: bool, +) -> vec3 { + var N: vec3 = normalize(world_normal); + +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + var T: vec3 = normalize(world_tangent.xyz - N * dot(world_tangent.xyz, N)); + var B: vec3 = cross(N, T) * world_tangent.w; +#endif +#endif + + if ((material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) { + if (!is_front) { + N = -N; +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + T = -T; + B = -B; +#endif +#endif + } + } + +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + let TBN = mat3x3(T, B, N); + N = TBN * normalize(textureSample(normal_map_texture, normal_map_sampler, uv).rgb * 2.0 - 1.0); +#endif +#endif + + return N; +} + +// NOTE: Correctly calculates the view vector depending on whether +// the projection is orthographic or perspective. +fn calculate_view( + world_position: vec4, +) -> vec3 { + var V: vec3; + let is_orthographic = view.projection[3].w == 1.0; + if (is_orthographic) { + V = normalize(vec3(view.view_proj[0].z, view.view_proj[1].z, view.view_proj[2].z)); + } else { + // Only valid for a perspective projection + V = normalize(view.world_position.xyz - world_position.xyz); + } + return V; +} + +struct PbrMaterial { + material: StandardMaterial; + occlusion: f32; +}; + +struct PbrInput { + frag_coord: vec4; + world_position: vec4; + world_normal: vec3; +}; + +fn pbr( + in: PbrInput, + pbr: PbrMaterial, + N: vec3, + V: vec3, +) -> vec4 { + var output_color: vec4 = pbr.material.base_color; + + // TODO use .a for exposure compensation in HDR + let emissive = pbr.material.emissive; + + // calculate non-linear roughness from linear perceptualRoughness + let metallic = pbr.material.metallic; + let perceptual_roughness = pbr.material.perceptual_roughness; + let roughness = perceptualRoughnessToRoughness(perceptual_roughness); + + 
let occlusion = pbr.occlusion;
+
+ if ((pbr.material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) {
+ // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0
+ output_color.a = 1.0;
+ } else if ((pbr.material.flags & STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) {
+ if (output_color.a >= pbr.material.alpha_cutoff) {
+ // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque
+ output_color.a = 1.0;
+ } else {
+ // NOTE: output_color.a < pbr.material.alpha_cutoff should not be rendered
+ // NOTE: This and any other discards mean that early-z testing cannot be done!
+ discard;
+ }
+ }
+
+ // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886"
+ let NdotV = max(dot(N, V), 0.0001);
+
+ // Remapping [0,1] reflectance to F0
+ // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping
+ let reflectance = pbr.material.reflectance;
+ let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic;
+
+ // Diffuse strength inversely related to metallicity
+ let diffuse_color = output_color.rgb * (1.0 - metallic);
+
+ let R = reflect(-V, N);
+
+ // accumulate color
+ var light_accum: vec3 = vec3(0.0);
+
+ let view_z = dot(vec4(
+ view.inverse_view[0].z,
+ view.inverse_view[1].z,
+ view.inverse_view[2].z,
+ view.inverse_view[3].z
+ ), in.world_position);
+ let is_orthographic = view.projection[3].w == 1.0;
+ let cluster_index = fragment_cluster_index(in.frag_coord.xy, view_z, is_orthographic);
+ let offset_and_count = unpack_offset_and_count(cluster_index);
+ for (var i: u32 = offset_and_count.offset; i < offset_and_count.offset + offset_and_count.count; i = i + 1u) {
+ let light_id = get_light_id(i);
+ let light = point_lights.data[light_id];
+ var shadow: f32 = 1.0;
+ if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
+ && (light.flags & POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
+ shadow = 
fetch_point_shadow(light_id, in.world_position, in.world_normal); + } + let light_contrib = point_light(in.world_position.xyz, light, roughness, NdotV, N, V, R, F0, diffuse_color); + light_accum = light_accum + light_contrib * shadow; + } + + let n_directional_lights = lights.n_directional_lights; + for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { + let light = lights.directional_lights[i]; + var shadow: f32 = 1.0; + if ((mesh.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (light.flags & DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = fetch_directional_shadow(i, in.world_position, in.world_normal); + } + let light_contrib = directional_light(light, roughness, NdotV, N, V, R, F0, diffuse_color); + light_accum = light_accum + light_contrib * shadow; + } + + let diffuse_ambient = EnvBRDFApprox(diffuse_color, 1.0, NdotV); + let specular_ambient = EnvBRDFApprox(F0, perceptual_roughness, NdotV); + + output_color = vec4( + light_accum + + (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion + + emissive.rgb * output_color.a, + output_color.a); + + // Cluster allocation debug (using 'over' alpha blending) +#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES + // NOTE: This debug mode visualises the z-slices + let cluster_overlay_alpha = 0.1; + var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic); + // A hack to make the colors alternate a bit more + if ((z_slice & 1u) == 1u) { + z_slice = z_slice + lights.cluster_dimensions.z / 2u; + } + let slice_color = hsv2rgb(f32(z_slice) / f32(lights.cluster_dimensions.z + 1u), 1.0, 0.5); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY + // NOTE: This debug mode visualises the number of lights within the cluster that contains + // the fragment. It shows a sort of lighting complexity measure. 
+ let cluster_overlay_alpha = 0.1; + let max_light_complexity_per_cluster = 64.0; + output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r + + cluster_overlay_alpha * smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count.count)); + output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g + + cluster_overlay_alpha * (1.0 - smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_count.count))); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + // NOTE: Visualizes the cluster to which the fragment belongs + let cluster_overlay_alpha = 0.1; + let cluster_color = hsv2rgb(random1D(f32(cluster_index)), 1.0, 0.5); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + + // tone_mapping + output_color = vec4(reinhard_luminance(output_color.rgb), output_color.a); + // Gamma correction. + // Not needed with sRGB buffer + // output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2)); + + return output_color; +} diff --git a/crates/bevy_pbr/src/render/pbr_types.wgsl b/crates/bevy_pbr/src/render/pbr_types.wgsl new file mode 100644 index 0000000000000..8f595c49da00f --- /dev/null +++ b/crates/bevy_pbr/src/render/pbr_types.wgsl @@ -0,0 +1,20 @@ +struct StandardMaterial { + base_color: vec4; + emissive: vec4; + perceptual_roughness: f32; + metallic: f32; + reflectance: f32; + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
+ flags: u32; + alpha_cutoff: f32; +}; + +let STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; +let STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; +let STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; +let STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; +let STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; +let STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u; +let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u; +let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u; +let STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u; diff --git a/crates/bevy_pbr/src/render/wireframe.wgsl b/crates/bevy_pbr/src/render/wireframe.wgsl index b76c39e695582..1bb8111041a14 100644 --- a/crates/bevy_pbr/src/render/wireframe.wgsl +++ b/crates/bevy_pbr/src/render/wireframe.wgsl @@ -1,24 +1,25 @@ -#import bevy_pbr::mesh_view_bind_group -#import bevy_pbr::mesh_struct +#import bevy_pbr::mesh_view_types +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_types + +[[group(1), binding(0)]] +var mesh: Mesh; + +// NOTE: Bindings must come before functions that use them! 
+#import bevy_pbr::mesh_functions struct Vertex { [[location(0)]] position: vec3; }; -[[group(1), binding(0)]] -var mesh: Mesh; - struct VertexOutput { [[builtin(position)]] clip_position: vec4; }; [[stage(vertex)]] fn vertex(vertex: Vertex) -> VertexOutput { - let world_position = mesh.model * vec4(vertex.position, 1.0); - var out: VertexOutput; - out.clip_position = view.view_proj * world_position; - + out.clip_position = mesh_model_position_to_clip(vec4(vertex.position, 1.0)); return out; } diff --git a/examples/README.md b/examples/README.md index aeaff8c7845f2..cc4d26829b292 100644 --- a/examples/README.md +++ b/examples/README.md @@ -222,6 +222,7 @@ Example | File | Description Example | File | Description --- | --- | --- +`array_texture` | [`shader/array_texture.rs`](./shader/array_texture.rs) | Illustrates how to create a texture for use with a texture_2d_array shader uniform variable `shader_material` | [`shader/shader_material.rs`](./shader/shader_material.rs) | Illustrates creating a custom material and a shader that uses it `shader_material_glsl` | [`shader/shader_material_glsl.rs`](./shader/shader_material_glsl.rs) | A custom shader using the GLSL shading language. `shader_instancing` | [`shader/shader_instancing.rs`](./shader/shader_instancing.rs) | A custom shader showing off rendering a mesh multiple times in one draw call. 
diff --git a/examples/shader/array_texture.rs b/examples/shader/array_texture.rs new file mode 100644 index 0000000000000..ad43e0c38fd51 --- /dev/null +++ b/examples/shader/array_texture.rs @@ -0,0 +1,173 @@ +use bevy::{ + asset::LoadState, + ecs::system::{lifetimeless::SRes, SystemParamItem}, + pbr::MaterialPipeline, + prelude::*, + reflect::TypeUuid, + render::{ + render_asset::{PrepareAssetError, RenderAsset, RenderAssets}, + render_resource::{ + AddressMode, BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, + BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingResource, BindingType, + SamplerBindingType, ShaderStages, TextureSampleType, TextureViewDimension, + }, + renderer::RenderDevice, + }, +}; + +/// This example illustrates how to create a texture for use with a `texture_2d_array` shader +/// uniform variable. +fn main() { + App::new() + .add_plugins(DefaultPlugins) + .add_plugin(MaterialPlugin::::default()) + .add_startup_system(setup) + .add_system(create_array_texture) + .run(); +} + +struct LoadingTexture { + is_loaded: bool, + handle: Handle, +} + +fn setup(mut commands: Commands, asset_server: Res) { + // Start loading the texture. + commands.insert_resource(LoadingTexture { + is_loaded: false, + handle: asset_server.load("textures/array_texture.png"), + }); + + // camera + commands.spawn_bundle(PerspectiveCameraBundle { + transform: Transform::from_xyz(2.0, 2.0, 2.0).looking_at(Vec3::ZERO, Vec3::Y), + ..Default::default() + }); +} + +fn create_array_texture( + mut commands: Commands, + asset_server: Res, + mut loading_texture: ResMut, + mut textures: ResMut>, + mut meshes: ResMut>, + mut materials: ResMut>, +) { + if loading_texture.is_loaded + || asset_server.get_load_state(loading_texture.handle.clone()) != LoadState::Loaded + { + return; + } + loading_texture.is_loaded = true; + let mut texture = textures.get_mut(loading_texture.handle.clone()).unwrap(); + + // Create a new array texture asset from the loaded texture. 
+ let array_layers = 4; + texture.reinterpret_stacked_2d_as_array(array_layers); + texture.sampler_descriptor.address_mode_u = AddressMode::Repeat; + texture.sampler_descriptor.address_mode_v = AddressMode::Repeat; + + // Spawn a cube that's shaded using the array texture. + commands.spawn().insert_bundle(MaterialMeshBundle { + mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), + material: materials.add(ArrayTextureMaterial { + array_texture: loading_texture.handle.clone(), + }), + ..Default::default() + }); +} + +#[derive(Debug, Clone, TypeUuid)] +#[uuid = "9c5a0ddf-1eaf-41b4-9832-ed736fd26af3"] +struct ArrayTextureMaterial { + array_texture: Handle, +} + +#[derive(Clone)] +pub struct GpuArrayTextureMaterial { + bind_group: BindGroup, +} + +impl RenderAsset for ArrayTextureMaterial { + type ExtractedAsset = ArrayTextureMaterial; + type PreparedAsset = GpuArrayTextureMaterial; + type Param = ( + SRes, + SRes>, + SRes>, + ); + fn extract_asset(&self) -> Self::ExtractedAsset { + self.clone() + } + + fn prepare_asset( + extracted_asset: Self::ExtractedAsset, + (render_device, material_pipeline, gpu_images): &mut SystemParamItem, + ) -> Result> { + let (array_texture_texture_view, array_texture_sampler) = if let Some(result) = + material_pipeline + .mesh_pipeline + .get_image_texture(gpu_images, &Some(extracted_asset.array_texture.clone())) + { + result + } else { + return Err(PrepareAssetError::RetryNextUpdate(extracted_asset)); + }; + let bind_group = render_device.create_bind_group(&BindGroupDescriptor { + entries: &[ + BindGroupEntry { + binding: 0, + resource: BindingResource::TextureView(array_texture_texture_view), + }, + BindGroupEntry { + binding: 1, + resource: BindingResource::Sampler(array_texture_sampler), + }, + ], + label: Some("array_texture_material_bind_group"), + layout: &material_pipeline.material_layout, + }); + + Ok(GpuArrayTextureMaterial { bind_group }) + } +} + +impl Material for ArrayTextureMaterial { + fn vertex_shader(asset_server: 
&AssetServer) -> Option> { + Some(asset_server.load("shaders/array_texture.wgsl")) + } + + fn fragment_shader(asset_server: &AssetServer) -> Option> { + Some(asset_server.load("shaders/array_texture.wgsl")) + } + + fn bind_group(render_asset: &::PreparedAsset) -> &BindGroup { + &render_asset.bind_group + } + + fn bind_group_layout(render_device: &RenderDevice) -> BindGroupLayout { + render_device.create_bind_group_layout(&BindGroupLayoutDescriptor { + entries: &[ + // Array Texture + BindGroupLayoutEntry { + binding: 0, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Texture { + multisampled: false, + sample_type: TextureSampleType::Float { filterable: true }, + view_dimension: TextureViewDimension::D2Array, + }, + count: None, + }, + // Array Texture Sampler + BindGroupLayoutEntry { + binding: 1, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Sampler(SamplerBindingType::Filtering), + count: None, + }, + ], + label: None, + }) + } +}