Added HDR skybox and better rendering in general

Verox001 2025-01-16 17:00:50 +01:00
parent b8cc592235
commit 05c733dc25
19 changed files with 3761 additions and 478 deletions

Cargo.lock (generated): 11 lines changed

@@ -734,6 +734,15 @@ dependencies = [
  "hashbrown",
 ]

+[[package]]
+name = "instant"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
+dependencies = [
+ "cfg-if",
+]
+
 [[package]]
 name = "is-terminal"
 version = "0.4.13"
@@ -1047,11 +1056,13 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "bytemuck",
+ "cfg-if",
  "cgmath",
  "env_logger",
  "fs_extra",
  "glob",
  "image",
+ "instant",
  "log",
  "pollster",
  "tobj",

Cargo.toml

@@ -4,22 +4,22 @@ version = "0.1.0"
 edition = "2021"

 [dependencies]
-cgmath = "0.18"
-winit = { version = "0.29", features = ["rwh_05"] }
-env_logger = "0.10"
-log = "0.4"
-pollster = "0.3"
-wgpu = "22.0"
-bytemuck = { version = "1.16", features = [ "derive" ] }
+cfg-if = "1"
 anyhow = "1.0"
+bytemuck = { version = "1.16", features = [ "derive" ] }
+cgmath = "0.18"
+env_logger = "0.10"
+pollster = "0.3"
+log = "0.4"
 tobj = { version = "3.2", default-features = false, features = ["async"]}
-fs_extra = "1.2"
-glob = "0.3"
+wgpu = { version = "22.0"}
+winit = { version = "0.29", features = ["rwh_05"] }
+instant = "0.1"

 [dependencies.image]
 version = "0.24"
 default-features = false
-features = ["png", "jpeg"]
+features = ["png", "jpeg", "hdr"]

 [build-dependencies]
 anyhow = "1.0"

res/cobble-diffuse.png (new binary file, 1.1 MiB, not shown)

res/cobble-normal.png (new binary file, 2.1 MiB, not shown)

res/cobble_sphere.mtl (new file, 14 lines)

@@ -0,0 +1,14 @@
# Blender MTL File: 'cobble_sphere.blend'
# Material Count: 1
newmtl Material
Ns 250.000000
Ka 1.000000 1.000000 1.000000
Kd 0.800000 0.800000 0.800000
Ks 0.500000 0.500000 0.500000
Ke 0.000000 0.000000 0.000000
Ni 1.450000
d 1.000000
illum 2
map_Bump cobble-normal.png
map_Kd cobble-diffuse.png

res/cobble_sphere.obj (new file, 2041 lines; diff suppressed because it is too large)

res/pure-sky.hdr (new binary file, not shown)

src/camera.rs (new file, 190 lines)

@@ -0,0 +1,190 @@
use cgmath::*;
use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::dpi::PhysicalPosition;
use winit::event::*;
use winit::keyboard::KeyCode;
const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;
#[derive(Debug)]
pub struct Camera {
pub position: Point3<f32>,
yaw: Rad<f32>,
pitch: Rad<f32>,
}
impl Camera {
pub fn new<V: Into<Point3<f32>>, Y: Into<Rad<f32>>, P: Into<Rad<f32>>>(
position: V,
yaw: Y,
pitch: P,
) -> Self {
Self {
position: position.into(),
yaw: yaw.into(),
pitch: pitch.into(),
}
}
pub fn calc_matrix(&self) -> Matrix4<f32> {
let (sin_pitch, cos_pitch) = self.pitch.0.sin_cos();
let (sin_yaw, cos_yaw) = self.yaw.0.sin_cos();
Matrix4::look_to_rh(
self.position,
Vector3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
Vector3::unit_y(),
)
}
}
pub struct Projection {
aspect: f32,
fovy: Rad<f32>,
znear: f32,
zfar: f32,
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
znear,
zfar,
}
}
pub fn resize(&mut self, width: u32, height: u32) {
self.aspect = width as f32 / height as f32;
}
pub fn calc_matrix(&self) -> Matrix4<f32> {
// UPDATED!
perspective(self.fovy, self.aspect, self.znear, self.zfar)
}
}
#[derive(Debug)]
pub struct CameraController {
amount_left: f32,
amount_right: f32,
amount_forward: f32,
amount_backward: f32,
amount_up: f32,
amount_down: f32,
rotate_horizontal: f32,
rotate_vertical: f32,
scroll: f32,
speed: f32,
sensitivity: f32,
}
impl CameraController {
pub fn new(speed: f32, sensitivity: f32) -> Self {
Self {
amount_left: 0.0,
amount_right: 0.0,
amount_forward: 0.0,
amount_backward: 0.0,
amount_up: 0.0,
amount_down: 0.0,
rotate_horizontal: 0.0,
rotate_vertical: 0.0,
scroll: 0.0,
speed,
sensitivity,
}
}
pub fn process_keyboard(&mut self, key: KeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
KeyCode::KeyW | KeyCode::ArrowUp => {
self.amount_forward = amount;
true
}
KeyCode::KeyS | KeyCode::ArrowDown => {
self.amount_backward = amount;
true
}
KeyCode::KeyA | KeyCode::ArrowLeft => {
self.amount_left = amount;
true
}
KeyCode::KeyD | KeyCode::ArrowRight => {
self.amount_right = amount;
true
}
KeyCode::Space => {
self.amount_up = amount;
true
}
KeyCode::ShiftLeft => {
self.amount_down = amount;
true
}
_ => false,
}
}
pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
self.rotate_horizontal = mouse_dx as f32;
self.rotate_vertical = mouse_dy as f32;
}
pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
self.scroll = match delta {
// I'm assuming a line is about 100 pixels
MouseScrollDelta::LineDelta(_, scroll) => -scroll * 0.5,
MouseScrollDelta::PixelDelta(PhysicalPosition { y: scroll, .. }) => -*scroll as f32,
};
}
pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
let dt = dt.as_secs_f32();
// Move forward/backward and left/right
let (yaw_sin, yaw_cos) = camera.yaw.0.sin_cos();
let forward = Vector3::new(yaw_cos, 0.0, yaw_sin).normalize();
let right = Vector3::new(-yaw_sin, 0.0, yaw_cos).normalize();
camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
// Move in/out (aka. "zoom")
// Note: this isn't an actual zoom. The camera's position
// changes when zooming. I've added this to make it easier
// to get closer to an object you want to focus on.
let (pitch_sin, pitch_cos) = camera.pitch.0.sin_cos();
let scrollward =
Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
self.scroll = 0.0;
// Move up/down. Since we don't use roll, we can just
// modify the y coordinate directly.
camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
// Rotate
camera.yaw += Rad(self.rotate_horizontal) * self.sensitivity * dt;
camera.pitch += Rad(-self.rotate_vertical) * self.sensitivity * dt;
// If process_mouse isn't called every frame, these values
// will not get set to zero, and the camera will rotate
// when moving in a non-cardinal direction.
self.rotate_horizontal = 0.0;
self.rotate_vertical = 0.0;
// Keep the camera's angle from going too high/low.
if camera.pitch < -Rad(SAFE_FRAC_PI_2) {
camera.pitch = -Rad(SAFE_FRAC_PI_2);
} else if camera.pitch > Rad(SAFE_FRAC_PI_2) {
camera.pitch = Rad(SAFE_FRAC_PI_2);
}
}
}
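
The controller is designed to be ticked once per frame with the elapsed time. A minimal sketch (not part of the commit) of the intended call pattern, mirroring what State::update does in src/main.rs further down:

    use std::time::Duration;

    // One frame of camera work: apply the input buffered by
    // process_keyboard / process_mouse / process_scroll, then build
    // the matrix that gets uploaded into CameraUniform.
    fn camera_frame(
        controller: &mut CameraController,
        camera: &mut Camera,
        projection: &Projection,
        dt: Duration,
    ) -> cgmath::Matrix4<f32> {
        controller.update_camera(camera, dt);
        projection.calc_matrix() * camera.calc_matrix()
    }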

src/debug.rs (new file, 99 lines)

@@ -0,0 +1,99 @@
use std::mem::size_of;
use wgpu::util::{BufferInitDescriptor, DeviceExt};
use crate::create_render_pipeline;
#[repr(C)]
#[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
pub struct PositionColor {
position: [f32; 3],
color: [f32; 3],
}
const AXIS_COLORS: &'static [PositionColor] = &[
// X
PositionColor {
position: [0.0, 0.0, 0.0],
color: [0.5, 0.0, 0.0],
},
PositionColor {
position: [1.0, 0.0, 0.0],
color: [1.0, 0.0, 0.0],
},
// Y
PositionColor {
position: [0.0, 0.0, 0.0],
color: [0.0, 0.5, 0.0],
},
PositionColor {
position: [0.0, 1.0, 0.0],
color: [0.0, 1.0, 0.0],
},
// Z
PositionColor {
position: [0.0, 0.0, 0.0],
color: [0.0, 0.0, 0.5],
},
PositionColor {
position: [0.0, 0.0, 1.0],
color: [0.0, 0.0, 1.0],
},
];
const POSITION_COLOR_LAYOUT: wgpu::VertexBufferLayout<'static> = wgpu::VertexBufferLayout {
array_stride: size_of::<PositionColor>() as _,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &wgpu::vertex_attr_array![
0 => Float32x3,
1 => Float32x3,
],
};
pub struct Debug {
color_lines: wgpu::RenderPipeline,
axis: wgpu::Buffer,
}
impl Debug {
pub fn new(
device: &wgpu::Device,
camera_layout: &wgpu::BindGroupLayout,
color_format: wgpu::TextureFormat,
) -> Self {
let axis = device.create_buffer_init(&BufferInitDescriptor {
label: Some("Debug::axis"),
contents: bytemuck::cast_slice(AXIS_COLORS),
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::VERTEX,
});
let shader = wgpu::include_wgsl!("debug.wgsl");
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[camera_layout],
push_constant_ranges: &[],
});
let color_lines = create_render_pipeline(
device,
&layout,
color_format,
None,
&[POSITION_COLOR_LAYOUT],
wgpu::PrimitiveTopology::LineList,
shader,
);
Self { color_lines, axis }
}
pub fn draw_axis<'a: 'b, 'b>(
&'a self,
pass: &'b mut wgpu::RenderPass<'a>,
camera: &'a wgpu::BindGroup,
) {
pass.set_pipeline(&self.color_lines);
pass.set_bind_group(0, camera, &[]);
pass.set_vertex_buffer(0, self.axis.slice(..));
pass.draw(0..AXIS_COLORS.len() as u32, 0..1);
}
}
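
Worth noting: Debug is only referenced from src/main.rs behind #[cfg(feature = "debug")], which presupposes a "debug" feature declared in Cargo.toml (the Cargo.toml hunk above does not show one). Assuming that entry exists, the axis overlay is enabled with: cargo run --features debug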

src/equirectangular.wgsl (new file, 87 lines)

@@ -0,0 +1,87 @@
const PI: f32 = 3.1415926535897932384626433832795;
struct Face {
forward: vec3<f32>,
up: vec3<f32>,
right: vec3<f32>,
}
@group(0)
@binding(0)
var src: texture_2d<f32>;
@group(0)
@binding(1)
var dst: texture_storage_2d_array<rgba32float, write>;
@compute
@workgroup_size(16, 16, 1)
fn compute_equirect_to_cubemap(
@builtin(global_invocation_id)
gid: vec3<u32>,
) {
// If the texture size is not divisible by the workgroup size, we
// need to make sure we don't try to write to pixels that don't exist.
if gid.x >= u32(textureDimensions(dst).x) {
return;
}
var FACES: array<Face, 6> = array(
// FACES +X
Face(
vec3(1.0, 0.0, 0.0), // forward
vec3(0.0, 1.0, 0.0), // up
vec3(0.0, 0.0, -1.0), // right
),
// FACES -X
Face (
vec3(-1.0, 0.0, 0.0),
vec3(0.0, 1.0, 0.0),
vec3(0.0, 0.0, 1.0),
),
// FACES +Y
Face (
vec3(0.0, -1.0, 0.0),
vec3(0.0, 0.0, 1.0),
vec3(1.0, 0.0, 0.0),
),
// FACES -Y
Face (
vec3(0.0, 1.0, 0.0),
vec3(0.0, 0.0, -1.0),
vec3(1.0, 0.0, 0.0),
),
// FACES +Z
Face (
vec3(0.0, 0.0, 1.0),
vec3(0.0, 1.0, 0.0),
vec3(1.0, 0.0, 0.0),
),
// FACES -Z
Face (
vec3(0.0, 0.0, -1.0),
vec3(0.0, 1.0, 0.0),
vec3(-1.0, 0.0, 0.0),
),
);
// Get texture coords relative to cubemap face
let dst_dimensions = vec2<f32>(textureDimensions(dst));
let cube_uv = vec2<f32>(gid.xy) / dst_dimensions * 2.0 - 1.0;
// Get spherical coordinate from cube_uv
let face = FACES[gid.z];
let spherical = normalize(face.forward + face.right * cube_uv.x + face.up * cube_uv.y);
// Get coordinate on the equirectangular texture
let inv_atan = vec2(0.1591, 0.3183);
let eq_uv = vec2(atan2(spherical.z, spherical.x), asin(spherical.y)) * inv_atan + 0.5;
let eq_pixel = vec2<i32>(eq_uv * vec2<f32>(textureDimensions(src)));
// We use textureLoad() as textureSample() is not allowed in compute shaders
var sample = textureLoad(src, eq_pixel, 0);
textureStore(dst, gid.xy, gid.z, sample);
}
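
The two inv_atan constants are 1/(2π) ≈ 0.1591 and 1/π ≈ 0.3183, so for a unit direction d the lookup above computes

    u = atan2(d.z, d.x) / (2π) + 0.5
    v = asin(d.y) / π + 0.5

mapping longitude in [-π, π] and latitude in [-π/2, π/2] onto the [0, 1]² coordinates of the equirectangular source image.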

src/hdr.rs (new file, 160 lines)

@@ -0,0 +1,160 @@
use wgpu::Operations;
use crate::{create_render_pipeline, texture};
/// Owns the render texture and controls tonemapping
pub struct HdrPipeline {
pipeline: wgpu::RenderPipeline,
bind_group: wgpu::BindGroup,
texture: texture::Texture,
width: u32,
height: u32,
format: wgpu::TextureFormat,
layout: wgpu::BindGroupLayout,
}
impl HdrPipeline {
pub fn new(device: &wgpu::Device, config: &wgpu::SurfaceConfiguration) -> Self {
let width = config.width;
let height = config.height;
// We could use `Rgba32Float`, but that requires some extra
// features to be enabled.
let format = wgpu::TextureFormat::Rgba16Float;
let texture = texture::Texture::create_2d_texture(
device,
width,
height,
format,
wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::RENDER_ATTACHMENT,
wgpu::FilterMode::Nearest,
Some("Hdr::texture"),
);
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Hdr::layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
// Rgba16Float is filterable; Rgba32Float would additionally
// require wgpu::Features::FLOAT32_FILTERABLE
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Hdr::bind_group"),
layout: &layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
});
let shader = wgpu::include_wgsl!("hdr.wgsl");
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&layout],
push_constant_ranges: &[],
});
let pipeline = create_render_pipeline(
device,
&pipeline_layout,
config.format.add_srgb_suffix(),
None,
&[],
wgpu::PrimitiveTopology::TriangleList,
shader,
);
Self {
pipeline,
bind_group,
layout,
texture,
width,
height,
format,
}
}
/// Resize the HDR texture
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.texture = texture::Texture::create_2d_texture(
device,
width,
height,
wgpu::TextureFormat::Rgba16Float,
wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::RENDER_ATTACHMENT,
wgpu::FilterMode::Nearest,
Some("Hdr::texture"),
);
self.bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("Hdr::bind_group"),
layout: &self.layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&self.texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.texture.sampler),
},
],
});
self.width = width;
self.height = height;
}
/// Exposes the HDR texture
pub fn view(&self) -> &wgpu::TextureView {
&self.texture.view
}
/// The format of the HDR texture
pub fn format(&self) -> wgpu::TextureFormat {
self.format
}
/// Renders the internal HDR texture to the supplied [wgpu::TextureView],
/// applying tonemapping in the process.
pub fn process(&self, encoder: &mut wgpu::CommandEncoder, output: &wgpu::TextureView) {
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Hdr::process"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &output,
resolve_target: None,
ops: Operations {
load: wgpu::LoadOp::Load,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
pass.set_pipeline(&self.pipeline);
pass.set_bind_group(0, &self.bind_group, &[]);
pass.draw(0..3, 0..1);
}
}
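
Taken together, the intended per-frame flow is: draw the scene into the HDR target, then tonemap it onto the swapchain frame. A condensed sketch (not part of the commit; State::render below does the full version):

    // Assumes frame_view was created with the sRGB-suffixed surface
    // format, as in State::render.
    fn draw_frame(
        hdr: &HdrPipeline,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        frame_view: &wgpu::TextureView,
    ) {
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        {
            // 1. Render the scene (and sky) into the floating-point HDR texture.
            let _scene_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("scene"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: hdr.view(),
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                occlusion_query_set: None,
                timestamp_writes: None,
            });
            // ... scene draw calls go here ...
        }
        // 2. Tonemap the HDR texture onto the frame.
        hdr.process(&mut encoder, frame_view);
        queue.submit(std::iter::once(encoder.finish()));
    }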

src/hdr.wgsl (new file, 55 lines)

@@ -0,0 +1,55 @@
// Maps HDR values into the displayable [0.0, 1.0] SDR range
// Based on http://www.oscars.org/science-technology/sci-tech-projects/aces
fn aces_tone_map(hdr: vec3<f32>) -> vec3<f32> {
let m1 = mat3x3(
0.59719, 0.07600, 0.02840,
0.35458, 0.90834, 0.13383,
0.04823, 0.01566, 0.83777,
);
let m2 = mat3x3(
1.60475, -0.10208, -0.00327,
-0.53108, 1.10813, -0.07276,
-0.07367, -0.00605, 1.07602,
);
let v = m1 * hdr;
let a = v * (v + 0.0245786) - 0.000090537;
let b = v * (0.983729 * v + 0.4329510) + 0.238081;
return clamp(m2 * (a / b), vec3(0.0), vec3(1.0));
}
struct VertexOutput {
@location(0) uv: vec2<f32>,
@builtin(position) clip_position: vec4<f32>,
};
@vertex
fn vs_main(
@builtin(vertex_index) vi: u32,
) -> VertexOutput {
var out: VertexOutput;
// Generate a triangle that covers the whole screen
out.uv = vec2<f32>(
f32((vi << 1u) & 2u),
f32(vi & 2u),
);
out.clip_position = vec4<f32>(out.uv * 2.0 - 1.0, 0.0, 1.0);
// We need to invert the y coordinate so the image
// is not upside down
out.uv.y = 1.0 - out.uv.y;
return out;
}
@group(0)
@binding(0)
var hdr_image: texture_2d<f32>;
@group(0)
@binding(1)
var hdr_sampler: sampler;
@fragment
fn fs_main(vs: VertexOutput) -> @location(0) vec4<f32> {
let hdr = textureSample(hdr_image, hdr_sampler, vs.uv);
let sdr = aces_tone_map(hdr.rgb);
return vec4(sdr, hdr.a);
}
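
For the three vertex indices the vertex shader above evaluates to uv = (0, 0), (2, 0) and (0, 2), i.e. clip positions (-1, -1), (3, -1) and (-1, 3): a single oversized triangle that covers the whole screen once the parts outside clip space are clipped away, which is why the pipeline is driven by a plain draw(0..3, 0..1).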

src/main.rs

@@ -1,4 +1,4 @@
-use std::iter;
+use std::{f32::consts::PI, iter};

 use cgmath::prelude::*;
 use wgpu::util::DeviceExt;
@@ -9,170 +9,50 @@ use winit::{
     window::Window,
 };

+mod camera;
+mod hdr;
 mod model;
 mod resources;
 mod texture;

-use model::{DrawModel, Vertex};
-
-#[rustfmt::skip]
-pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
-    1.0, 0.0, 0.0, 0.0,
-    0.0, 1.0, 0.0, 0.0,
-    0.0, 0.0, 0.5, 0.5,
-    0.0, 0.0, 0.0, 1.0,
-);
+#[cfg(feature = "debug")]
+mod debug;
+
+use model::{DrawLight, DrawModel, Vertex};

 const NUM_INSTANCES_PER_ROW: u32 = 10;

-#[repr(C)]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct LightUniform {
-    position: [f32; 3],
-    // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here
-    _padding: u32,
-    color: [f32; 3],
-    // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here
-    _padding2: u32,
-}
-
-#[derive(Debug)]
-struct Camera {
-    eye: cgmath::Point3<f32>,
-    target: cgmath::Point3<f32>,
-    up: cgmath::Vector3<f32>,
-    aspect: f32,
-    fovy: f32,
-    znear: f32,
-    zfar: f32,
-}
-
-impl Camera {
-    fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
-        let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up);
-        let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
-        proj * view
-    }
-}
-
 #[repr(C)]
 #[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
 struct CameraUniform {
     view_position: [f32; 4],
+    view: [[f32; 4]; 4], // NEW!
     view_proj: [[f32; 4]; 4],
+    inv_proj: [[f32; 4]; 4], // NEW!
+    inv_view: [[f32; 4]; 4], // NEW!
 }

 impl CameraUniform {
     fn new() -> Self {
         Self {
             view_position: [0.0; 4],
+            view: cgmath::Matrix4::identity().into(), // NEW!
             view_proj: cgmath::Matrix4::identity().into(),
+            inv_proj: cgmath::Matrix4::identity().into(), // NEW!
+            inv_view: cgmath::Matrix4::identity().into(), // NEW!
         }
     }

-    fn update_view_proj(&mut self, camera: &Camera) {
-        // We're using Vector4 because of the uniforms 16 byte spacing requirement
-        self.view_position = camera.eye.to_homogeneous().into();
-        self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into();
-    }
-}
-
-struct CameraController {
-    speed: f32,
-    is_up_pressed: bool,
-    is_down_pressed: bool,
-    is_forward_pressed: bool,
-    is_backward_pressed: bool,
-    is_left_pressed: bool,
-    is_right_pressed: bool,
-}
-
-impl CameraController {
-    fn new(speed: f32) -> Self {
-        Self {
-            speed,
-            is_up_pressed: false,
-            is_down_pressed: false,
-            is_forward_pressed: false,
-            is_backward_pressed: false,
-            is_left_pressed: false,
-            is_right_pressed: false,
-        }
-    }
-
-    fn process_events(&mut self, event: &WindowEvent) -> bool {
-        match event {
-            WindowEvent::KeyboardInput {
-                event:
-                    KeyEvent {
-                        state,
-                        physical_key: PhysicalKey::Code(keycode),
-                        ..
-                    },
-                ..
-            } => {
-                let is_pressed = *state == ElementState::Pressed;
-                match keycode {
-                    KeyCode::Space => {
-                        self.is_up_pressed = is_pressed;
-                        true
-                    }
-                    KeyCode::ShiftLeft => {
-                        self.is_down_pressed = is_pressed;
-                        true
-                    }
-                    KeyCode::KeyW | KeyCode::ArrowUp => {
-                        self.is_forward_pressed = is_pressed;
-                        true
-                    }
-                    KeyCode::KeyA | KeyCode::ArrowLeft => {
-                        self.is_left_pressed = is_pressed;
-                        true
-                    }
-                    KeyCode::KeyS | KeyCode::ArrowDown => {
-                        self.is_backward_pressed = is_pressed;
-                        true
-                    }
-                    KeyCode::KeyD | KeyCode::ArrowRight => {
-                        self.is_right_pressed = is_pressed;
-                        true
-                    }
-                    _ => false,
-                }
-            }
-            _ => false,
-        }
-    }
-
-    fn update_camera(&self, camera: &mut Camera) {
-        let forward = camera.target - camera.eye;
-        let forward_norm = forward.normalize();
-        let forward_mag = forward.magnitude();
-
-        // Prevents glitching when camera gets too close to the
-        // center of the scene.
-        if self.is_forward_pressed && forward_mag > self.speed {
-            camera.eye += forward_norm * self.speed;
-        }
-        if self.is_backward_pressed {
-            camera.eye -= forward_norm * self.speed;
-        }
-
-        let right = forward_norm.cross(camera.up);
-
-        // Redo radius calc in case the up/ down is pressed.
-        let forward = camera.target - camera.eye;
-        let forward_mag = forward.magnitude();
-
-        if self.is_right_pressed {
-            // Rescale the distance between the target and eye so
-            // that it doesn't change. The eye therefore still
-            // lies on the circle made by the target and eye.
-            camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
-        }
-        if self.is_left_pressed {
-            camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag;
-        }
-    }
+    // UPDATED!
+    fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
+        self.view_position = camera.position.to_homogeneous().into();
+        let proj = projection.calc_matrix();
+        let view = camera.calc_matrix();
+        let view_proj = proj * view;
+        self.view = view.into();
+        self.view_proj = view_proj.into();
+        self.inv_proj = proj.invert().unwrap().into();
+        self.inv_view = view.transpose().into();
     }
 }
@@ -183,10 +63,10 @@ struct Instance {
 impl Instance {
     fn to_raw(&self) -> InstanceRaw {
-        let model =
-            cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation);
         InstanceRaw {
-            model: model.into(),
+            model: (cgmath::Matrix4::from_translation(self.position)
+                * cgmath::Matrix4::from(self.rotation))
+            .into(),
             normal: cgmath::Matrix3::from(self.rotation).into(),
         }
     }
@@ -212,13 +92,13 @@ impl model::Vertex for InstanceRaw {
             attributes: &[
                 wgpu::VertexAttribute {
                     offset: 0,
-                    // While our vertex shader only uses locations 0, and 1 now, in later tutorials, we'll
-                    // be using 2, 3, and 4 for Vertex. We'll start at slot 5 to not conflict with them later
+                    // While our vertex shader only uses locations 0 and 1 now, in later tutorials we'll
+                    // be using 2, 3, and 4 for Vertex. We'll start at slot 5 so as not to conflict with them later
                     shader_location: 5,
                     format: wgpu::VertexFormat::Float32x4,
                 },
                 // A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot
                 // for each vec4. We don't have to do this in code, though.
                 wgpu::VertexAttribute {
                     offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
                     shader_location: 6,
@@ -254,16 +134,27 @@ impl model::Vertex for InstanceRaw {
     }
 }

+#[repr(C)]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct LightUniform {
+    position: [f32; 3],
+    // Due to uniforms requiring 16 byte (4 float) spacing, we need to use a padding field here
+    _padding: u32,
+    color: [f32; 3],
+    _padding2: u32,
+}
+
 struct State<'a> {
+    window: &'a Window,
     surface: wgpu::Surface<'a>,
     device: wgpu::Device,
     queue: wgpu::Queue,
     config: wgpu::SurfaceConfiguration,
-    size: winit::dpi::PhysicalSize<u32>,
     render_pipeline: wgpu::RenderPipeline,
     obj_model: model::Model,
-    camera: Camera,
-    camera_controller: CameraController,
+    camera: camera::Camera,
+    projection: camera::Projection,
+    camera_controller: camera::CameraController,
     camera_uniform: CameraUniform,
     camera_buffer: wgpu::Buffer,
     camera_bind_group: wgpu::BindGroup,
@@ -271,12 +162,20 @@ struct State<'a> {
     #[allow(dead_code)]
     instance_buffer: wgpu::Buffer,
     depth_texture: texture::Texture,
-    window: &'a Window,
+    size: winit::dpi::PhysicalSize<u32>,
     light_uniform: LightUniform,
     light_buffer: wgpu::Buffer,
+    light_bind_group_layout: wgpu::BindGroupLayout,
     light_bind_group: wgpu::BindGroup,
     light_render_pipeline: wgpu::RenderPipeline,
+    #[allow(dead_code)]
+    debug_material: model::Material,
+    mouse_pressed: bool,
+    // NEW!
+    hdr: hdr::HdrPipeline,
+    environment_bind_group: wgpu::BindGroup,
+    sky_pipeline: wgpu::RenderPipeline,
+    #[cfg(feature = "debug")]
+    debug: debug::Debug,
 }

 fn create_render_pipeline(
@@ -285,13 +184,13 @@ fn create_render_pipeline(
     color_format: wgpu::TextureFormat,
     depth_format: Option<wgpu::TextureFormat>,
     vertex_layouts: &[wgpu::VertexBufferLayout],
+    topology: wgpu::PrimitiveTopology, // NEW!
     shader: wgpu::ShaderModuleDescriptor,
 ) -> wgpu::RenderPipeline {
     let shader = device.create_shader_module(shader);
     device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-        label: Some("Render Pipeline"),
-        cache: None,
+        label: Some(&format!("{:?}", shader)),
         layout: Some(layout),
         vertex: wgpu::VertexState {
             module: &shader,
@@ -304,16 +203,13 @@ fn create_render_pipeline(
             entry_point: "fs_main",
             targets: &[Some(wgpu::ColorTargetState {
                 format: color_format,
-                blend: Some(wgpu::BlendState {
-                    alpha: wgpu::BlendComponent::REPLACE,
-                    color: wgpu::BlendComponent::REPLACE,
-                }),
+                blend: None,
                 write_mask: wgpu::ColorWrites::ALL,
             })],
             compilation_options: Default::default(),
         }),
         primitive: wgpu::PrimitiveState {
-            topology: wgpu::PrimitiveTopology::TriangleList,
+            topology, // NEW!
             strip_index_format: None,
             front_face: wgpu::FrontFace::Ccw,
             cull_mode: Some(wgpu::Face::Back),
@@ -327,7 +223,7 @@ fn create_render_pipeline(
         depth_stencil: depth_format.map(|format| wgpu::DepthStencilState {
             format,
             depth_write_enabled: true,
-            depth_compare: wgpu::CompareFunction::Less,
+            depth_compare: wgpu::CompareFunction::LessEqual, // UPDATED!
             stencil: wgpu::StencilState::default(),
             bias: wgpu::DepthBiasState::default(),
         }),
@@ -336,17 +232,19 @@ fn create_render_pipeline(
             mask: !0,
             alpha_to_coverage_enabled: false,
         },
-        // If the pipeline will be used with a multiview render pass, this
-        // indicates how many array layers the attachments will have.
         multiview: None,
+        cache: None,
     })
 }

 impl<'a> State<'a> {
-    async fn new(window: &'a Window) -> State<'a> {
+    async fn new(window: &'a Window) -> anyhow::Result<State<'a>> {
         let size = window.inner_size();

         // The instance is a handle to our GPU
         // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
-        log::warn!("WGPU setup");
         let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
             backends: wgpu::Backends::PRIMARY,
             ..Default::default()
@@ -362,26 +260,23 @@ impl<'a> State<'a> {
         })
         .await
         .unwrap();

-        log::warn!("device and queue");
         let (device, queue) = adapter
             .request_device(
                 &wgpu::DeviceDescriptor {
                     label: None,
-                    // UPDATED!
                     required_features: wgpu::Features::empty(),
-                    // WebGL doesn't support all of wgpu's features, so if
-                    // we're building for the web we'll have to disable some.
-                    required_limits: wgpu::Limits::default(),
+                    // UPDATED!
+                    required_limits: wgpu::Limits::downlevel_defaults(),
                     memory_hints: Default::default(),
                 },
-                // Some(&std::path::Path::new("trace")), // Trace path
                 None, // Trace path
             )
             .await
             .unwrap();

-        log::warn!("Surface");
         let surface_caps = surface.get_capabilities(&adapter);
-        // Shader code assumes an Srgb surface texture. Using a different
+        // Shader code in this tutorial assumes an Srgb surface texture. Using a different
         // one will result in all the colors coming out darker. If you want to support non
         // Srgb surfaces, you'll need to account for that when drawing to the frame.
         let surface_format = surface_caps
@@ -397,7 +292,8 @@ impl<'a> State<'a> {
             height: size.height,
             present_mode: surface_caps.present_modes[0],
             alpha_mode: surface_caps.alpha_modes[0],
-            view_formats: vec![],
+            // NEW!
+            view_formats: vec![surface_format.add_srgb_suffix()],
             desired_maximum_frame_latency: 2,
         };
@@ -409,8 +305,8 @@ impl<'a> State<'a> {
                     visibility: wgpu::ShaderStages::FRAGMENT,
                     ty: wgpu::BindingType::Texture {
                         multisampled: false,
-                        view_dimension: wgpu::TextureViewDimension::D2,
                         sample_type: wgpu::TextureSampleType::Float { filterable: true },
+                        view_dimension: wgpu::TextureViewDimension::D2,
                     },
                     count: None,
                 },
@@ -420,23 +316,34 @@ impl<'a> State<'a> {
                     ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                     count: None,
                 },
+                // normal map
+                wgpu::BindGroupLayoutEntry {
+                    binding: 2,
+                    visibility: wgpu::ShaderStages::FRAGMENT,
+                    ty: wgpu::BindingType::Texture {
+                        multisampled: false,
+                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
+                        view_dimension: wgpu::TextureViewDimension::D2,
+                    },
+                    count: None,
+                },
+                wgpu::BindGroupLayoutEntry {
+                    binding: 3,
+                    visibility: wgpu::ShaderStages::FRAGMENT,
+                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
+                    count: None,
+                },
             ],
             label: Some("texture_bind_group_layout"),
         });

-        let camera = Camera {
-            eye: (0.0, 5.0, -10.0).into(),
-            target: (0.0, 0.0, 0.0).into(),
-            up: cgmath::Vector3::unit_y(),
-            aspect: config.width as f32 / config.height as f32,
-            fovy: 45.0,
-            znear: 0.1,
-            zfar: 100.0,
-        };
-        let camera_controller = CameraController::new(0.2);
+        let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
+        let projection =
+            camera::Projection::new(config.width, config.height, cgmath::Deg(45.0), 0.1, 100.0);
+        let camera_controller = camera::CameraController::new(4.0, 0.4);

         let mut camera_uniform = CameraUniform::new();
-        camera_uniform.update_view_proj(&camera);
+        camera_uniform.update_view_proj(&camera, &projection);

         let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
             label: Some("Camera Buffer"),
@@ -453,7 +360,14 @@ impl<'a> State<'a> {
                     let position = cgmath::Vector3 { x, y: 0.0, z };

-                    let rotation = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(180.0));
+                    let rotation = if position.is_zero() {
+                        cgmath::Quaternion::from_axis_angle(
+                            cgmath::Vector3::unit_z(),
+                            cgmath::Deg(0.0),
+                        )
+                    } else {
+                        cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0))
+                    };

                     Instance { position, rotation }
                 })
@@ -491,20 +405,11 @@ impl<'a> State<'a> {
             label: Some("camera_bind_group"),
         });

-        log::warn!("Load model");
         let obj_model =
             resources::load_model("cube.obj", &device, &queue, &texture_bind_group_layout)
                 .await
                 .unwrap();

-        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
-            label: Some("shader.wgsl"),
-            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
-        });
-
-        let depth_texture =
-            texture::Texture::create_depth_texture(&device, &config, "depth_texture");
-
         let light_uniform = LightUniform {
             position: [2.0, 2.0, 2.0],
             _padding: 0,
@@ -512,14 +417,11 @@ impl<'a> State<'a> {
             _padding2: 0,
         };

-        // We'll want to update our lights position, so we use COPY_DST
-        let light_buffer = device.create_buffer_init(
-            &wgpu::util::BufferInitDescriptor {
-                label: Some("Light VB"),
-                contents: bytemuck::cast_slice(&[light_uniform]),
-                usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-            }
-        );
+        let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+            label: Some("Light VB"),
+            contents: bytemuck::cast_slice(&[light_uniform]),
+            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+        });

         let light_bind_group_layout =
             device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
@@ -545,16 +447,71 @@ impl<'a> State<'a> {
                 label: None,
             });

-        let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-            bind_group_layouts: &[
-                &texture_bind_group_layout,
-                &camera_bind_group_layout,
-                &light_bind_group_layout,
-            ],
-            label: Some("Render Pipeline Layout"),
-            push_constant_ranges: &[],
-        });
+        let depth_texture =
+            texture::Texture::create_depth_texture(&device, &config, "depth_texture");
+
+        let hdr = hdr::HdrPipeline::new(&device, &config);
+
+        let hdr_loader = resources::HdrLoader::new(&device);
+        let sky_bytes = resources::load_binary("pure-sky.hdr").await?;
+        let sky_texture = hdr_loader.from_equirectangular_bytes(
+            &device,
+            &queue,
+            &sky_bytes,
+            1080,
+            Some("Sky Texture"),
+        )?;
+
+        let environment_layout =
+            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+                label: Some("environment_layout"),
+                entries: &[
+                    wgpu::BindGroupLayoutEntry {
+                        binding: 0,
+                        visibility: wgpu::ShaderStages::FRAGMENT,
+                        ty: wgpu::BindingType::Texture {
+                            sample_type: wgpu::TextureSampleType::Float { filterable: false },
+                            view_dimension: wgpu::TextureViewDimension::Cube,
+                            multisampled: false,
+                        },
+                        count: None,
+                    },
+                    wgpu::BindGroupLayoutEntry {
+                        binding: 1,
+                        visibility: wgpu::ShaderStages::FRAGMENT,
+                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
+                        count: None,
+                    },
+                ],
+            });
+
+        let environment_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            label: Some("environment_bind_group"),
+            layout: &environment_layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(&sky_texture.view()),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(sky_texture.sampler()),
+                },
+            ],
+        });
+
+        let render_pipeline_layout =
+            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+                label: Some("Render Pipeline Layout"),
+                bind_group_layouts: &[
+                    &texture_bind_group_layout,
+                    &camera_bind_group_layout,
+                    &light_bind_group_layout,
+                    &environment_layout, // UPDATED!
+                ],
+                push_constant_ranges: &[],
+            });

         let render_pipeline = {
             let shader = wgpu::ShaderModuleDescriptor {
                 label: Some("Normal Shader"),
@@ -563,9 +520,10 @@ impl<'a> State<'a> {
             create_render_pipeline(
                 &device,
                 &render_pipeline_layout,
-                config.format,
+                hdr.format(),
                 Some(texture::Texture::DEPTH_FORMAT),
                 &[model::ModelVertex::desc(), InstanceRaw::desc()],
+                wgpu::PrimitiveTopology::TriangleList,
                 shader,
             )
         };
@@ -583,22 +541,76 @@ impl<'a> State<'a> {
             create_render_pipeline(
                 &device,
                 &layout,
-                config.format,
+                hdr.format(),
                 Some(texture::Texture::DEPTH_FORMAT),
                 &[model::ModelVertex::desc()],
+                wgpu::PrimitiveTopology::TriangleList,
                 shader,
             )
         };

+        // NEW!
+        let sky_pipeline = {
+            let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+                label: Some("Sky Pipeline Layout"),
+                bind_group_layouts: &[&camera_bind_group_layout, &environment_layout],
+                push_constant_ranges: &[],
+            });
+            let shader = wgpu::include_wgsl!("sky.wgsl");
+            create_render_pipeline(
+                &device,
+                &layout,
+                hdr.format(),
+                Some(texture::Texture::DEPTH_FORMAT),
+                &[],
+                wgpu::PrimitiveTopology::TriangleList,
+                shader,
+            )
+        };
+
+        let debug_material = {
+            let diffuse_bytes = include_bytes!("../res/cobble-diffuse.png");
+            let normal_bytes = include_bytes!("../res/cobble-normal.png");
+
+            let diffuse_texture = texture::Texture::from_bytes(
+                &device,
+                &queue,
+                diffuse_bytes,
+                "res/alt-diffuse.png",
+                false,
+            )
+            .unwrap();
+            let normal_texture = texture::Texture::from_bytes(
+                &device,
+                &queue,
+                normal_bytes,
+                "res/alt-normal.png",
+                true,
+            )
+            .unwrap();
+
+            model::Material::new(
+                &device,
+                "alt-material",
+                diffuse_texture,
+                normal_texture,
+                &texture_bind_group_layout,
+            )
+        };
+
+        #[cfg(feature = "debug")]
+        let debug = debug::Debug::new(&device, &camera_bind_group_layout, surface_format);
+
-        Self {
+        Ok(Self {
+            window,
             surface,
             device,
             queue,
             config,
-            size,
             render_pipeline,
             obj_model,
             camera,
+            projection,
             camera_controller,
             camera_buffer,
             camera_bind_group,
@@ -606,13 +618,22 @@ impl<'a> State<'a> {
             instances,
             instance_buffer,
             depth_texture,
-            window,
+            size,
             light_uniform,
             light_buffer,
+            light_bind_group_layout,
             light_bind_group,
             light_render_pipeline,
-        }
+            #[allow(dead_code)]
+            debug_material,
+            mouse_pressed: false,
+            // NEW!
+            hdr,
+            environment_bind_group,
+            sky_pipeline,
+            #[cfg(feature = "debug")]
+            debug,
+        })
     }

     pub fn window(&self) -> &Window {
@@ -621,42 +642,76 @@ impl<'a> State<'a> {
     fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
         if new_size.width > 0 && new_size.height > 0 {
+            println!("Resizing to {:?}", new_size);
+            self.projection.resize(new_size.width, new_size.height);
+            self.hdr
+                .resize(&self.device, new_size.width, new_size.height);
+            self.size = new_size;
             self.config.width = new_size.width;
             self.config.height = new_size.height;
-            self.size = new_size;
-            self.camera.aspect = self.config.width as f32 / self.config.height as f32;
             self.surface.configure(&self.device, &self.config);
             self.depth_texture =
                 texture::Texture::create_depth_texture(&self.device, &self.config, "depth_texture");
         }
     }
     fn input(&mut self, event: &WindowEvent) -> bool {
-        self.camera_controller.process_events(event)
+        match event {
+            WindowEvent::KeyboardInput {
+                event:
+                    KeyEvent {
+                        physical_key: PhysicalKey::Code(key),
+                        state,
+                        ..
+                    },
+                ..
+            } => self.camera_controller.process_keyboard(*key, *state),
+            WindowEvent::MouseWheel { delta, .. } => {
+                self.camera_controller.process_scroll(delta);
+                true
+            }
+            WindowEvent::MouseInput {
+                button: MouseButton::Left,
+                state,
+                ..
+            } => {
+                self.mouse_pressed = *state == ElementState::Pressed;
+                true
+            }
+            _ => false,
+        }
     }
-    fn update(&mut self) {
-        self.camera_controller.update_camera(&mut self.camera);
-        log::info!("{:?}", self.camera);
-        self.camera_uniform.update_view_proj(&self.camera);
+    fn update(&mut self, dt: std::time::Duration) {
+        self.camera_controller.update_camera(&mut self.camera, dt);
+        self.camera_uniform
+            .update_view_proj(&self.camera, &self.projection);
         self.queue.write_buffer(
             &self.camera_buffer,
             0,
             bytemuck::cast_slice(&[self.camera_uniform]),
         );

+        // Update the light
         let old_position: cgmath::Vector3<_> = self.light_uniform.position.into();
-        self.light_uniform.position =
-            (cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
-                * old_position)
-            .into();
-        self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light_uniform]));
+        self.light_uniform.position = (cgmath::Quaternion::from_axis_angle(
+            (0.0, 1.0, 0.0).into(),
+            cgmath::Deg(PI * dt.as_secs_f32()),
+        ) * old_position)
+        .into();
+        self.queue.write_buffer(
+            &self.light_buffer,
+            0,
+            bytemuck::cast_slice(&[self.light_uniform]),
+        );
     }
     fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
         let output = self.surface.get_current_texture()?;
-        let view = output
-            .texture
-            .create_view(&wgpu::TextureViewDescriptor::default());
+        let view = output.texture.create_view(&wgpu::TextureViewDescriptor {
+            format: Some(self.config.format.add_srgb_suffix()),
+            ..Default::default()
+        });

         let mut encoder = self
             .device
@@ -668,7 +723,7 @@ impl<'a> State<'a> {
             let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                 label: Some("Render Pass"),
                 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
-                    view: &view,
+                    view: self.hdr.view(), // UPDATED!
                     resolve_target: None,
                     ops: wgpu::Operations {
                         load: wgpu::LoadOp::Clear(wgpu::Color {
@@ -693,8 +748,6 @@ impl<'a> State<'a> {
             });
             render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));

-            use crate::model::DrawLight;
-
             render_pass.set_pipeline(&self.light_render_pipeline);
             render_pass.draw_light_model(
                 &self.obj_model,
@@ -708,7 +761,36 @@ impl<'a> State<'a> {
                 0..self.instances.len() as u32,
                 &self.camera_bind_group,
                 &self.light_bind_group,
+                &self.environment_bind_group,
             );
+
+            render_pass.set_pipeline(&self.sky_pipeline);
+            render_pass.set_bind_group(0, &self.camera_bind_group, &[]);
+            render_pass.set_bind_group(1, &self.environment_bind_group, &[]);
+            render_pass.draw(0..3, 0..1);
         }

+        // NEW!
+        // Apply tonemapping
+        self.hdr.process(&mut encoder, &view);
+
+        #[cfg(feature = "debug")]
+        {
+            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+                label: Some("Debug"),
+                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
+                    view: &view,
+                    resolve_target: None,
+                    ops: wgpu::Operations {
+                        load: wgpu::LoadOp::Load,
+                        store: wgpu::StoreOp::Store,
+                    },
+                })],
+                depth_stencil_attachment: None,
+                occlusion_query_set: None,
+                timestamp_writes: None,
+            });
+            self.debug.draw_axis(&mut pass, &self.camera_bind_group);
+        }

         self.queue.submit(iter::once(encoder.finish()));
@@ -728,66 +810,55 @@ pub async fn run() {
         .build(&event_loop)
         .unwrap();

-    // State::new uses async code, so we're going to wait for it to finish
-    let mut state = State::new(&window).await;
-    let mut surface_configured = false;
-
-    event_loop
-        .run(move |event, control_flow| {
-            match event {
-                Event::WindowEvent {
-                    ref event,
-                    window_id,
-                } if window_id == state.window().id() => {
-                    if !state.input(event) {
-                        match event {
-                            WindowEvent::CloseRequested
-                            | WindowEvent::KeyboardInput {
-                                event:
-                                    KeyEvent {
-                                        state: ElementState::Pressed,
-                                        physical_key: PhysicalKey::Code(KeyCode::Escape),
-                                        ..
-                                    },
-                                ..
-                            } => control_flow.exit(),
-                            WindowEvent::Resized(physical_size) => {
-                                surface_configured = true;
-                                state.resize(*physical_size);
-                            }
-                            WindowEvent::RedrawRequested => {
-                                // This tells winit that we want another frame after this one
-                                state.window().request_redraw();
-
-                                if !surface_configured {
-                                    return;
-                                }
-
-                                state.update();
-                                match state.render() {
-                                    Ok(_) => {}
-                                    // Reconfigure the surface if it's lost or outdated
-                                    Err(
-                                        wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated,
-                                    ) => state.resize(state.size),
-                                    // The system is out of memory, we should probably quit
-                                    Err(wgpu::SurfaceError::OutOfMemory) => {
-                                        log::error!("OutOfMemory");
-                                        control_flow.exit();
-                                    }
-                                    // This happens when a frame takes too long to present
-                                    Err(wgpu::SurfaceError::Timeout) => {
-                                        log::warn!("Surface timeout")
-                                    }
-                                }
-                            }
-                            _ => {}
-                        }
-                    }
-                }
-                _ => {}
-            }
-        })
-        .unwrap();
+    let mut state = State::new(&window).await.unwrap();
+    let mut last_render_time = instant::Instant::now();
+    event_loop.run(move |event, control_flow| {
+        match event {
+            Event::DeviceEvent {
+                event: DeviceEvent::MouseMotion{ delta, },
+                .. // We're not using device_id currently
+            } => if state.mouse_pressed {
+                state.camera_controller.process_mouse(delta.0, delta.1)
+            }
+            // UPDATED!
+            Event::WindowEvent {
+                ref event,
+                window_id,
+            } if window_id == state.window().id() && !state.input(event) => {
+                match event {
+                    WindowEvent::CloseRequested
+                    | WindowEvent::KeyboardInput {
+                        event:
+                            KeyEvent {
+                                state: ElementState::Pressed,
+                                physical_key: PhysicalKey::Code(KeyCode::Escape),
+                                ..
+                            },
+                        ..
+                    } => control_flow.exit(),
+                    WindowEvent::Resized(physical_size) => {
+                        state.resize(*physical_size);
+                    }
+                    WindowEvent::RedrawRequested => {
+                        state.window().request_redraw();
+                        let now = instant::Instant::now();
+                        let dt = now - last_render_time;
+                        last_render_time = now;
+                        state.update(dt);
+                        match state.render() {
+                            Ok(_) => {}
+                            // Reconfigure the surface if it's lost or outdated
+                            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
+                                state.resize(state.size)
+                            }
+                            // The system is out of memory, we should probably quit
+                            Err(wgpu::SurfaceError::OutOfMemory) => control_flow.exit(),
+                            // We're ignoring timeouts
+                            Err(wgpu::SurfaceError::Timeout) => log::warn!("Surface timeout"),
+                        }
+                    }
+                    _ => {}
+                }
+            }
+            _ => {}
+        }
+    }).unwrap();
 }

src/light.wgsl

@@ -1,8 +1,11 @@
 // light.wgsl
 // Vertex shader

 struct Camera {
     view_pos: vec4<f32>,
+    view: mat4x4<f32>,
     view_proj: mat4x4<f32>,
+    inv_proj: mat4x4<f32>,
+    inv_view: mat4x4<f32>,
 }
 @group(0) @binding(0)
 var<uniform> camera: Camera;

src/model.rs

@@ -12,6 +12,8 @@ pub struct ModelVertex {
     pub position: [f32; 3],
     pub tex_coords: [f32; 2],
     pub normal: [f32; 3],
+    pub tangent: [f32; 3],
+    pub bitangent: [f32; 3],
 }

 impl Vertex for ModelVertex {
@@ -36,6 +38,17 @@ impl Vertex for ModelVertex {
                     shader_location: 2,
                     format: wgpu::VertexFormat::Float32x3,
                 },
+                // Tangent and bitangent
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
+                    shader_location: 3,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
+                wgpu::VertexAttribute {
+                    offset: mem::size_of::<[f32; 11]>() as wgpu::BufferAddress,
+                    shader_location: 4,
+                    format: wgpu::VertexFormat::Float32x3,
+                },
             ],
         }
     }
@@ -46,9 +59,51 @@ pub struct Material {
     pub name: String,
     #[allow(unused)]
     pub diffuse_texture: texture::Texture,
+    #[allow(unused)]
+    pub normal_texture: texture::Texture,
     pub bind_group: wgpu::BindGroup,
 }

+impl Material {
+    pub fn new(
+        device: &wgpu::Device,
+        name: &str,
+        diffuse_texture: texture::Texture,
+        normal_texture: texture::Texture,
+        layout: &wgpu::BindGroupLayout,
+    ) -> Self {
+        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 2,
+                    resource: wgpu::BindingResource::TextureView(&normal_texture.view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 3,
+                    resource: wgpu::BindingResource::Sampler(&normal_texture.sampler),
+                },
+            ],
+            label: Some(name),
+        });
+
+        Self {
+            name: String::from(name),
+            diffuse_texture,
+            normal_texture,
+            bind_group,
+        }
+    }
+}
+
 pub struct Mesh {
     #[allow(unused)]
     pub name: String,
@@ -63,14 +118,15 @@ pub struct Model {
     pub materials: Vec<Material>,
 }

-// model.rs
 pub trait DrawModel<'a> {
+    #[allow(unused)]
     fn draw_mesh(
         &mut self,
         mesh: &'a Mesh,
         material: &'a Material,
         camera_bind_group: &'a wgpu::BindGroup,
         light_bind_group: &'a wgpu::BindGroup,
+        environment_bind_group: &'a wgpu::BindGroup,
     );

     fn draw_mesh_instanced(
         &mut self,
@@ -79,13 +135,16 @@ pub trait DrawModel<'a> {
         instances: Range<u32>,
         camera_bind_group: &'a wgpu::BindGroup,
         light_bind_group: &'a wgpu::BindGroup,
+        environment_bind_group: &'a wgpu::BindGroup,
     );
+    #[allow(unused)]
     fn draw_model(
         &mut self,
         model: &'a Model,
         camera_bind_group: &'a wgpu::BindGroup,
         light_bind_group: &'a wgpu::BindGroup,
+        environment_bind_group: &'a wgpu::BindGroup,
     );
     fn draw_model_instanced(
         &mut self,
@@ -93,6 +152,17 @@ pub trait DrawModel<'a> {
         instances: Range<u32>,
         camera_bind_group: &'a wgpu::BindGroup,
         light_bind_group: &'a wgpu::BindGroup,
+        environment_bind_group: &'a wgpu::BindGroup,
     );
+    #[allow(unused)]
+    fn draw_model_instanced_with_material(
+        &mut self,
+        model: &'a Model,
+        material: &'a Material,
+        instances: Range<u32>,
+        camera_bind_group: &'a wgpu::BindGroup,
+        light_bind_group: &'a wgpu::BindGroup,
+        environment_bind_group: &'a wgpu::BindGroup,
+    );
 }
@@ -106,8 +176,16 @@ where
         material: &'b Material,
         camera_bind_group: &'b wgpu::BindGroup,
         light_bind_group: &'b wgpu::BindGroup,
+        environment_bind_group: &'b wgpu::BindGroup,
     ) {
-        self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group, light_bind_group);
+        self.draw_mesh_instanced(
+            mesh,
+            material,
+            0..1,
+            camera_bind_group,
+            light_bind_group,
+            environment_bind_group,
+        );
     }

     fn draw_mesh_instanced(
@@ -117,12 +195,14 @@ where
         instances: Range<u32>,
         camera_bind_group: &'b wgpu::BindGroup,
         light_bind_group: &'b wgpu::BindGroup,
+        environment_bind_group: &'b wgpu::BindGroup,
     ) {
         self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
         self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
         self.set_bind_group(0, &material.bind_group, &[]);
         self.set_bind_group(1, camera_bind_group, &[]);
         self.set_bind_group(2, light_bind_group, &[]);
+        self.set_bind_group(3, environment_bind_group, &[]);
         self.draw_indexed(0..mesh.num_elements, 0, instances);
     }
@@ -131,8 +211,15 @@ where
         model: &'b Model,
         camera_bind_group: &'b wgpu::BindGroup,
         light_bind_group: &'b wgpu::BindGroup,
+        environment_bind_group: &'b wgpu::BindGroup,
     ) {
-        self.draw_model_instanced(model, 0..1, camera_bind_group, light_bind_group);
+        self.draw_model_instanced(
+            model,
+            0..1,
+            camera_bind_group,
+            light_bind_group,
+            environment_bind_group,
+        );
     }

     fn draw_model_instanced(
@@ -141,16 +228,45 @@ where
         instances: Range<u32>,
         camera_bind_group: &'b wgpu::BindGroup,
         light_bind_group: &'b wgpu::BindGroup,
+        environment_bind_group: &'b wgpu::BindGroup, // NEW!
     ) {
         for mesh in &model.meshes {
             let material = &model.materials[mesh.material];
-            self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group, light_bind_group);
+            self.draw_mesh_instanced(
+                mesh,
+                material,
+                instances.clone(),
+                camera_bind_group,
+                light_bind_group,
+                environment_bind_group,
+            );
+        }
+    }
+
+    fn draw_model_instanced_with_material(
+        &mut self,
+        model: &'b Model,
+        material: &'b Material,
+        instances: Range<u32>,
+        camera_bind_group: &'b wgpu::BindGroup,
+        light_bind_group: &'b wgpu::BindGroup,
+        environment_bind_group: &'b wgpu::BindGroup,
+    ) {
+        for mesh in &model.meshes {
+            self.draw_mesh_instanced(
+                mesh,
+                material,
+                instances.clone(),
+                camera_bind_group,
+                light_bind_group,
+                environment_bind_group,
+            );
         }
     }
 }

-// model.rs
 pub trait DrawLight<'a> {
+    #[allow(unused)]
     fn draw_light_mesh(
         &mut self,
         mesh: &'a Mesh,
@@ -223,7 +339,12 @@ where
         light_bind_group: &'b wgpu::BindGroup,
     ) {
         for mesh in &model.meshes {
-            self.draw_light_mesh_instanced(mesh, instances.clone(), camera_bind_group, light_bind_group);
+            self.draw_light_mesh_instanced(
+                mesh,
+                instances.clone(),
+                camera_bind_group,
+                light_bind_group,
+            );
         }
     }
 }

src/resources.rs

@@ -1,5 +1,7 @@
 use std::io::{BufReader, Cursor};

+use cfg_if::cfg_if;
+use image::codecs::hdr::HdrDecoder;
 use wgpu::util::DeviceExt;

 use crate::{model, texture};
@@ -24,11 +26,12 @@ pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
 pub async fn load_texture(
     file_name: &str,
+    is_normal_map: bool,
     device: &wgpu::Device,
     queue: &wgpu::Queue,
 ) -> anyhow::Result<texture::Texture> {
     let data = load_binary(file_name).await?;
-    texture::Texture::from_bytes(device, queue, &data, file_name)
+    texture::Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
 }
pub async fn load_model(
@@ -53,66 +56,112 @@ pub async fn load_model(
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;

let mut materials = Vec::new();
for m in obj_materials? {
let diffuse_texture = load_texture(&m.diffuse_texture, false, device, queue).await?;
let normal_texture = load_texture(&m.normal_texture, true, device, queue).await?;
materials.push(model::Material::new(
device,
&m.name,
diffuse_texture,
normal_texture,
layout,
));
}
let meshes = models
.into_iter()
.map(|m| {
let mut vertices = (0..m.mesh.positions.len() / 3)
.map(|i| model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
// We'll calculate these later
tangent: [0.0; 3],
bitangent: [0.0; 3],
})
.collect::<Vec<_>>();
let indices = &m.mesh.indices;
let mut triangles_included = vec![0; vertices.len()];
// Calculate tangents and bitangents. We're going to
// use the triangles, so we need to loop through the
// indices in chunks of 3
for c in indices.chunks(3) {
let v0 = vertices[c[0] as usize];
let v1 = vertices[c[1] as usize];
let v2 = vertices[c[2] as usize];
let pos0: cgmath::Vector3<_> = v0.position.into();
let pos1: cgmath::Vector3<_> = v1.position.into();
let pos2: cgmath::Vector3<_> = v2.position.into();
let uv0: cgmath::Vector2<_> = v0.tex_coords.into();
let uv1: cgmath::Vector2<_> = v1.tex_coords.into();
let uv2: cgmath::Vector2<_> = v2.tex_coords.into();
// Calculate the edges of the triangle
let delta_pos1 = pos1 - pos0;
let delta_pos2 = pos2 - pos0;
// This will give us a direction to calculate the
// tangent and bitangent
let delta_uv1 = uv1 - uv0;
let delta_uv2 = uv2 - uv0;
// Solving the following system of equations will
// give us the tangent and bitangent.
// delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
// delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// Luckily, the place I found this equation provided
// the solution! (A standalone sketch of the solve follows
// just after this function.)
let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
// We flip the bitangent to enable right-handed normal
// maps with wgpu's texture coordinate system
let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * -r;
// We'll use the same tangent/bitangent for each vertex in the triangle
vertices[c[0] as usize].tangent =
(tangent + cgmath::Vector3::from(vertices[c[0] as usize].tangent)).into();
vertices[c[1] as usize].tangent =
(tangent + cgmath::Vector3::from(vertices[c[1] as usize].tangent)).into();
vertices[c[2] as usize].tangent =
(tangent + cgmath::Vector3::from(vertices[c[2] as usize].tangent)).into();
vertices[c[0] as usize].bitangent =
(bitangent + cgmath::Vector3::from(vertices[c[0] as usize].bitangent)).into();
vertices[c[1] as usize].bitangent =
(bitangent + cgmath::Vector3::from(vertices[c[1] as usize].bitangent)).into();
vertices[c[2] as usize].bitangent =
(bitangent + cgmath::Vector3::from(vertices[c[2] as usize].bitangent)).into();
// Used to average the tangents/bitangents
triangles_included[c[0] as usize] += 1;
triangles_included[c[1] as usize] += 1;
triangles_included[c[2] as usize] += 1;
}
// Average the tangents/bitangents
for (i, n) in triangles_included.into_iter().enumerate() {
let denom = 1.0 / n as f32;
let v = &mut vertices[i];
v.tangent = (cgmath::Vector3::from(v.tangent) * denom).into();
v.bitangent = (cgmath::Vector3::from(v.bitangent) * denom).into();
}
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", file_name)),
contents: bytemuck::cast_slice(&vertices),
@@ -124,7 +173,6 @@ pub async fn load_model(
usage: wgpu::BufferUsages::INDEX,
});
model::Mesh {
name: file_name.to_string(),
vertex_buffer,
@@ -137,3 +185,163 @@ pub async fn load_model(
Ok(model::Model { meshes, materials })
}
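The per-triangle solve referenced above, extracted into a standalone function. This is a sketch (cgmath only, no wgpu); the function name is illustrative, and the math matches the loop inside load_model:

use cgmath::{Vector2, Vector3};

// Solve delta_pos1 = delta_uv1.x * T + delta_uv1.y * B
//       delta_pos2 = delta_uv2.x * T + delta_uv2.y * B
// for the tangent T and bitangent B of one triangle.
fn triangle_tangent_bitangent(
    pos: [Vector3<f32>; 3],
    uv: [Vector2<f32>; 3],
) -> (Vector3<f32>, Vector3<f32>) {
    let (delta_pos1, delta_pos2) = (pos[1] - pos[0], pos[2] - pos[0]);
    let (delta_uv1, delta_uv2) = (uv[1] - uv[0], uv[2] - uv[0]);
    // Inverse determinant of the 2x2 UV matrix; degenerate UVs would blow this up.
    let r = 1.0 / (delta_uv1.x * delta_uv2.y - delta_uv1.y * delta_uv2.x);
    let tangent = (delta_pos1 * delta_uv2.y - delta_pos2 * delta_uv1.y) * r;
    // Flipped (-r) for right-handed normal maps under wgpu's texture coordinates.
    let bitangent = (delta_pos2 * delta_uv1.x - delta_pos1 * delta_uv2.x) * -r;
    (tangent, bitangent)
}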
pub struct HdrLoader {
texture_format: wgpu::TextureFormat,
equirect_layout: wgpu::BindGroupLayout,
equirect_to_cubemap: wgpu::ComputePipeline,
}
impl HdrLoader {
pub fn new(device: &wgpu::Device) -> Self {
let module = device.create_shader_module(wgpu::include_wgsl!("equirectangular.wgsl"));
let texture_format = wgpu::TextureFormat::Rgba32Float;
let equirect_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("HdrLoader::equirect_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: false },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::StorageTexture {
access: wgpu::StorageTextureAccess::WriteOnly,
format: texture_format,
view_dimension: wgpu::TextureViewDimension::D2Array,
},
count: None,
},
],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&equirect_layout],
push_constant_ranges: &[],
});
let equirect_to_cubemap =
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("equirect_to_cubemap"),
layout: Some(&pipeline_layout),
module: &module,
entry_point: "compute_equirect_to_cubemap",
compilation_options: Default::default(),
cache: None,
});
Self {
equirect_to_cubemap,
texture_format,
equirect_layout,
}
}
pub fn from_equirectangular_bytes(
&self,
device: &wgpu::Device,
queue: &wgpu::Queue,
data: &[u8],
dst_size: u32,
label: Option<&str>,
) -> anyhow::Result<texture::CubeTexture> {
let hdr_decoder = HdrDecoder::new(Cursor::new(data))?;
let meta = hdr_decoder.metadata();
let pixels = {
let mut pixels = vec![[0.0, 0.0, 0.0, 0.0]; meta.width as usize * meta.height as usize];
hdr_decoder.read_image_transform(
|pix| {
let rgb = pix.to_hdr();
[rgb.0[0], rgb.0[1], rgb.0[2], 1.0f32]
},
&mut pixels[..],
)?;
pixels
};
let src = texture::Texture::create_2d_texture(
device,
meta.width,
meta.height,
self.texture_format,
wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
wgpu::FilterMode::Linear,
None,
);
queue.write_texture(
wgpu::ImageCopyTexture {
texture: &src.texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
&bytemuck::cast_slice(&pixels),
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(src.size.width * std::mem::size_of::<[f32; 4]>() as u32),
rows_per_image: Some(src.size.height),
},
src.size,
);
let dst = texture::CubeTexture::create_2d(
device,
dst_size,
dst_size,
self.texture_format,
1,
wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING,
wgpu::FilterMode::Nearest,
label,
);
let dst_view = dst.texture().create_view(&wgpu::TextureViewDescriptor {
label,
dimension: Some(wgpu::TextureViewDimension::D2Array),
// array_layer_count: Some(6),
..Default::default()
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label,
layout: &self.equirect_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&src.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&dst_view),
},
],
});
let mut encoder = device.create_command_encoder(&Default::default());
let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
label,
timestamp_writes: None,
});
let num_workgroups = (dst_size + 15) / 16;
pass.set_pipeline(&self.equirect_to_cubemap);
pass.set_bind_group(0, &bind_group, &[]);
pass.dispatch_workgroups(num_workgroups, num_workgroups, 6);
drop(pass);
queue.submit([encoder.finish()]);
Ok(dst)
}
}
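The compute pass dispatches (dst_size + 15) / 16 workgroups per axis, i.e. enough 16x16 tiles to cover one face (rounding up), with one z layer per cube face. Wiring the loader up at startup might look like this sketch; the 1080 face size and the label are assumptions, not from this commit:

// Hypothetical startup helper using the loader above.
async fn load_sky(
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) -> anyhow::Result<texture::CubeTexture> {
    let hdr_loader = HdrLoader::new(device);
    let sky_bytes = load_binary("pure-sky.hdr").await?;
    hdr_loader.from_equirectangular_bytes(device, queue, &sky_bytes, 1080, Some("Sky Texture"))
}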
src/shader.wgsl
@@ -1,5 +1,15 @@
// Vertex shader
struct Camera {
view_pos: vec4<f32>,
view: mat4x4<f32>,
view_proj: mat4x4<f32>,
inv_proj: mat4x4<f32>,
inv_view: mat4x4<f32>,
}
@group(1) @binding(0)
var<uniform> camera: Camera;
struct Light {
position: vec3<f32>,
color: vec3<f32>,
@@ -7,34 +17,34 @@ struct Light {
@group(2) @binding(0)
var<uniform> light: Light;
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) tex_coords: vec2<f32>,
@location(2) normal: vec3<f32>,
@location(3) tangent: vec3<f32>,
@location(4) bitangent: vec3<f32>,
}

struct InstanceInput {
@location(5) model_matrix_0: vec4<f32>,
@location(6) model_matrix_1: vec4<f32>,
@location(7) model_matrix_2: vec4<f32>,
@location(8) model_matrix_3: vec4<f32>,
@location(9) normal_matrix_0: vec3<f32>,
@location(10) normal_matrix_1: vec3<f32>,
@location(11) normal_matrix_2: vec3<f32>,
}

struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
// Updated!
@location(1) world_position: vec3<f32>,
@location(2) world_view_position: vec3<f32>,
@location(3) world_light_position: vec3<f32>,
@location(4) world_normal: vec3<f32>,
@location(5) world_tangent: vec3<f32>,
@location(6) world_bitangent: vec3<f32>,
}
@vertex
fn vs_main(
@@ -47,18 +57,23 @@ fn vs_main(
instance.model_matrix_2,
instance.model_matrix_3,
);
let normal_matrix = mat3x3<f32>(
instance.normal_matrix_0,
instance.normal_matrix_1,
instance.normal_matrix_2,
);

// UPDATED!
let world_position = model_matrix * vec4<f32>(model.position, 1.0);

var out: VertexOutput;
out.clip_position = camera.view_proj * world_position;
out.tex_coords = model.tex_coords;
out.world_normal = normalize(normal_matrix * model.normal);
out.world_tangent = normalize(normal_matrix * model.tangent);
out.world_bitangent = normalize(normal_matrix * model.bitangent);
out.world_position = world_position.xyz;
out.world_view_position = camera.view_pos.xyz;
return out;
}
@@ -68,27 +83,57 @@ fn vs_main(
var t_diffuse: texture_2d<f32>;
@group(0) @binding(1)
var s_diffuse: sampler;
@group(0) @binding(2)
var t_normal: texture_2d<f32>;
@group(0) @binding(3)
var s_normal: sampler;
@group(3)
@binding(0)
var env_map: texture_cube<f32>;
@group(3)
@binding(1)
var env_sampler: sampler;
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let object_color: vec4<f32> = textureSample(t_diffuse, s_diffuse, in.tex_coords);
let object_normal: vec4<f32> = textureSample(t_normal, s_normal, in.tex_coords);
// NEW!
// Adjust the tangent and bitangent using the Gram-Schmidt process.
// This makes sure that they are perpendicular to each other and to
// the normal of the surface.
let world_tangent = normalize(in.world_tangent - dot(in.world_tangent, in.world_normal) * in.world_normal);
let world_bitangent = cross(world_tangent, in.world_normal);
// Convert the normal sample to world space
let TBN = mat3x3(
world_tangent,
world_bitangent,
in.world_normal,
);
let tangent_normal = object_normal.xyz * 2.0 - 1.0;
let world_normal = TBN * tangent_normal;
// Create the lighting vectors
let light_dir = normalize(light.position - in.world_position);
let view_dir = normalize(in.world_view_position - in.world_position);
let half_dir = normalize(view_dir + light_dir);

let diffuse_strength = max(dot(world_normal, light_dir), 0.0);
let diffuse_color = light.color * diffuse_strength;

let specular_strength = pow(max(dot(world_normal, half_dir), 0.0), 32.0);
let specular_color = specular_strength * light.color;

// NEW!
// Calculate reflections
let world_reflect = reflect(-view_dir, world_normal);
let reflection = textureSample(env_map, env_sampler, world_reflect).rgb;
let shininess = 0.1;

let result = (diffuse_color + specular_color) * object_color.xyz + reflection * shininess;
return vec4<f32>(result, object_color.a);
}
45 src/sky.wgsl Normal file
@@ -0,0 +1,45 @@
struct Camera {
view_pos: vec4<f32>,
view: mat4x4<f32>,
view_proj: mat4x4<f32>,
inv_proj: mat4x4<f32>,
inv_view: mat4x4<f32>,
}
@group(0) @binding(0)
var<uniform> camera: Camera;
@group(1)
@binding(0)
var env_map: texture_cube<f32>;
@group(1)
@binding(1)
var env_sampler: sampler;
struct VertexOutput {
@builtin(position) frag_position: vec4<f32>,
@location(0) clip_position: vec4<f32>,
}
@vertex
fn vs_main(
@builtin(vertex_index) id: u32,
) -> VertexOutput {
let uv = vec2<f32>(vec2<u32>(
id & 1u,
(id >> 1u) & 1u,
));
var out: VertexOutput;
out.clip_position = vec4(uv * 4.0 - 1.0, 1.0, 1.0);
out.frag_position = out.clip_position;
return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let view_pos_homogeneous = camera.inv_proj * in.clip_position;
let view_ray_direction = view_pos_homogeneous.xyz / view_pos_homogeneous.w;
var ray_direction = normalize((camera.inv_view * vec4(view_ray_direction, 0.0)).xyz);
let sample = textureSample(env_map, env_sampler, ray_direction);
return sample;
}
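The vertex stage needs no vertex buffer: the three clip-space positions derived from the vertex index form one oversized triangle covering the screen, and the fragment stage unprojects each pixel through inv_proj/inv_view to get a world-space ray into the cubemap. Drawing it from Rust is then a single three-vertex draw; a sketch (the pipeline and bind-group names are illustrative assumptions):

// Hypothetical sky pass; assumes a pipeline built from sky.wgsl.
fn draw_sky<'a>(
    render_pass: &mut wgpu::RenderPass<'a>,
    sky_pipeline: &'a wgpu::RenderPipeline,
    camera_bind_group: &'a wgpu::BindGroup,
    environment_bind_group: &'a wgpu::BindGroup,
) {
    render_pass.set_pipeline(sky_pipeline);
    render_pass.set_bind_group(0, camera_bind_group, &[]);
    render_pass.set_bind_group(1, environment_bind_group, &[]);
    // No buffers bound; vs_main synthesizes the fullscreen triangle.
    render_pass.draw(0..3, 0..1);
}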
src/texture.rs
@@ -1,88 +1,22 @@
use anyhow::*;
use image::GenericImageView;

pub struct Texture {
#[allow(unused)]
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
pub size: wgpu::Extent3d,
}
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;

pub fn create_depth_texture(
device: &wgpu::Device,
config: &wgpu::SurfaceConfiguration,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: config.width.max(1),
height: config.height.max(1),
@@ -95,28 +29,227 @@ impl Texture {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[Self::DEPTH_FORMAT],
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: 0.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self {
texture,
view,
sampler,
size, // NEW!
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str,
is_normal_map: bool,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label), is_normal_map)
}
pub fn from_image(
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>,
is_normal_map: bool,
) -> Result<Self> {
let dimensions = img.dimensions();
let rgba = img.to_rgba8();
let format = if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
};
let usage = wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST;
let size = wgpu::Extent3d {
width: img.width(),
height: img.height(),
depth_or_array_layers: 1,
};
let texture = Self::create_2d_texture(
device,
size.width,
size.height,
format,
usage,
wgpu::FilterMode::Linear,
label,
);
queue.write_texture(
wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All,
texture: &texture.texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(4 * dimensions.0),
rows_per_image: Some(dimensions.1),
},
size,
);
Ok(texture)
}
pub(crate) fn create_2d_texture(
device: &wgpu::Device,
width: u32,
height: u32,
format: wgpu::TextureFormat,
usage: wgpu::TextureUsages,
mag_filter: wgpu::FilterMode,
label: Option<&str>,
) -> Self {
let size = wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
};
Self::create_texture(
device,
label,
size,
format,
usage,
wgpu::TextureDimension::D2,
mag_filter,
)
}
pub fn create_texture(
device: &wgpu::Device,
label: Option<&str>,
size: wgpu::Extent3d,
format: wgpu::TextureFormat,
usage: wgpu::TextureUsages,
dimension: wgpu::TextureDimension,
mag_filter: wgpu::FilterMode,
) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension,
format,
usage,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Self {
texture,
view,
sampler,
size,
}
}
}
pub struct CubeTexture {
texture: wgpu::Texture,
sampler: wgpu::Sampler,
view: wgpu::TextureView,
}
impl CubeTexture {
pub fn create_2d(
device: &wgpu::Device,
width: u32,
height: u32,
format: wgpu::TextureFormat,
mip_level_count: u32,
usage: wgpu::TextureUsages,
mag_filter: wgpu::FilterMode,
label: Option<&str>,
) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size: wgpu::Extent3d {
width,
height,
// A cube has 6 sides, so we need 6 layers
depth_or_array_layers: 6,
},
mip_level_count,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format,
usage,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor {
label,
dimension: Some(wgpu::TextureViewDimension::Cube),
array_layer_count: Some(6),
..Default::default()
});
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label,
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Self {
texture,
sampler,
view,
}
}
pub fn texture(&self) -> &wgpu::Texture {
&self.texture
}
pub fn view(&self) -> &wgpu::TextureView {
&self.view
}
pub fn sampler(&self) -> &wgpu::Sampler {
&self.sampler
}
}
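CubeTexture's accessors exist so other modules can build the environment bind group the shaders sample from (env_map plus env_sampler). A sketch of that wiring; the layout argument and function name are illustrative assumptions:

// Hypothetical helper; `layout` must declare a cube texture at binding 0
// and a sampler at binding 1, matching the shader bindings above.
fn create_environment_bind_group(
    device: &wgpu::Device,
    layout: &wgpu::BindGroupLayout,
    sky: &CubeTexture,
) -> wgpu::BindGroup {
    device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("environment_bind_group"),
        layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(sky.view()),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(sky.sampler()),
            },
        ],
    })
}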