use std::cmp::max;
use std::collections::HashMap;
use std::sync::Arc;
use cgmath::{perspective, Deg, Matrix4, Point3, Vector3};
use log::info;
use pollster::FutureExt;
use wgpu::util::DeviceExt;
use wgpu::{Adapter, BindGroup, BindGroupLayout, Device, Instance, PresentMode, Queue, Surface, SurfaceCapabilities, SurfaceConfiguration};
use winit::dpi::PhysicalSize;
use winit::window::Window;
use crate::camera::Camera;
use crate::light::{ClusterBuffers, GpuLight, LightManager};
use crate::render::{create_circle_vertices, create_sphere_vertices, Geometry, Globals, InstanceRaw, RenderInstance, SampleCount, Shape, Vertex};
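/// Owns all GPU state for rendering: the window surface, device and queue,
/// swapchain configuration, MSAA render pipeline, per-shape geometry buffers,
/// instance data, camera, depth target, and light manager.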
pub struct State<'a> {
surface: Surface<'a>,
device: Device,
queue: Queue,
config: SurfaceConfiguration,
sample_count: SampleCount,
size: PhysicalSize<u32>,
window: Arc<Window>,
render_pipeline: wgpu::RenderPipeline,
instances: Vec<RenderInstance>,
instance_buffer: wgpu::Buffer,
geometries: HashMap<Shape, Geometry>,
global_bind_group: BindGroup,
global_buffer: wgpu::Buffer,
camera: Camera,
depth_texture: wgpu::TextureView,
pub light_manager: LightManager,
}
impl<'a> State<'a> {
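/// Builds the complete render state for the given window: creates the wgpu
/// instance, surface, adapter, device and queue, probes MSAA support, uploads
/// the initial camera globals, sets up lighting, and creates the render
/// pipeline, geometry buffers, instance buffer, and depth texture.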
pub(crate) fn new(window: Window) -> Self {
let window_arc = Arc::new(window);
let size = window_arc.inner_size();
let instance = Self::create_gpu_instance();
let surface = instance.create_surface(window_arc.clone()).unwrap();
let adapter = Self::create_adapter(instance, &surface);
let (device, queue) = Self::create_device(&adapter);
let surface_caps = surface.get_capabilities(&adapter);
let config = Self::create_surface_config(size, surface_caps);
surface.configure(&device, &config);
let sample_count = SampleCount(Self::probe_msaa_support(&device, &config));
info!("MSAA sample count: {}", sample_count.0);
let aspect = config.width as f32 / config.height as f32;
let camera = Camera::new(aspect);
let view_proj = camera.build_view_projection_matrix();
let globals = Globals {
view_proj: view_proj.into(),
resolution: [config.width as f32, config.height as f32],
_padding: [0.0, 0.0],
};
let (global_buffer, global_bind_group_layout, global_bind_group) = Self::create_global_buffer(&device);
queue.write_buffer(&global_buffer, 0, bytemuck::cast_slice(&[globals]));
let mut light_manager = LightManager::new(&device, 10);
let render_pipeline = Self::create_render_pipeline(&queue, &device, &config, sample_count.0, &global_bind_group_layout, &mut light_manager, &camera);
let geometries = Self::create_geometries(&device);
let instances = vec![];
let instance_data: Vec<InstanceRaw> = instances.iter().map(RenderInstance::to_raw).collect();
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
});
let depth_texture = Self::create_depth_texture(&device, config.width, config.height, sample_count.get());
Self {
surface,
device,
queue,
config,
sample_count,
size,
window: window_arc,
render_pipeline,
geometries,
global_bind_group,
global_buffer,
instances,
instance_buffer,
camera,
depth_texture,
light_manager
}
}
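/// Creates the uniform buffer holding `Globals`, plus the bind group layout
/// (a single uniform binding visible to vertex and fragment stages) and the
/// matching bind group.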
fn create_global_buffer(device: &wgpu::Device) -> (wgpu::Buffer, BindGroupLayout, BindGroup) {
let global_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Global Buffer"),
size: size_of::<Globals>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let global_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Global Bind Group Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
});
let global_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &global_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: global_buffer.as_entire_binding(),
}],
label: Some("Global Bind Group"),
});
(global_buffer, global_bind_group_layout, global_bind_group)
}
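/// Writes the current lights, converted to their GPU representation, into the
/// light manager's buffer.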
fn update_lights(&mut self) {
let light_data: Vec<GpuLight> = self.light_manager.lights.iter().map(|l| l.to_gpu()).collect();
self.queue.write_buffer(&self.light_manager.buffer, 0, bytemuck::cast_slice(&light_data));
}
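/// Creates a Depth32Float depth texture matching the framebuffer size and MSAA
/// sample count, and returns its default view.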
fn create_depth_texture(device: &Device, width: u32, height: u32, sample_count: u32) -> wgpu::TextureView {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Depth Texture"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Depth32Float,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
texture.create_view(&Default::default())
}
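/// Builds the vertex/index buffers for every supported shape (circle and sphere).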
fn create_geometries(device: &Device) -> HashMap<Shape, Geometry> {
let mut geometries = HashMap::new();
let (circle_vertices, circle_indices) = create_circle_vertices(512, 0.5, [0.5, 0.5, 0.5]);
let circle_geometry = Self::create_geometry(device, &circle_vertices, &circle_indices);
geometries.insert(Shape::Circle, circle_geometry);
let (sphere_vertices, sphere_indices) = create_sphere_vertices(32, 32, 0.5, [0.5, 0.5, 0.5]);
let sphere_geometry = Self::create_geometry(device, &sphere_vertices, &sphere_indices);
geometries.insert(Shape::Sphere, sphere_geometry);
geometries
}
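/// Uploads vertex and index data into GPU buffers and records the index count.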
fn create_geometry(device: &Device, vertices: &[Vertex], indices: &[u16]) -> Geometry {
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(indices),
usage: wgpu::BufferUsages::INDEX,
});
Geometry {
vertex_buffer,
index_buffer,
index_count: indices.len() as u32,
}
}
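/// Probes the highest supported MSAA sample count by creating small render
/// targets in the surface format under a validation error scope, trying
/// 16, 8, 4 and 2 samples in turn; falls back to 1 if none validate.
/// Only the color format is probed, not the depth format.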
fn probe_msaa_support(device: &Device, config: &SurfaceConfiguration) -> u32 {
pollster::block_on(async {
for &count in &[16, 8, 4, 2] {
device.push_error_scope(wgpu::ErrorFilter::Validation);
let _ = device.create_texture(&wgpu::TextureDescriptor {
label: Some("MSAA Probe"),
size: wgpu::Extent3d {
width: 4,
height: 4,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: count,
dimension: wgpu::TextureDimension::D2,
format: config.format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
if device.pop_error_scope().await.is_none() {
return count;
}
}
1 // fallback
})
}
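/// Compiles `shader.wgsl`, computes the initial light cluster assignment and
/// buffers, and builds the render pipeline with the global, light, and cluster
/// bind group layouts, per-vertex and per-instance buffers, Depth32Float depth
/// testing, and the probed MSAA sample count.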
fn create_render_pipeline(queue: &Queue, device: &Device, config: &SurfaceConfiguration, sample_count: u32, global_bind_group_layout: &BindGroupLayout, light_manager: &mut LightManager, camera: &Camera) -> wgpu::RenderPipeline {
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let cluster_assignment = light_manager.compute_cluster_assignments(
camera.build_view_matrix(),
camera.build_view_projection_matrix(),
config.width as f32,
config.height as f32,
);
let cluster_buffers = light_manager.create_cluster_buffers(&device, &cluster_assignment);
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&global_bind_group_layout,
&light_manager.layout,
&cluster_buffers.layout,
],
push_constant_ranges: &[],
});
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: &[Vertex::desc(), InstanceRaw::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: wgpu::PipelineCompilationOptions::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Requires Features::DEPTH_CLIP_CONTROL
unclipped_depth: false,
// Requires Features::CONSERVATIVE_RASTERIZATION
conservative: false,
},
depth_stencil: Some(wgpu::DepthStencilState {
format: wgpu::TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilState::default(),
bias: wgpu::DepthBiasState::default(),
}),
multisample: wgpu::MultisampleState {
count: sample_count,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
})
}
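/// Picks an sRGB surface format when available (otherwise the first supported
/// format) and configures the surface for rendering with vsync.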
fn create_surface_config(size: PhysicalSize<u32>, capabilities: SurfaceCapabilities) -> wgpu::SurfaceConfiguration {
let surface_format = capabilities.formats.iter()
.find(|f| f.is_srgb())
.copied()
.unwrap_or(capabilities.formats[0]);
SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: PresentMode::AutoVsync,
alpha_mode: capabilities.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
}
}
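/// Requests the device and queue, blocking on the async request.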
fn create_device(adapter: &Adapter) -> (Device, Queue) {
adapter.request_device(
&wgpu::DeviceDescriptor {
required_features: wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
label: None,
trace: Default::default(),
}).block_on().unwrap()
}
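/// Requests an adapter compatible with the surface, blocking on the async request.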
fn create_adapter(instance: Instance, surface: &Surface) -> Adapter {
instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}
).block_on().unwrap()
}
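/// Creates the wgpu instance restricted to the primary backends.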
fn create_gpu_instance() -> Instance {
Instance::new(&wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
})
}
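/// Handles a window resize: reconfigures the surface (clamped to at least 1x1),
/// updates the camera aspect ratio and the `Globals` uniform, and recreates the
/// depth texture at the new size.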
pub(crate) fn resize(&mut self, new_size: PhysicalSize<u32>) {
self.size = new_size;
self.config.width = max(new_size.width, 1);
self.config.height = max(new_size.height, 1);
self.surface.configure(&self.device, &self.config);
self.camera.set_aspect(self.config.width as f32 / self.config.height as f32);
let view_proj = self.camera.build_view_projection_matrix();
let new_globals = Globals {
view_proj: view_proj.into(),
resolution: [self.config.width as f32, self.config.height as f32],
_padding: [0.0, 0.0],
};
self.queue.write_buffer(&self.global_buffer, 0, bytemuck::cast_slice(&[new_globals]));
self.depth_texture = Self::create_depth_texture(&self.device, self.config.width, self.config.height, self.sample_count.get());
println!("Resized to {:?} from state!", new_size);
}
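/// Renders a frame: acquires the swapchain texture, (re)creates the MSAA color
/// target, refreshes the camera globals and light cluster buffers, then records
/// a single render pass drawing each geometry's instances.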
pub(crate) fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let output = self.surface.get_current_texture()?;
let multisampled_texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: Some("Multisampled Render Target"),
size: wgpu::Extent3d {
width: self.config.width,
height: self.config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: self.sample_count.get(),
dimension: wgpu::TextureDimension::D2,
format: self.config.format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
let multisampled_view = multisampled_texture.create_view(&Default::default());
let output_view = output.texture.create_view(&Default::default());
let view_proj = self.camera.build_view_projection_matrix();
let globals = Globals {
view_proj: view_proj.into(),
resolution: [self.config.width as f32, self.config.height as f32],
_padding: [0.0, 0.0],
};
self.queue.write_buffer(&self.global_buffer, 0, bytemuck::cast_slice(&[globals]));
let assignment = self.light_manager.compute_cluster_assignments(
self.camera.build_view_matrix(),
self.camera.build_view_projection_matrix(),
self.config.width as f32,
self.config.height as f32,
);
self.light_manager.update_cluster_buffers(&self.device, &self.queue, &assignment);
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
// Render into the MSAA target and resolve into the swapchain view; if MSAA
// is unavailable (sample count 1), render directly to the swapchain view,
// since a resolve target is invalid on a single-sampled attachment.
view: if self.sample_count.get() > 1 { &multisampled_view } else { &output_view },
resolve_target: if self.sample_count.get() > 1 { Some(&output_view) } else { None },
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 1.0,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: wgpu::StoreOp::Store,
}
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &self.depth_texture,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.global_bind_group, &[]);
// Update the light manager buffer
self.light_manager.update_gpu(&self.queue);
render_pass.set_bind_group(1, &self.light_manager.bind_group, &[]);
if let Some(clusters) = &self.light_manager.cluster_buffers {
render_pass.set_bind_group(2, &clusters.bind_group, &[]);
}
for shape in self.geometries.keys().copied() {
let geometry = &self.geometries[&shape];
// Collect the indices of the instances that use this shape.
let relevant_instances: Vec<u32> = self.instances
.iter()
.enumerate()
.filter(|(_, inst)| inst.shape == shape)
.map(|(i, _)| i as u32)
.collect();
if relevant_instances.is_empty() {
continue;
}
render_pass.set_vertex_buffer(0, geometry.vertex_buffer.slice(..));
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_index_buffer(geometry.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
// Draw each matching instance by its index into the shared instance buffer;
// drawing 0..len would render the first N instances regardless of shape.
for i in relevant_instances {
render_pass.draw_indexed(0..geometry.index_count, 0, i..i + 1);
}
}
}
self.queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
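/// Replaces the current instances and uploads their GPU representation,
/// recreating the instance buffer if the new data no longer fits.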
pub fn set_instances(&mut self, instances: Vec<RenderInstance>) {
let raw_data: Vec<InstanceRaw> = instances.iter().map(RenderInstance::to_raw).collect();
let byte_len = (raw_data.len() * size_of::<InstanceRaw>()) as wgpu::BufferAddress;
// Resize the buffer if necessary
if byte_len > self.instance_buffer.size() {
self.instance_buffer = self.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Instance Buffer (resized)"),
contents: bytemuck::cast_slice(&raw_data),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
});
} else {
self.queue.write_buffer(&self.instance_buffer, 0, bytemuck::cast_slice(&raw_data));
}
self.instances = instances;
}
pub fn window(&self) -> &Window {
&self.window
}
pub fn camera_mut(&mut self) -> &mut crate::camera::Camera {
&mut self.camera
}
pub fn camera(&self) -> &crate::camera::Camera {
&self.camera
}
}