Finished simple camera

This commit is contained in:
parent c310e20c50
commit eaf97e3f57
507 src/lib.rs
@@ -1,14 +1,13 @@
use std::iter;

use wgpu::util::DeviceExt;
use winit::{
    event::*,
    event_loop::EventLoop,
    keyboard::{KeyCode, PhysicalKey},
    window::WindowBuilder,
    window::{Window, WindowBuilder},
};

use winit::window::Window;

use wgpu::util::DeviceExt;

mod texture;

#[repr(C)]
@@ -35,24 +34,180 @@ impl Vertex {
                shader_location: 1,
                format: wgpu::VertexFormat::Float32x2,
            },
        ]
        ],
    }
}
}

const VERTICES: &[Vertex] = &[
    Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], },
    Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], },
    Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397], },
    Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732914], },
    Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], },
    Vertex {
        position: [-0.0868241, 0.49240386, 0.0],
        tex_coords: [0.4131759, 0.00759614],
    }, // A
    Vertex {
        position: [-0.49513406, 0.06958647, 0.0],
        tex_coords: [0.0048659444, 0.43041354],
    }, // B
    Vertex {
        position: [-0.21918549, -0.44939706, 0.0],
        tex_coords: [0.28081453, 0.949397],
    }, // C
    Vertex {
        position: [0.35966998, -0.3473291, 0.0],
        tex_coords: [0.85967, 0.84732914],
    }, // D
    Vertex {
        position: [0.44147372, 0.2347359, 0.0],
        tex_coords: [0.9414737, 0.2652641],
    }, // E
];

const INDICES: &[u16] = &[
    0, 1, 4,
    1, 2, 4,
    2, 3, 4,
];
const INDICES: &[u16] = &[0, 1, 4, 1, 2, 4, 2, 3, 4, /* padding */ 0];
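A guess at the /* padding */ entry in the new INDICES (the diff itself does not explain it): nine u16 indices occupy 18 bytes, and the extra index rounds the contents up to 20 bytes, a multiple of wgpu::COPY_BUFFER_ALIGNMENT (4). A quick check of that assumption:

    // Hypothetical check: the index data stays 4-byte aligned thanks to the padding element.
    assert_eq!(std::mem::size_of_val(INDICES) as u64 % wgpu::COPY_BUFFER_ALIGNMENT, 0);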
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
    1.0, 0.0, 0.0, 0.0,
    0.0, 1.0, 0.0, 0.0,
    0.0, 0.0, 0.5, 0.5,
    0.0, 0.0, 0.0, 1.0,
);
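Some context for this constant (not part of the diff): cgmath's perspective() targets OpenGL conventions, where normalized depth runs from -1 to 1, while wgpu expects depth in 0 to 1, so the matrix exists to apply the affine remap z' = 0.5 * z + 0.5 (note that cgmath::Matrix4::new takes its 16 arguments in column-major order). A standalone sketch of the intended mapping:

    // Sketch only: the depth-range remap the constant above is meant to encode.
    fn gl_to_wgpu_depth(z_ndc: f32) -> f32 {
        0.5 * z_ndc + 0.5
    }
    assert_eq!(gl_to_wgpu_depth(-1.0), 0.0); // OpenGL near plane -> wgpu 0.0
    assert_eq!(gl_to_wgpu_depth(1.0), 1.0); // OpenGL far plane -> wgpu 1.0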
struct Camera {
    eye: cgmath::Point3<f32>,
    target: cgmath::Point3<f32>,
    up: cgmath::Vector3<f32>,
    aspect: f32,
    fovy: f32,
    znear: f32,
    zfar: f32,
}

impl Camera {
    fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
        let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up);
        let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
        proj * view
    }
}

#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
    view_proj: [[f32; 4]; 4],
}

impl CameraUniform {
    fn new() -> Self {
        use cgmath::SquareMatrix;
        Self {
            view_proj: cgmath::Matrix4::identity().into(),
        }
    }

    fn update_view_proj(&mut self, camera: &Camera) {
        self.view_proj = (OPENGL_TO_WGPU_MATRIX * camera.build_view_projection_matrix()).into();
    }
}
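view_proj is stored as a plain [[f32; 4]; 4] rather than a cgmath::Matrix4<f32> so the struct can derive bytemuck::Pod and bytemuck::Zeroable and be copied byte-for-byte into the uniform buffer. A small illustration of that property (not part of the commit):

    // Hypothetical: Pod lets the uniform be viewed as raw bytes for queue.write_buffer.
    let camera_uniform = CameraUniform::new();
    let raw: &[u8] = bytemuck::bytes_of(&camera_uniform);
    assert_eq!(raw.len(), 64); // a 4x4 matrix of f32 = 64 bytes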
struct CameraController {
    speed: f32,
    is_up_pressed: bool,
    is_down_pressed: bool,
    is_forward_pressed: bool,
    is_backward_pressed: bool,
    is_left_pressed: bool,
    is_right_pressed: bool,
}

impl CameraController {
    fn new(speed: f32) -> Self {
        Self {
            speed,
            is_up_pressed: false,
            is_down_pressed: false,
            is_forward_pressed: false,
            is_backward_pressed: false,
            is_left_pressed: false,
            is_right_pressed: false,
        }
    }

    fn process_events(&mut self, event: &WindowEvent) -> bool {
        match event {
            WindowEvent::KeyboardInput {
                event:
                    KeyEvent {
                        state,
                        physical_key: PhysicalKey::Code(keycode),
                        ..
                    },
                ..
            } => {
                let is_pressed = *state == ElementState::Pressed;
                match keycode {
                    KeyCode::Space => {
                        self.is_up_pressed = is_pressed;
                        true
                    }
                    KeyCode::ShiftLeft => {
                        self.is_down_pressed = is_pressed;
                        true
                    }
                    KeyCode::KeyW | KeyCode::ArrowUp => {
                        self.is_forward_pressed = is_pressed;
                        true
                    }
                    KeyCode::KeyA | KeyCode::ArrowLeft => {
                        self.is_left_pressed = is_pressed;
                        true
                    }
                    KeyCode::KeyS | KeyCode::ArrowDown => {
                        self.is_backward_pressed = is_pressed;
                        true
                    }
                    KeyCode::KeyD | KeyCode::ArrowRight => {
                        self.is_right_pressed = is_pressed;
                        true
                    }
                    _ => false,
                }
            }
            _ => false,
        }
    }

    fn update_camera(&self, camera: &mut Camera) {
        use cgmath::InnerSpace;
        let forward = camera.target - camera.eye;
        let forward_norm = forward.normalize();
        let forward_mag = forward.magnitude();

        // Prevents glitching when camera gets too close to the
        // center of the scene.
        if self.is_forward_pressed && forward_mag > self.speed {
            camera.eye += forward_norm * self.speed;
        }
        if self.is_backward_pressed {
            camera.eye -= forward_norm * self.speed;
        }

        let right = forward_norm.cross(camera.up);

        // Redo radius calc in case the up/ down is pressed.
        let forward = camera.target - camera.eye;
        let forward_mag = forward.magnitude();

        if self.is_right_pressed {
            // Rescale the distance between the target and eye so
            // that it doesn't change. The eye therefore still
            // lies on the circle made by the target and eye.
            camera.eye = camera.target - (forward + right * self.speed).normalize() * forward_mag;
        }
        if self.is_left_pressed {
            camera.eye = camera.target - (forward - right * self.speed).normalize() * forward_mag;
        }
    }
}
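For orientation (summarizing code that appears later in this diff, not new functionality): the controller is driven from State's input() and update() methods, roughly:

    // In input(): hand every WindowEvent to the controller.
    self.camera_controller.process_events(event)

    // In update(): move the camera, refresh the uniform, and re-upload it.
    self.camera_controller.update_camera(&mut self.camera);
    self.camera_uniform.update_view_proj(&self.camera);
    self.queue.write_buffer(&self.camera_buffer, 0, bytemuck::cast_slice(&[self.camera_uniform]));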
struct State<'a> {
    surface: wgpu::Surface<'a>,
@@ -60,100 +215,73 @@ struct State<'a> {
    queue: wgpu::Queue,
    config: wgpu::SurfaceConfiguration,
    size: winit::dpi::PhysicalSize<u32>,
    // The window must be declared after the surface so
    // it gets dropped after it as the surface contains
    // unsafe references to the window's resources.
    window: &'a Window,
    render_pipeline: wgpu::RenderPipeline,
    vertex_buffer: wgpu::Buffer,
    index_buffer: wgpu::Buffer,
    num_indices: u32,
    diffuse_bind_group: wgpu::BindGroup,
    #[allow(dead_code)]
    diffuse_texture: texture::Texture,
    diffuse_bind_group: wgpu::BindGroup,
    // NEW!
    camera: Camera,
    camera_controller: CameraController,
    camera_uniform: CameraUniform,
    camera_buffer: wgpu::Buffer,
    camera_bind_group: wgpu::BindGroup,
    window: &'a Window,
}

impl<'a> State<'a> {
    // Creating some of the wgpu types requires async code
    async fn new(window: &'a Window) -> State<'a> {
        let size = window.inner_size();
        let num_vertices = VERTICES.len() as u32;

        // The instance is a handle to our GPU
        // Backends::all => Vulkan + Metal + DX12 + Browser WebGPU
        // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
            // TODO: Change this when enabling switchable graphics for the user
            // If this is not for the web, we can use Vulkan + Metal + DX12
            #[cfg(not(target_arch="wasm32"))]
            backends: wgpu::Backends::PRIMARY,
            // TODO: Probably remove this, because WebAssembly won't be used for the foreseeable future
            #[cfg(target_arch="wasm32")]
            backends: wgpu::Backends::GL,
            ..Default::default()
        });

        let surface = instance.create_surface(window).unwrap();

        // This checks all the adapters on a machine and returns an iterator
        /*let adapter = instance
            .enumerate_adapters(wgpu::Backends::all())
            .filter(|adapter| {
                // Check if this adapter supports our surface
                adapter.is_surface_supported(&surface)
            })
            .next()
            .unwrap()*/

        // TODO: Include Telemetry to see how many users WGPU chooses a suboptimal adapter for
        let adapter = instance.request_adapter(
            &wgpu::RequestAdapterOptions {
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::default(),
                compatible_surface: Some(&surface),
                force_fallback_adapter: false,
            },
        ).await.unwrap();

        log::info!("Adapter: {:?}", adapter.get_info());

        // List of all features the current device supports
        // let features = adapter.features();

        let (device, queue) = adapter.request_device(
            })
            .await
            .unwrap();
        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: None,
                    required_features: wgpu::Features::empty(),
                    // WebGL doesn't support all of wgpu's features, so if
                    // we're building for the web, we'll have to disable some.
                    // TODO: Probably remove check for wasm32, because WebAssembly won't be used for the foreseeable future
                    required_limits: if cfg!(target_arch = "wasm32") {
                        wgpu::Limits::downlevel_webgl2_defaults()
                    } else {
                        wgpu::Limits::default()
                    },
                label: None,
                // Default is performance, which requires more memory, but is faster
                // we're building for the web we'll have to disable some.
                required_limits: wgpu::Limits::default(),
                memory_hints: Default::default(),
            },
            None, // Trace path
        ).await.unwrap();
            )
            .await
            .unwrap();

        let surface_caps = surface.get_capabilities(&adapter);
        // Shader code assumes an sRGB surface texture. Using a different
        // one will result in all the colors coming out darker. If you want to support non
        // sRGB surfaces, you'll need to account for that when drawing to the frame.
        let surface_format = surface_caps.formats.iter()
            .find(|f| f.is_srgb())
        // Shader code assumes an Srgb surface texture. Using a different
        // one will result all the colors comming out darker. If you want to support non
        // Srgb surfaces, you'll need to account for that when drawing to the frame.
        let surface_format = surface_caps
            .formats
            .iter()
            .copied()
            .find(|f| f.is_srgb())
            .unwrap_or(surface_caps.formats[0]);

        log::info!("Surface format: {:?}", surface_format);
        log::info!("Surface present modes: {:?}", surface_caps.present_modes);
        log::info!("Surface alpha modes: {:?}", surface_caps.alpha_modes);

        let config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: size.width,
            height: size.height,
            // PresentMod::Fifo => VSync (Always supported)
            present_mode: surface_caps.present_modes[0],
            alpha_mode: surface_caps.alpha_modes[0],
            view_formats: vec![],
@@ -161,9 +289,8 @@ impl<'a> State<'a> {
        };

        let diffuse_bytes = include_bytes!("happy-tree.png");
        let diffuse_texture = texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();


        let diffuse_texture =
            texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "happy-tree.png").unwrap();

        let texture_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
@@ -181,8 +308,6 @@ impl<'a> State<'a> {
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        // This should match the filterable field of the
                        // corresponding Texture entry above.
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
@@ -190,8 +315,7 @@ impl<'a> State<'a> {
                label: Some("texture_bind_group_layout"),
            });

        let diffuse_bind_group = device.create_bind_group(
            &wgpu::BindGroupDescriptor {
        let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &texture_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
@@ -201,25 +325,67 @@ impl<'a> State<'a> {
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
                }
                },
            ],
            label: Some("diffuse_bind_group"),
            }
        );
        });

        let render_pipeline_layout = device.create_pipeline_layout(
            &wgpu::PipelineLayoutDescriptor {
                label: Some("Render Pipeline Layout"),
                bind_group_layouts: &[&texture_bind_group_layout],
                push_constant_ranges: &[],
            }
        );
        let camera = Camera {
            eye: (0.0, 1.0, 2.0).into(),
            target: (0.0, 0.0, 0.0).into(),
            up: cgmath::Vector3::unit_y(),
            aspect: config.width as f32 / config.height as f32,
            fovy: 45.0,
            znear: 0.1,
            zfar: 100.0,
        };
        let camera_controller = CameraController::new(0.005);

        let mut camera_uniform = CameraUniform::new();
        camera_uniform.update_view_proj(&camera);

        let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Camera Buffer"),
            contents: bytemuck::cast_slice(&[camera_uniform]),
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        });

        let camera_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                }],
                label: Some("camera_bind_group_layout"),
            });

        let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &camera_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: camera_buffer.as_entire_binding(),
            }],
            label: Some("camera_bind_group"),
        });

        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
        });

        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Render Pipeline Layout"),
                bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout],
                push_constant_ranges: &[],
            });

        let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Render Pipeline"),
            layout: Some(&render_pipeline_layout),
@@ -234,17 +400,21 @@ impl<'a> State<'a> {
                entry_point: "fs_main",
                targets: &[Some(wgpu::ColorTargetState {
                    format: config.format,
                    blend: Some(wgpu::BlendState::REPLACE),
                    blend: Some(wgpu::BlendState {
                        color: wgpu::BlendComponent::REPLACE,
                        alpha: wgpu::BlendComponent::REPLACE,
                    }),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
                compilation_options: Default::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: Some(wgpu::Face::Back),
                // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
                // Setting this to anything other than Fill requires Features::POLYGON_MODE_LINE
                // or Features::POLYGON_MODE_POINT
                polygon_mode: wgpu::PolygonMode::Fill,
                // Requires Features::DEPTH_CLIP_CONTROL
                unclipped_depth: false,
@@ -257,29 +427,26 @@ impl<'a> State<'a> {
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            // If the pipeline will be used with a multiview render pass, this
            // indicates how many array layers the attachments will have.
            multiview: None,
            // Useful for optimizing shader compilation on Android
            cache: None,
        });

        let vertex_buffer = device.create_buffer_init(
            &wgpu::util::BufferInitDescriptor {
        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: bytemuck::cast_slice(VERTICES),
            usage: wgpu::BufferUsages::VERTEX,
            }
        );

        let index_buffer = device.create_buffer_init(
            &wgpu::util::BufferInitDescriptor {
        });
        let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Index Buffer"),
            contents: bytemuck::cast_slice(INDICES),
            usage: wgpu::BufferUsages::INDEX,
            }
        );
        });
        let num_indices = INDICES.len() as u32;

        Self {
            window,
            surface,
            device,
            queue,
@@ -289,8 +456,14 @@ impl<'a> State<'a> {
            vertex_buffer,
            index_buffer,
            num_indices,
            diffuse_bind_group,
            diffuse_texture,
            diffuse_bind_group,
            camera,
            camera_controller,
            camera_buffer,
            camera_bind_group,
            camera_uniform,
            window,
        }
    }
@@ -304,36 +477,50 @@ impl<'a> State<'a> {
            self.config.width = new_size.width;
            self.config.height = new_size.height;
            self.surface.configure(&self.device, &self.config);

            self.camera.aspect = self.config.width as f32 / self.config.height as f32;
        }
    }

    fn input(&mut self, event: &WindowEvent) -> bool {
        // Change color on mouse move
        match event {
            WindowEvent::CursorMoved { position, .. } => {
                /*let size = self.size;
                let color = wgpu::Color {
                    r: position.x as f64 / size.width as f64,
                    g: position.y as f64 / size.height as f64,
                    b: 0.3,
                    a: 1.0,
                };
        self.camera_controller.process_events(event)
    }

                let output = self.surface.get_current_texture().unwrap();
                let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());
    fn update(&mut self) {
        self.camera_controller.update_camera(&mut self.camera);
        self.camera_uniform.update_view_proj(&self.camera);
        self.queue.write_buffer(
            &self.camera_buffer,
            0,
            bytemuck::cast_slice(&[self.camera_uniform]),
        );
    }

                let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });

        {
            let _render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(color),
                        load: wgpu::LoadOp::Clear(wgpu::Color {
                            r: 0.1,
                            g: 0.2,
                            b: 0.3,
                            a: 1.0,
                        }),
                        store: wgpu::StoreOp::Store,
                    },
                })],
@@ -341,79 +528,16 @@ impl<'a> State<'a> {
                occlusion_query_set: None,
                timestamp_writes: None,
            });
        }

                self.queue.submit(std::iter::once(encoder.finish()));
                output.present();
                return true;*/
                false
            }
            WindowEvent::KeyboardInput {
                event:
                    KeyEvent {
                        state: ElementState::Pressed,
                        physical_key: PhysicalKey::Code(KeyCode::Space),
                        ..
                    },
                ..
            } => {
                true
            }

            _ => {
                false
            }
        }
    }

    fn update(&mut self) {

    }

    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
        let output = self.surface.get_current_texture()?;

        let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());

        let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            label: Some("Render Encoder"),
        });

        {
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[
                    // This is what @location(0) in the fragment shader targets
                    Some(wgpu::RenderPassColorAttachment {
                        view: &view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(
                                wgpu::Color {
                                    r: 0.1,
                                    g: 0.2,
                                    b: 0.3,
                                    a: 1.0,
                                }
                            ),
                            store: wgpu::StoreOp::Store,
                        }
                    })
                ],
                depth_stencil_attachment: None,
                occlusion_query_set: None,
                timestamp_writes: None,
            });

            render_pass.set_pipeline(&self.render_pipeline);
            render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
            render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
            render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
            render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
            render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
        }

        // submit will accept anything that implements IntoIter
        self.queue.submit(std::iter::once(encoder.finish()));
        self.queue.submit(iter::once(encoder.finish()));
        output.present();

        Ok(())
@@ -422,19 +546,22 @@ impl<'a> State<'a> {

pub async fn run() {
    env_logger::init();

    let event_loop = EventLoop::new().unwrap();
    let window = WindowBuilder::new().build(&event_loop).unwrap();

    // State::new uses async code, so we're going to wait for it to finish
    let mut state = State::new(&window).await;
    // Variable used to prevent rendering before the surface is configured
    let mut surface_configured = false;

    event_loop.run(move |event, control_flow| {
    event_loop
        .run(move |event, control_flow| {
            match event {
                Event::WindowEvent {
                    ref event,
                    window_id,
                } if window_id == state.window().id() => if !state.input(event) {
                } if window_id == state.window().id() => {
                    if !state.input(event) {
                        match event {
                            WindowEvent::CloseRequested
                            | WindowEvent::KeyboardInput {
@@ -446,12 +573,10 @@ pub async fn run() {
                                },
                                ..
                            } => control_flow.exit(),

                            WindowEvent::Resized(physical_size) => {
                                surface_configured = true;
                                state.resize(*physical_size);
                            }

                            WindowEvent::RedrawRequested => {
                                // This tells winit that we want another frame after this one
                                state.window().request_redraw();
@@ -482,7 +607,9 @@ pub async fn run() {
                            _ => {}
                        }
                    }
                }
                _ => {}
            }
        }).expect("Event loop crashed unexpectedly");
        })
        .unwrap();
}
@@ -1,4 +1,9 @@
// Vertex shader
struct CameraUniform {
    view_proj: mat4x4<f32>,
};
@group(1) @binding(0) // 1.
var<uniform> camera: CameraUniform;

struct VertexInput {
    @location(0) position: vec3<f32>,
@@ -16,7 +21,7 @@ fn vs_main(
) -> VertexOutput {
    var out: VertexOutput;
    out.tex_coords = model.tex_coords;
    out.clip_position = vec4<f32>(model.position, 1.0);
    out.clip_position = camera.view_proj * vec4<f32>(model.position, 1.0); // 2.
    return out;
}
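These WGSL hunks come from the shader that lib.rs loads via include_str!("shader.wgsl"). The @group(1) @binding(0) declaration lines up with the Rust side of the commit: the camera layout is the second entry in bind_group_layouts (group index 1), the buffer sits at binding 0 inside that group, and render() selects it with set_bind_group(1, ...). Pulled together from the diff above:

    // Pipeline layout: group 0 = texture, group 1 = camera.
    bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout],
    // Draw time: the first argument to set_bind_group is the WGSL @group index.
    render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
    render_pass.set_bind_group(1, &self.camera_bind_group, &[]);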