use std::{convert::TryInto, mem::size_of, num::NonZeroU32}; use crate::{ camera::CameraUniform, instance::InstanceId, vertex::SQUARE, Camera, Instance, RenderWindowConfig, Vertex, }; use pollster::FutureExt; use thiserror::Error; use wgpu::{include_wgsl, util::DeviceExt}; use winit::{ dpi::PhysicalSize, error::OsError, event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::Window, }; /// No device could be found which supports the given surface #[derive(Clone, Copy, Debug, Error)] #[error("No GPU could be found on this machine")] pub struct NoGpuError { /// Prevents this type from being constructed _priv: (), } impl NoGpuError { /// Create a new error const fn new() -> Self { Self { _priv: () } } } #[derive(Debug, Error)] pub enum NewRendererError { #[error(transparent)] NoGpu(#[from] NoGpuError), #[error(transparent)] // TODO better error WindowInitError(#[from] OsError), } #[derive(Debug)] pub struct Renderer { // TODO move some of this data elsewhere surface: wgpu::Surface, surface_config: wgpu::SurfaceConfiguration, supported_present_modes: Box<[wgpu::PresentMode]>, device: wgpu::Device, queue: wgpu::Queue, render_pipeline: wgpu::RenderPipeline, square_vertex_buffer: wgpu::Buffer, square_vertices: u32, instance_buffer: wgpu::Buffer, instance_buffer_size: usize, instances: Vec, camera: Camera, camera_buffer: wgpu::Buffer, camera_bind_group: wgpu::BindGroup, window: Window, } // TODO make this more complete impl Renderer { fn get_adapter( instance: &wgpu::Instance, surface: &wgpu::Surface, power_preference: wgpu::PowerPreference, ) -> Result { let adapter = instance .request_adapter(&wgpu::RequestAdapterOptions { power_preference, compatible_surface: Some(surface), force_fallback_adapter: false, }) .block_on(); // TODO this takes too long let adapter = adapter.or_else(|| { instance .enumerate_adapters(wgpu::Backends::PRIMARY) .find(|adapter| !surface.get_supported_formats(adapter).is_empty()) }); adapter.ok_or(NoGpuError::new()) } fn 
sprite_render_pipeline( device: &wgpu::Device, texture_format: wgpu::TextureFormat, render_pipeline_layout: &wgpu::PipelineLayout, ) -> wgpu::RenderPipeline { let shader = device.create_shader_module(include_wgsl!("../shaders/sprite.wgsl")); device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { label: Some("Sprite Render Pipeline"), layout: Some(render_pipeline_layout), // information about the vertex shader vertex: wgpu::VertexState { module: &shader, entry_point: "vs_main", buffers: &[Vertex::desc(), Instance::desc()], }, // information about the fragment shader fragment: Some(wgpu::FragmentState { module: &shader, entry_point: "fs_main", targets: &[Some(wgpu::ColorTargetState { format: texture_format, blend: Some(wgpu::BlendState::REPLACE), write_mask: wgpu::ColorWrites::ALL, })], }), primitive: wgpu::PrimitiveState { // don't render the back of a sprite cull_mode: Some(wgpu::Face::Back), ..Default::default() }, depth_stencil: None, multisample: wgpu::MultisampleState::default(), multiview: None, }) } fn new_instance_buffer( device: &wgpu::Device, instances: &Vec, ) -> (wgpu::Buffer, usize) { let instance_buffer_size = instances.capacity(); let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: Some("Sprite Instance Buffer"), size: (instance_buffer_size * size_of::()) as wgpu::BufferAddress, usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST, mapped_at_creation: false, }); (instance_buffer, instance_buffer_size) } /// Initializes the renderer /// /// # Errors /// /// Returns a [`NoGpu`] error if no device could be detected that can /// display to the window /// /// # Panics /// /// This function **must** be called on the main thread, or else it may /// panic on some platforms. 
// TODO make it possible to use without a window (ie, use a bitmap in memory as a surface) // TODO this function needs to be smaller pub fn new( config: &RenderWindowConfig, event_loop: &EventLoop<()>, ) -> Result { // build the window let window = config.to_window().build(event_loop)?; // the instance's main purpose is to create an adapter and a surface let instance = wgpu::Instance::new(wgpu::Backends::all()); // the surface is the part of the screen we'll draw to let surface = unsafe { instance.create_surface(&window) }; let power_preference = config.power_preference(); // the adapter is the handle to the GPU let adapter = Self::get_adapter(&instance, &surface, power_preference)?; // gets a connection to the device, as well as a handle to its command queue // the options chosen here ensure that this is guaranteed to not panic let (device, queue) = adapter .request_device( &wgpu::DeviceDescriptor { features: wgpu::Features::empty(), ..Default::default() }, None, ) .block_on() .expect("there was no device with the selected features"); // configuration for the surface let supported_present_modes = surface.get_supported_modes(&adapter).into_boxed_slice(); let surface_config = config.to_surface_configuration( &surface.get_supported_modes(&adapter), surface.get_supported_formats(&adapter)[0], ); surface.configure(&device, &surface_config); // create the camera let width = window.inner_size().width; let height = window.inner_size().height; let camera = Camera::from_size(width, height); let camera_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: Some("Camera Uniform"), size: size_of::() as wgpu::BufferAddress, usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, mapped_at_creation: false, }); let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { label: Some("Camera Bind Group Layout"), entries: &[wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStages::VERTEX, ty: 
wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: None, }, count: None, }], }); let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { label: Some("Camera Bind Group"), layout: &camera_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: camera_buffer.as_entire_binding(), }], }); let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { label: Some("Sprite Render Pipeline Layout"), bind_group_layouts: &[&camera_bind_group_layout], push_constant_ranges: &[], }); // set up a pipeline for sprite rendering let render_pipeline = Self::sprite_render_pipeline(&device, surface_config.format, &render_pipeline_layout); // the vertex buffer used for rendering squares let square_vertices = SQUARE .len() .try_into() .expect("expected fewer than 3 billion vertices in a square"); let square_vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { label: Some("Square Vertex Buffer"), contents: bytemuck::cast_slice(&SQUARE), usage: wgpu::BufferUsages::VERTEX, }); // create the instance buffer let instances = Vec::with_capacity(config.instance_capacity); let (instance_buffer, instance_buffer_size) = Self::new_instance_buffer(&device, &instances); Ok(Self { surface, surface_config, supported_present_modes, device, queue, render_pipeline, square_vertex_buffer, square_vertices, instance_buffer, instance_buffer_size, instances, camera, camera_buffer, camera_bind_group, window, }) } fn reconfigure(&mut self) { self.surface.configure(&self.device, &self.surface_config); } /// Resize just the renderer. 
The window will remain unchanged fn resize_renderer(&mut self, size: PhysicalSize) { if size.width == 0 || size.height == 0 { log::error!("The window was somehow set to a size of zero"); return; } self.surface_config.height = size.height; self.surface_config.width = size.width; self.camera.set_size(size.width, size.height); self.reconfigure(); } /// Set the physical window and renderer size pub fn resize(&mut self, width: NonZeroU32, height: NonZeroU32) { let size = PhysicalSize::new(width.get(), height.get()); self.window.set_inner_size(size); self.resize_renderer(size); } /// Set vsync on or off. See `[RenderWindowConfig::present_mode]` for more details. pub fn set_vsync(&mut self, vsync: bool) { self.surface_config.present_mode = RenderWindowConfig::present_mode(vsync, &self.supported_present_modes); self.reconfigure(); } /// Set the window's title pub fn set_title(&mut self, title: &str) { self.window.set_title(title); } fn expand_instance_buffer(&mut self) { (self.instance_buffer, self.instance_buffer_size) = Self::new_instance_buffer(&self.device, &self.instances); } fn fill_instance_buffer(&mut self) { if self.instances.len() > self.instance_buffer_size { self.expand_instance_buffer(); } self.queue.write_buffer( &self.instance_buffer, 0 as wgpu::BufferAddress, bytemuck::cast_slice(&self.instances), ); } /// Add an instance to the renderer, and returns an `InstanceId` to the /// instance. This id becomes invalid if the instances are cleared. 
pub fn push_instance(&mut self, instance: Instance) -> InstanceId { let index = self.instances.len(); self.instances.push(instance); InstanceId(index) } /// Get an immutable reference to an instance pub fn instance(&self, id: InstanceId) -> Option<&Instance> { self.instances.get(id.0) } /// Get a mutable reference to an instance pub fn instance_mut(&mut self, id: InstanceId) -> Option<&mut Instance> { self.instances.get_mut(id.0) } /// Clears the list of instances, making all instance ID's invalid pub fn clear_instances(&mut self) { self.instances.clear(); } /// Get the camera information pub const fn camera(&self) -> &Camera { &self.camera } /// Get a mutable reference to the camera pub fn camera_mut(&mut self) -> &mut Camera { &mut self.camera } fn refresh_camera_buffer(&mut self) { self.queue.write_buffer( &self.camera_buffer, 0 as wgpu::BufferAddress, bytemuck::cast_slice(&self.camera.to_matrix()), ); } /// Renders a new frame to the window /// /// # Errors /// /// A number of problems could occur here. A timeout could occur while /// trying to acquire the next frame. There may also be no more memory left /// that can be used for the new frame. 
    // TODO this is too big
    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
        // the new texture we can render to
        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        // this will allow us to send commands to the gpu
        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });

        // upload this frame's instances before recording the draw
        self.fill_instance_buffer();
        let num_instances = self
            .instances
            .len()
            .try_into()
            .expect("expected less than 3 billion instances");
        // upload the current camera matrix as well
        self.refresh_camera_buffer();

        // scope the render pass so it is dropped before `encoder.finish()`
        {
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        // clear the whole frame to black before drawing
                        load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                        store: true,
                    },
                })],
                depth_stencil_attachment: None,
            });
            render_pass.set_pipeline(&self.render_pipeline);
            render_pass.set_bind_group(0, &self.camera_bind_group, &[]);
            // slot 0: square vertices, slot 1: per-sprite instance data
            // (matches the buffer layout order in the sprite pipeline)
            render_pass.set_vertex_buffer(0, self.square_vertex_buffer.slice(..));
            render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
            render_pass.draw(0..self.square_vertices, 0..num_instances);
        }
        // the encoder can't finish building the command buffer until the
        // render pass is dropped

        // submit the command buffer to the GPU
        self.queue.submit(std::iter::once(encoder.finish()));
        output.present();

        Ok(())
    }

    /// Run the renderer indefinitely
    // NOTE: `EventLoop::run` never returns, hence the `!` return type; this
    // consumes the renderer and takes over the calling thread
    pub fn run(mut self, event_loop: EventLoop<()>) -> ! {
        // the window presumably starts hidden to avoid showing an
        // unrendered frame — TODO confirm against `to_window`
        self.window.set_visible(true);
        event_loop.run(move |event, _, control_flow| match event {
            Event::WindowEvent { window_id, event } => {
                // ignore events aimed at other windows
                if window_id == self.window.id() {
                    match event {
                        WindowEvent::Resized(size) => self.resize_renderer(size),
                        // exit cleanly when the user closes the window
                        WindowEvent::CloseRequested => *control_flow = ControlFlow::ExitWithCode(0),
                        _ => (),
                    }
                }
            }
            // draw a frame once all pending events are processed
            Event::MainEventsCleared => {
                match self.render() {
                    Ok(_) => {}
                    // reconfigure the surface if it's been lost
                    Err(wgpu::SurfaceError::Lost) => {
                        self.reconfigure();
                    }
                    // if we ran out of memory, then we'll die
                    Err(wgpu::SurfaceError::OutOfMemory) => {
                        *control_flow = ControlFlow::ExitWithCode(1);
                    }
                    // otherwise, we'll just log the error
                    Err(e) => log::error!("{}", e),
                }
            }
            _ => {}
        })
    }
}