bevy/crates/bevy_pbr/src/meshlet/persistent_buffer.rs

127 lines
5.1 KiB
Rust
Raw Normal View History

Forbid unsafe in most crates in the engine (#12684)

# Objective

Resolves #3824. `unsafe` code should be the exception, not the norm in Rust. It's obviously needed for various use cases as it's interfacing with platforms and essentially running the borrow checker at runtime in the ECS, but the touted benefit of Bevy is that we are able to heavily leverage Rust's safety, and we should be holding ourselves accountable to that by minimizing our unsafe footprint.

# Solution

Deny `unsafe_code` workspace-wide. Add explicit exceptions for the following crates, and forbid it in almost all of the others:

* bevy_ecs - Obvious given how much unsafe is needed to achieve performant results
* bevy_ptr - Works with raw pointers, even more low level than bevy_ecs
* bevy_render - due to needing to integrate with wgpu
* bevy_window - due to needing to integrate with raw_window_handle
* bevy_utils - Several unsafe utilities used by bevy_ecs. Ideally moved into bevy_ecs instead of made publicly usable
* bevy_reflect - Required for the unsafe type casting it's doing
* bevy_transform - for the parallel transform propagation
* bevy_gizmos - for the SystemParam impls it has
* bevy_assets - to support reflection. Might not be required, not 100% sure yet
* bevy_mikktspace - due to being a conversion from a C library. Pending safe rewrite
* bevy_dynamic_plugin - inherently unsafe due to the dynamic loading nature

Several uses of unsafe were rewritten, as they did not need to be using them:

* bevy_text - a case of `Option::unchecked` could be rewritten as a normal for loop and match instead of an iterator
* bevy_color - the Pod/Zeroable implementations were replaceable with bytemuck's derive macros
2024-03-27 03:30:08 +00:00
#![allow(unsafe_code)]
use bevy_render::{
render_resource::{
BindingResource, Buffer, BufferAddress, BufferDescriptor, BufferUsages,
CommandEncoderDescriptor,
},
renderer::{RenderDevice, RenderQueue},
};
use range_alloc::RangeAllocator;
use std::{num::NonZeroU64, ops::Range};
/// Wrapper for a GPU buffer holding a large amount of data that persists across frames.
///
/// Data is queued CPU-side via `queue_write()` and flushed to the GPU in
/// `perform_writes()`; the underlying buffer grows (never shrinks) on demand.
pub struct PersistentGpuBuffer<T: PersistentGpuBufferable> {
    /// Debug label for the buffer.
    label: &'static str,
    /// Handle to the GPU buffer.
    buffer: Buffer,
    /// Tracks free slices of the buffer.
    ///
    /// The planner's range may temporarily exceed the actual GPU buffer size;
    /// the buffer is reallocated to catch up during `perform_writes()`.
    allocation_planner: RangeAllocator<BufferAddress>,
    /// Queue of pending writes, and associated metadata.
    ///
    /// Each entry is (data, metadata, destination byte range within the buffer).
    write_queue: Vec<(T, T::Metadata, Range<BufferAddress>)>,
}
impl<T: PersistentGpuBufferable> PersistentGpuBuffer<T> {
    /// Create a new persistent buffer.
    ///
    /// The underlying GPU buffer starts with zero size; it is grown lazily by
    /// [`Self::perform_writes`] once data has actually been queued.
    pub fn new(label: &'static str, render_device: &RenderDevice) -> Self {
        Self {
            label,
            buffer: render_device.create_buffer(&BufferDescriptor {
                label: Some(label),
                size: 0,
                usage: BufferUsages::STORAGE | BufferUsages::COPY_DST | BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            }),
            allocation_planner: RangeAllocator::new(0..0),
            write_queue: Vec::new(),
        }
    }

    /// Queue an item of type T to be added to the buffer, returning the byte range within the buffer that it will be located at.
    ///
    /// The data is not uploaded until [`Self::perform_writes`] is called.
    pub fn queue_write(&mut self, data: T, metadata: T::Metadata) -> Range<BufferAddress> {
        let data_size = data.size_in_bytes() as u64;

        // Fast path: an existing free slice fits the data.
        if let Ok(buffer_slice) = self.allocation_planner.allocate_range(data_size) {
            self.write_queue
                .push((data, metadata, buffer_slice.clone()));
            return buffer_slice;
        }

        // No free slice is large enough. Grow the allocation plan by at least
        // `data_size`, doubling the current size to amortize future growth.
        // Only the plan grows here; the GPU buffer itself is reallocated later,
        // in `perform_writes()`.
        let buffer_size = self.allocation_planner.initial_range();
        let double_buffer_size = (buffer_size.end - buffer_size.start) * 2;
        let new_size = double_buffer_size.max(data_size);
        self.allocation_planner.grow_to(buffer_size.end + new_size);

        let buffer_slice = self
            .allocation_planner
            .allocate_range(data_size)
            .expect("allocation cannot fail: the planner just grew by at least data_size");
        self.write_queue
            .push((data, metadata, buffer_slice.clone()));
        buffer_slice
    }

    /// Upload all pending data to the GPU buffer.
    pub fn perform_writes(&mut self, render_queue: &RenderQueue, render_device: &RenderDevice) {
        // If queued allocations outgrew the current GPU buffer, reallocate it
        // (copying existing contents) before writing the new data.
        if self.allocation_planner.initial_range().end > self.buffer.size() {
            self.expand_buffer(render_device, render_queue);
        }

        let queue_count = self.write_queue.len();

        for (data, metadata, buffer_slice) in self.write_queue.drain(..) {
            let buffer_slice_size = NonZeroU64::new(buffer_slice.end - buffer_slice.start)
                .expect("queued write slices are never empty (size_in_bytes() > 0)");
            let mut buffer_view = render_queue
                .write_buffer_with(&self.buffer, buffer_slice.start, buffer_slice_size)
                .expect("write range is valid: the buffer was expanded to cover all queued slices");
            data.write_bytes_le(metadata, &mut buffer_view);
        }

        // If the queue's capacity was mostly unused this frame, drop its backing
        // allocation so a one-time burst of writes doesn't pin memory forever.
        // The explicit capacity check avoids a 0.0 / 0.0 NaN comparison on a
        // never-used queue (the original relied on `NaN < 0.3` being false).
        let queue_capacity = self.write_queue.capacity();
        if queue_capacity > 0 && (queue_count as f32 / queue_capacity as f32) < 0.3 {
            self.write_queue = Vec::new();
        }
    }

    /// Mark a section of the GPU buffer as no longer needed.
    ///
    /// The freed range becomes available to future [`Self::queue_write`] calls;
    /// the GPU buffer itself never shrinks.
    pub fn mark_slice_unused(&mut self, buffer_slice: Range<BufferAddress>) {
        self.allocation_planner.free_range(buffer_slice);
    }

    /// Get the entire buffer as a binding resource, for use in bind groups.
    pub fn binding(&self) -> BindingResource<'_> {
        self.buffer.as_entire_binding()
    }

    /// Expand the buffer by creating a new buffer and copying old data over.
    fn expand_buffer(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
        // Size the new buffer to match the planner's (already grown) range.
        let size = self.allocation_planner.initial_range();
        let new_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(self.label),
            size: size.end - size.start,
            usage: BufferUsages::STORAGE | BufferUsages::COPY_DST | BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });

        // Preserve existing contents: copy the whole old buffer into the new one.
        let mut command_encoder = render_device.create_command_encoder(&CommandEncoderDescriptor {
            label: Some("persistent_gpu_buffer_expand"),
        });
        command_encoder.copy_buffer_to_buffer(&self.buffer, 0, &new_buffer, 0, self.buffer.size());
        render_queue.submit([command_encoder.finish()]);

        self.buffer = new_buffer;
    }
}
/// A trait representing data that can be written to a [`PersistentGpuBuffer`].
///
/// # Safety
/// * All data must be a multiple of `wgpu::COPY_BUFFER_ALIGNMENT` bytes.
/// * The amount of bytes written to `buffer` in `write_bytes_le()` must match `size_in_bytes()`.
///
/// The trait is `unsafe` because the buffer's write path trusts these
/// guarantees without runtime checks: it reserves exactly `size_in_bytes()`
/// bytes per item and hands `write_bytes_le()` a slice of exactly that length.
pub unsafe trait PersistentGpuBufferable {
    /// Additional metadata associated with each item, made available during `write_bytes_le`.
    type Metadata;

    /// The size in bytes of `self`.
    ///
    /// Must be a (nonzero) multiple of `wgpu::COPY_BUFFER_ALIGNMENT`.
    fn size_in_bytes(&self) -> usize;

    /// Convert `self` + `metadata` into bytes (little-endian), and write to the provided buffer slice.
    ///
    /// `buffer_slice` is exactly `size_in_bytes()` long; implementations must fill it completely.
    fn write_bytes_le(&self, metadata: Self::Metadata, buffer_slice: &mut [u8]);
}