mirror of
https://github.com/bevyengine/bevy
synced 2024-11-25 06:00:20 +00:00
Relax BufferVec's type constraints (#12866)
# Objective

Since `BufferVec` was first introduced, `bytemuck` has added additional traits with fewer restrictions than `Pod`. Within `BufferVec`, we only rely on the constraints of `bytemuck::cast_slice` to a `u8` slice, which now only requires `T: NoUninit` — a strict superset of `Pod` types.

## Solution

Change out the `Pod` generic type constraint with `NoUninit`. Also taking the opportunity to substitute `cast_slice` with `must_cast_slice`, which avoids a runtime panic in place of a compile-time failure if `T` cannot be used.

---

## Changelog

- Changed: `BufferVec` now supports working with types containing `NoUninit` but not `Pod` members.
- Changed: `BufferVec` will now fail to compile if used with a type that cannot be safely read from. Most notably, this includes ZSTs, which would previously always panic at runtime.
This commit is contained in:
parent
3a7923ea92
commit
a4ed1b88b8
5 changed files with 12 additions and 12 deletions
|
@ -43,7 +43,7 @@ thiserror = { version = "1", optional = true }
|
|||
bitflags = "2.3"
|
||||
fixedbitset = "0.5"
|
||||
# direct dependency required for derive macro
|
||||
bytemuck = { version = "1", features = ["derive"] }
|
||||
bytemuck = { version = "1", features = ["derive", "must_cast"] }
|
||||
radsort = "0.1"
|
||||
smallvec = "1.6"
|
||||
serde = { version = "1", features = ["derive", "rc"] }
|
||||
|
|
|
@ -174,7 +174,7 @@ pub fn queue_material_meshlet_meshes<M: Material>(
|
|||
}
|
||||
|
||||
// TODO: Try using Queue::write_buffer_with() in queue_meshlet_mesh_upload() to reduce copies
|
||||
fn upload_storage_buffer<T: ShaderSize + bytemuck::Pod>(
|
||||
fn upload_storage_buffer<T: ShaderSize + bytemuck::NoUninit>(
|
||||
buffer: &mut StorageBuffer<Vec<T>>,
|
||||
render_device: &RenderDevice,
|
||||
render_queue: &RenderQueue,
|
||||
|
@ -187,7 +187,7 @@ fn upload_storage_buffer<T: ShaderSize + bytemuck::Pod>(
|
|||
|
||||
if capacity >= size {
|
||||
let inner = inner.unwrap();
|
||||
let bytes = bytemuck::cast_slice(buffer.get().as_slice());
|
||||
let bytes = bytemuck::must_cast_slice(buffer.get().as_slice());
|
||||
render_queue.write_buffer(inner, 0, bytes);
|
||||
} else {
|
||||
buffer.write_buffer(render_device, render_queue);
|
||||
|
|
|
@ -11,7 +11,7 @@ use bevy_render::{
|
|||
view::ViewVisibility,
|
||||
Extract,
|
||||
};
|
||||
use bytemuck::Pod;
|
||||
use bytemuck::NoUninit;
|
||||
|
||||
#[derive(Component)]
|
||||
pub struct MorphIndex {
|
||||
|
@ -54,7 +54,7 @@ const fn can_align(step: usize, target: usize) -> bool {
|
|||
const WGPU_MIN_ALIGN: usize = 256;
|
||||
|
||||
/// Align a [`BufferVec`] to `N` bytes by padding the end with `T::default()` values.
|
||||
fn add_to_alignment<T: Pod + Default>(buffer: &mut BufferVec<T>) {
|
||||
fn add_to_alignment<T: NoUninit + Default>(buffer: &mut BufferVec<T>) {
|
||||
let n = WGPU_MIN_ALIGN;
|
||||
let t_size = mem::size_of::<T>();
|
||||
if !can_align(n, t_size) {
|
||||
|
|
|
@ -80,7 +80,7 @@ wgpu = { version = "0.19.3", default-features = false, features = [
|
|||
naga = { version = "0.19", features = ["wgsl-in"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
bitflags = { version = "2.3", features = ["serde"] }
|
||||
bytemuck = { version = "1.5", features = ["derive"] }
|
||||
bytemuck = { version = "1.5", features = ["derive", "must_cast"] }
|
||||
downcast-rs = "1.2.0"
|
||||
thread_local = "1.1"
|
||||
thiserror = "1.0"
|
||||
|
|
|
@ -2,14 +2,14 @@ use crate::{
|
|||
render_resource::Buffer,
|
||||
renderer::{RenderDevice, RenderQueue},
|
||||
};
|
||||
use bytemuck::{cast_slice, Pod};
|
||||
use bytemuck::{must_cast_slice, NoUninit};
|
||||
use wgpu::BufferUsages;
|
||||
|
||||
/// A structure for storing raw bytes that have already been properly formatted
|
||||
/// for use by the GPU.
|
||||
///
|
||||
/// "Properly formatted" means that item data already meets the alignment and padding
|
||||
/// requirements for how it will be used on the GPU. The item type must implement [`Pod`]
|
||||
/// requirements for how it will be used on the GPU. The item type must implement [`NoUninit`]
|
||||
/// for its data representation to be directly copyable.
|
||||
///
|
||||
/// Index, vertex, and instance-rate vertex buffers have no alignment nor padding requirements and
|
||||
|
@ -28,7 +28,7 @@ use wgpu::BufferUsages;
|
|||
/// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer)
|
||||
/// * [`BufferVec`]
|
||||
/// * [`Texture`](crate::render_resource::Texture)
|
||||
pub struct BufferVec<T: Pod> {
|
||||
pub struct BufferVec<T: NoUninit> {
|
||||
values: Vec<T>,
|
||||
buffer: Option<Buffer>,
|
||||
capacity: usize,
|
||||
|
@ -38,7 +38,7 @@ pub struct BufferVec<T: Pod> {
|
|||
label_changed: bool,
|
||||
}
|
||||
|
||||
impl<T: Pod> BufferVec<T> {
|
||||
impl<T: NoUninit> BufferVec<T> {
|
||||
pub const fn new(buffer_usage: BufferUsages) -> Self {
|
||||
Self {
|
||||
values: Vec::new(),
|
||||
|
@ -132,7 +132,7 @@ impl<T: Pod> BufferVec<T> {
|
|||
self.reserve(self.values.len(), device);
|
||||
if let Some(buffer) = &self.buffer {
|
||||
let range = 0..self.item_size * self.values.len();
|
||||
let bytes: &[u8] = cast_slice(&self.values);
|
||||
let bytes: &[u8] = must_cast_slice(&self.values);
|
||||
queue.write_buffer(buffer, 0, &bytes[range]);
|
||||
}
|
||||
}
|
||||
|
@ -154,7 +154,7 @@ impl<T: Pod> BufferVec<T> {
|
|||
}
|
||||
}
|
||||
|
||||
impl<T: Pod> Extend<T> for BufferVec<T> {
|
||||
impl<T: NoUninit> Extend<T> for BufferVec<T> {
|
||||
#[inline]
|
||||
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
|
||||
self.values.extend(iter);
|
||||
|
|
Loading…
Reference in a new issue