Migrated NonZero* to NonZero<*> (#14978)

# Objective

- Fixes #14974

## Solution

- Replace all* instances of the `NonZero*` type aliases (e.g., `NonZeroU32`, `NonZeroUsize`) with the generic `NonZero<*>` form (e.g., `NonZero<u32>`, `NonZero<usize>`). *With one exception; see the Notes below.
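
For reference, a minimal before/after sketch of the change (`NonZeroU32` stands in for all of the aliases; the aliases are now defined in terms of the generic `NonZero<T>`, so both spellings name the same type):

```rust
use std::num::{NonZero, NonZeroU32};

fn main() {
    // Before: a dedicated alias per primitive width.
    let old: NonZeroU32 = NonZeroU32::new(42).unwrap();

    // After: one generic type, parameterized by the primitive.
    let new: NonZero<u32> = NonZero::<u32>::new(42).unwrap();

    // The alias is just `type NonZeroU32 = NonZero<u32>`, so the two compare directly.
    assert_eq!(old, new);
}
```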

## Testing

- CI passed locally.

---

## Notes

Within the `bevy_reflect` implementations for `std` types,
`impl_reflect_value!()` will continue to use the type aliases, because
the macro incorrectly parses the concrete type parameter (the `usize`
in `NonZero<usize>`, for example) as a generic argument. If the
`ZeroablePrimitive` trait were stable, or if the macro could be
modified to accept a finite list of types, we could migrate fully.
Commit bc13161416 (parent c816cf9072), authored by Zachary Harrold on 2024-08-30 12:37:47 +10:00 and committed by GitHub.
26 changed files with 161 additions and 155 deletions
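
A pattern that recurs throughout the hunks below: the alias carried the primitive type in its name, so `NonZeroU32::new(...)` needed no annotation, while the generic form needs a turbofish wherever the parameter cannot be inferred. A short sketch of the two call shapes:

```rust
use std::num::NonZero;

fn main() {
    // With a type annotation, the parameter is inferred:
    let latency: Option<NonZero<u32>> = NonZero::new(2);

    // In bare expression position it is not, hence the `NonZero::<u32>::new(...)`
    // turbofish spelling used throughout this diff:
    let generation = NonZero::<u32>::new(1).unwrap();

    assert_eq!(latency.unwrap().get(), 2);
    assert_eq!(generation.get(), 1);
}
```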

---

@@ -18,7 +18,7 @@ use std::{
process::{ExitCode, Termination},
};
use std::{
num::NonZeroU8,
num::NonZero,
panic::{catch_unwind, resume_unwind, AssertUnwindSafe},
};
use thiserror::Error;
@@ -1061,14 +1061,14 @@ pub enum AppExit {
Success,
/// The [`App`] experienced an unhandleable error.
/// Holds the exit code we expect our app to return.
Error(NonZeroU8),
Error(NonZero<u8>),
}
impl AppExit {
/// Creates a [`AppExit::Error`] with a error code of 1.
#[must_use]
pub const fn error() -> Self {
Self::Error(NonZeroU8::MIN)
Self::Error(NonZero::<u8>::MIN)
}
/// Returns `true` if `self` is a [`AppExit::Success`].
@@ -1089,7 +1089,7 @@ impl AppExit {
/// [`AppExit::Error`] is constructed.
#[must_use]
pub const fn from_code(code: u8) -> Self {
match NonZeroU8::new(code) {
match NonZero::<u8>::new(code) {
Some(code) => Self::Error(code),
None => Self::Success,
}

---

@@ -10,7 +10,7 @@ use bevy_render::{
texture::Image,
view::ViewUniform,
};
use std::num::NonZeroU64;
use std::num::NonZero;
#[derive(Resource)]
pub struct AutoExposurePipeline {
@@ -64,8 +64,8 @@ impl FromWorld for AutoExposurePipeline {
texture_2d(TextureSampleType::Float { filterable: false }),
texture_1d(TextureSampleType::Float { filterable: false }),
uniform_buffer::<AutoExposureCompensationCurveUniform>(false),
storage_buffer_sized(false, NonZeroU64::new(HISTOGRAM_BIN_COUNT * 4)),
storage_buffer_sized(false, NonZeroU64::new(4)),
storage_buffer_sized(false, NonZero::<u64>::new(HISTOGRAM_BIN_COUNT * 4)),
storage_buffer_sized(false, NonZero::<u64>::new(4)),
storage_buffer::<ViewUniform>(true),
),
),

---

@@ -59,7 +59,7 @@ use crate::{
};
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
use std::{fmt, hash::Hash, mem, num::NonZeroU32, sync::atomic::Ordering};
use std::{fmt, hash::Hash, mem, num::NonZero, sync::atomic::Ordering};
#[cfg(target_has_atomic = "64")]
use std::sync::atomic::AtomicI64 as AtomicIdCursor;
@@ -157,7 +157,7 @@ pub struct Entity {
// to make this struct equivalent to a u64.
#[cfg(target_endian = "little")]
index: u32,
generation: NonZeroU32,
generation: NonZero<u32>,
#[cfg(target_endian = "big")]
index: u32,
}
@@ -223,7 +223,7 @@ impl Entity {
/// Construct an [`Entity`] from a raw `index` value and a non-zero `generation` value.
/// Ensure that the generation value is never greater than `0x7FFF_FFFF`.
#[inline(always)]
pub(crate) const fn from_raw_and_generation(index: u32, generation: NonZeroU32) -> Entity {
pub(crate) const fn from_raw_and_generation(index: u32, generation: NonZero<u32>) -> Entity {
debug_assert!(generation.get() <= HIGH_MASK);
Self { index, generation }
@@ -279,7 +279,7 @@ impl Entity {
/// a component.
#[inline(always)]
pub const fn from_raw(index: u32) -> Entity {
Self::from_raw_and_generation(index, NonZeroU32::MIN)
Self::from_raw_and_generation(index, NonZero::<u32>::MIN)
}
/// Convert to a form convenient for passing outside of rust.
@@ -722,7 +722,7 @@ impl Entities {
meta.generation = IdentifierMask::inc_masked_high_by(meta.generation, 1);
if meta.generation == NonZeroU32::MIN {
if meta.generation == NonZero::<u32>::MIN {
warn!(
"Entity({}) generation wrapped on Entities::free, aliasing may occur",
entity.index
@@ -949,7 +949,7 @@ impl Entities {
#[repr(C)]
struct EntityMeta {
/// The current generation of the [`Entity`].
pub generation: NonZeroU32,
pub generation: NonZero<u32>,
/// The current location of the [`Entity`]
pub location: EntityLocation,
}
@@ -957,7 +957,7 @@ struct EntityMeta {
impl EntityMeta {
/// meta for **pending entity**
const EMPTY: EntityMeta = EntityMeta {
generation: NonZeroU32::MIN,
generation: NonZero::<u32>::MIN,
location: EntityLocation::INVALID,
};
}
@@ -1014,7 +1014,8 @@ mod tests {
#[test]
fn entity_bits_roundtrip() {
// Generation cannot be greater than 0x7FFF_FFFF else it will be an invalid Entity id
let e = Entity::from_raw_and_generation(0xDEADBEEF, NonZeroU32::new(0x5AADF00D).unwrap());
let e =
Entity::from_raw_and_generation(0xDEADBEEF, NonZero::<u32>::new(0x5AADF00D).unwrap());
assert_eq!(Entity::from_bits(e.to_bits()), e);
}
@@ -1091,65 +1092,65 @@ mod tests {
#[allow(clippy::nonminimal_bool)] // This is intentionally testing `lt` and `ge` as separate functions.
fn entity_comparison() {
assert_eq!(
Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap()),
Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap()),
Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
);
assert_ne!(
Entity::from_raw_and_generation(123, NonZeroU32::new(789).unwrap()),
Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
Entity::from_raw_and_generation(123, NonZero::<u32>::new(789).unwrap()),
Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
);
assert_ne!(
Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap()),
Entity::from_raw_and_generation(123, NonZeroU32::new(789).unwrap())
Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap()),
Entity::from_raw_and_generation(123, NonZero::<u32>::new(789).unwrap())
);
assert_ne!(
Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap()),
Entity::from_raw_and_generation(456, NonZeroU32::new(123).unwrap())
Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap()),
Entity::from_raw_and_generation(456, NonZero::<u32>::new(123).unwrap())
);
// ordering is by generation then by index
assert!(
Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
>= Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
>= Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
);
assert!(
Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
<= Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
<= Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
);
assert!(
!(Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
< Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap()))
!(Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
< Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap()))
);
assert!(
!(Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap())
> Entity::from_raw_and_generation(123, NonZeroU32::new(456).unwrap()))
!(Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap())
> Entity::from_raw_and_generation(123, NonZero::<u32>::new(456).unwrap()))
);
assert!(
Entity::from_raw_and_generation(9, NonZeroU32::new(1).unwrap())
< Entity::from_raw_and_generation(1, NonZeroU32::new(9).unwrap())
Entity::from_raw_and_generation(9, NonZero::<u32>::new(1).unwrap())
< Entity::from_raw_and_generation(1, NonZero::<u32>::new(9).unwrap())
);
assert!(
Entity::from_raw_and_generation(1, NonZeroU32::new(9).unwrap())
> Entity::from_raw_and_generation(9, NonZeroU32::new(1).unwrap())
Entity::from_raw_and_generation(1, NonZero::<u32>::new(9).unwrap())
> Entity::from_raw_and_generation(9, NonZero::<u32>::new(1).unwrap())
);
assert!(
Entity::from_raw_and_generation(1, NonZeroU32::new(1).unwrap())
< Entity::from_raw_and_generation(2, NonZeroU32::new(1).unwrap())
Entity::from_raw_and_generation(1, NonZero::<u32>::new(1).unwrap())
< Entity::from_raw_and_generation(2, NonZero::<u32>::new(1).unwrap())
);
assert!(
Entity::from_raw_and_generation(1, NonZeroU32::new(1).unwrap())
<= Entity::from_raw_and_generation(2, NonZeroU32::new(1).unwrap())
Entity::from_raw_and_generation(1, NonZero::<u32>::new(1).unwrap())
<= Entity::from_raw_and_generation(2, NonZero::<u32>::new(1).unwrap())
);
assert!(
Entity::from_raw_and_generation(2, NonZeroU32::new(2).unwrap())
> Entity::from_raw_and_generation(1, NonZeroU32::new(2).unwrap())
Entity::from_raw_and_generation(2, NonZero::<u32>::new(2).unwrap())
> Entity::from_raw_and_generation(1, NonZero::<u32>::new(2).unwrap())
);
assert!(
Entity::from_raw_and_generation(2, NonZeroU32::new(2).unwrap())
>= Entity::from_raw_and_generation(1, NonZeroU32::new(2).unwrap())
Entity::from_raw_and_generation(2, NonZero::<u32>::new(2).unwrap())
>= Entity::from_raw_and_generation(1, NonZero::<u32>::new(2).unwrap())
);
}

---

@@ -1,4 +1,4 @@
use std::num::NonZeroU32;
use std::num::NonZero;
use super::kinds::IdKind;
@@ -61,7 +61,7 @@ impl IdentifierMask {
/// Will never be greater than [`HIGH_MASK`] or less than `1`, and increments are masked to
/// never be greater than [`HIGH_MASK`].
#[inline(always)]
pub(crate) const fn inc_masked_high_by(lhs: NonZeroU32, rhs: u32) -> NonZeroU32 {
pub(crate) const fn inc_masked_high_by(lhs: NonZero<u32>, rhs: u32) -> NonZero<u32> {
let lo = (lhs.get() & HIGH_MASK).wrapping_add(rhs & HIGH_MASK);
// Checks high 32 bit for whether we have overflowed 31 bits.
let overflowed = lo >> 31;
@@ -70,7 +70,7 @@ impl IdentifierMask {
// - Adding the overflow flag will offset overflows to start at 1 instead of 0
// - The sum of `0x7FFF_FFFF` + `u32::MAX` + 1 (overflow) == `0x7FFF_FFFF`
// - If the operation doesn't overflow at 31 bits, no offsetting takes place
unsafe { NonZeroU32::new_unchecked(lo.wrapping_add(overflowed) & HIGH_MASK) }
unsafe { NonZero::<u32>::new_unchecked(lo.wrapping_add(overflowed) & HIGH_MASK) }
}
}
@@ -166,68 +166,68 @@ mod tests {
// Adding from lowest value with lowest to highest increment
// No result should ever be greater than 0x7FFF_FFFF or HIGH_MASK
assert_eq!(
NonZeroU32::MIN,
IdentifierMask::inc_masked_high_by(NonZeroU32::MIN, 0)
NonZero::<u32>::MIN,
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MIN, 0)
);
assert_eq!(
NonZeroU32::new(2).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::MIN, 1)
NonZero::<u32>::new(2).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MIN, 1)
);
assert_eq!(
NonZeroU32::new(3).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::MIN, 2)
NonZero::<u32>::new(3).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MIN, 2)
);
assert_eq!(
NonZeroU32::MIN,
IdentifierMask::inc_masked_high_by(NonZeroU32::MIN, HIGH_MASK)
NonZero::<u32>::MIN,
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MIN, HIGH_MASK)
);
assert_eq!(
NonZeroU32::MIN,
IdentifierMask::inc_masked_high_by(NonZeroU32::MIN, u32::MAX)
NonZero::<u32>::MIN,
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MIN, u32::MAX)
);
// Adding from absolute highest value with lowest to highest increment
// No result should ever be greater than 0x7FFF_FFFF or HIGH_MASK
assert_eq!(
NonZeroU32::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::MAX, 0)
NonZero::<u32>::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MAX, 0)
);
assert_eq!(
NonZeroU32::MIN,
IdentifierMask::inc_masked_high_by(NonZeroU32::MAX, 1)
NonZero::<u32>::MIN,
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MAX, 1)
);
assert_eq!(
NonZeroU32::new(2).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::MAX, 2)
NonZero::<u32>::new(2).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MAX, 2)
);
assert_eq!(
NonZeroU32::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::MAX, HIGH_MASK)
NonZero::<u32>::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MAX, HIGH_MASK)
);
assert_eq!(
NonZeroU32::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::MAX, u32::MAX)
NonZero::<u32>::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::MAX, u32::MAX)
);
// Adding from actual highest value with lowest to highest increment
// No result should ever be greater than 0x7FFF_FFFF or HIGH_MASK
assert_eq!(
NonZeroU32::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::new(HIGH_MASK).unwrap(), 0)
NonZero::<u32>::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::new(HIGH_MASK).unwrap(), 0)
);
assert_eq!(
NonZeroU32::MIN,
IdentifierMask::inc_masked_high_by(NonZeroU32::new(HIGH_MASK).unwrap(), 1)
NonZero::<u32>::MIN,
IdentifierMask::inc_masked_high_by(NonZero::<u32>::new(HIGH_MASK).unwrap(), 1)
);
assert_eq!(
NonZeroU32::new(2).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::new(HIGH_MASK).unwrap(), 2)
NonZero::<u32>::new(2).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::new(HIGH_MASK).unwrap(), 2)
);
assert_eq!(
NonZeroU32::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::new(HIGH_MASK).unwrap(), HIGH_MASK)
NonZero::<u32>::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::new(HIGH_MASK).unwrap(), HIGH_MASK)
);
assert_eq!(
NonZeroU32::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZeroU32::new(HIGH_MASK).unwrap(), u32::MAX)
NonZero::<u32>::new(HIGH_MASK).unwrap(),
IdentifierMask::inc_masked_high_by(NonZero::<u32>::new(HIGH_MASK).unwrap(), u32::MAX)
);
}
}

---

@@ -7,7 +7,7 @@
use bevy_reflect::Reflect;
use self::{error::IdentifierError, kinds::IdKind, masks::IdentifierMask};
use std::{hash::Hash, num::NonZeroU32};
use std::{hash::Hash, num::NonZero};
pub mod error;
pub(crate) mod kinds;
@@ -28,7 +28,7 @@ pub struct Identifier {
// to make this struct equivalent to a u64.
#[cfg(target_endian = "little")]
low: u32,
high: NonZeroU32,
high: NonZero<u32>,
#[cfg(target_endian = "big")]
low: u32,
}
@@ -56,7 +56,7 @@ impl Identifier {
unsafe {
Ok(Self {
low,
high: NonZeroU32::new_unchecked(packed_high),
high: NonZero::<u32>::new_unchecked(packed_high),
})
}
}
@@ -71,7 +71,7 @@ impl Identifier {
/// Returns the value of the high segment of the [`Identifier`]. This
/// does not apply any masking.
#[inline(always)]
pub const fn high(self) -> NonZeroU32 {
pub const fn high(self) -> NonZero<u32> {
self.high
}
@@ -114,7 +114,7 @@ impl Identifier {
/// This method is the fallible counterpart to [`Identifier::from_bits`].
#[inline(always)]
pub const fn try_from_bits(value: u64) -> Result<Self, IdentifierError> {
let high = NonZeroU32::new(IdentifierMask::get_high(value));
let high = NonZero::<u32>::new(IdentifierMask::get_high(value));
match high {
Some(high) => Ok(Self {

---

@@ -88,7 +88,7 @@ mod tests {
};
use bevy_tasks::{ComputeTaskPool, TaskPool};
use bevy_utils::HashSet;
use std::num::NonZeroU32;
use std::num::NonZero;
use std::{
any::TypeId,
marker::PhantomData,
@@ -1659,7 +1659,7 @@ mod tests {
);
let e4_mismatched_generation =
Entity::from_raw_and_generation(3, NonZeroU32::new(2).unwrap());
Entity::from_raw_and_generation(3, NonZero::<u32>::new(2).unwrap());
assert!(
world_b.get_or_spawn(e4_mismatched_generation).is_none(),
"attempting to spawn on top of an entity with a mismatched entity generation fails"
@@ -1754,7 +1754,8 @@ mod tests {
let e0 = world.spawn(A(0)).id();
let e1 = Entity::from_raw(1);
let e2 = world.spawn_empty().id();
let invalid_e2 = Entity::from_raw_and_generation(e2.index(), NonZeroU32::new(2).unwrap());
let invalid_e2 =
Entity::from_raw_and_generation(e2.index(), NonZero::<u32>::new(2).unwrap());
let values = vec![(e0, (B(0), C)), (e1, (B(1), C)), (invalid_e2, (B(2), C))];

---

@@ -1,7 +1,7 @@
use std::{
alloc::{handle_alloc_error, Layout},
cell::UnsafeCell,
num::NonZeroUsize,
num::NonZero,
ptr::NonNull,
};
@@ -56,7 +56,7 @@ impl BlobVec {
drop: Option<unsafe fn(OwningPtr<'_>)>,
capacity: usize,
) -> BlobVec {
let align = NonZeroUsize::new(item_layout.align()).expect("alignment must be > 0");
let align = NonZero::<usize>::new(item_layout.align()).expect("alignment must be > 0");
let data = bevy_ptr::dangling_with_align(align);
if item_layout.size() == 0 {
BlobVec {
@@ -119,7 +119,8 @@ impl BlobVec {
let available_space = self.capacity - self.len;
if available_space < additional {
// SAFETY: `available_space < additional`, so `additional - available_space > 0`
let increment = unsafe { NonZeroUsize::new_unchecked(additional - available_space) };
let increment =
unsafe { NonZero::<usize>::new_unchecked(additional - available_space) };
self.grow_exact(increment);
}
}
@@ -132,7 +133,7 @@ impl BlobVec {
#[cold]
fn do_reserve(slf: &mut BlobVec, additional: usize) {
let increment = slf.capacity.max(additional - (slf.capacity - slf.len));
let increment = NonZeroUsize::new(increment).unwrap();
let increment = NonZero::<usize>::new(increment).unwrap();
slf.grow_exact(increment);
}
@@ -148,7 +149,7 @@ impl BlobVec {
/// Panics if the new capacity overflows `usize`.
/// For ZST it panics unconditionally because ZST `BlobVec` capacity
/// is initialized to `usize::MAX` and always stays that way.
fn grow_exact(&mut self, increment: NonZeroUsize) {
fn grow_exact(&mut self, increment: NonZero<usize>) {
let new_capacity = self
.capacity
.checked_add(increment.get())

---

@@ -1,6 +1,6 @@
//! Spatial clustering of objects, currently just point and spot lights.
use std::num::NonZeroU64;
use std::num::NonZero;
use bevy_core_pipeline::core_3d::Camera3d;
use bevy_ecs::{
@@ -468,7 +468,7 @@ impl GpuClusterableObjects {
}
}
pub fn min_size(buffer_binding_type: BufferBindingType) -> NonZeroU64 {
pub fn min_size(buffer_binding_type: BufferBindingType) -> NonZero<u64> {
match buffer_binding_type {
BufferBindingType::Storage { .. } => GpuClusterableObjectsStorage::min_size(),
BufferBindingType::Uniform => GpuClusterableObjectsUniform::min_size(),
@@ -749,7 +749,7 @@ impl ViewClusterBindings {
pub fn min_size_clusterable_object_index_lists(
buffer_binding_type: BufferBindingType,
) -> NonZeroU64 {
) -> NonZero<u64> {
match buffer_binding_type {
BufferBindingType::Storage { .. } => GpuClusterableObjectIndexListsStorage::min_size(),
BufferBindingType::Uniform => GpuClusterableObjectIndexListsUniform::min_size(),
@@ -758,7 +758,7 @@ impl ViewClusterBindings {
pub fn min_size_cluster_offsets_and_counts(
buffer_binding_type: BufferBindingType,
) -> NonZeroU64 {
) -> NonZero<u64> {
match buffer_binding_type {
BufferBindingType::Storage { .. } => GpuClusterOffsetsAndCountsStorage::min_size(),
BufferBindingType::Uniform => GpuClusterOffsetsAndCountsUniform::min_size(),

---

@@ -65,7 +65,7 @@ use bevy_render::{
texture::{FallbackImage, GpuImage, Image},
};
use std::num::NonZeroU32;
use std::num::NonZero;
use std::ops::Deref;
use crate::{
@@ -217,7 +217,7 @@ pub(crate) fn get_bind_group_layout_entries(
binding_types::texture_cube(TextureSampleType::Float { filterable: true });
if binding_arrays_are_usable(render_device) {
texture_cube_binding =
texture_cube_binding.count(NonZeroU32::new(MAX_VIEW_LIGHT_PROBES as _).unwrap());
texture_cube_binding.count(NonZero::<u32>::new(MAX_VIEW_LIGHT_PROBES as _).unwrap());
}
[

---

@@ -142,7 +142,7 @@ use bevy_render::{
renderer::RenderDevice,
texture::{FallbackImage, GpuImage, Image},
};
use std::{num::NonZeroU32, ops::Deref};
use std::{num::NonZero, ops::Deref};
use bevy_asset::{AssetId, Handle};
use bevy_reflect::Reflect;
@@ -306,7 +306,7 @@ pub(crate) fn get_bind_group_layout_entries(
binding_types::texture_3d(TextureSampleType::Float { filterable: true });
if binding_arrays_are_usable(render_device) {
texture_3d_binding =
texture_3d_binding.count(NonZeroU32::new(MAX_VIEW_LIGHT_PROBES as _).unwrap());
texture_3d_binding.count(NonZero::<u32>::new(MAX_VIEW_LIGHT_PROBES as _).unwrap());
}
[

---

@@ -35,7 +35,7 @@ use bevy_render::{
use bevy_utils::tracing::error;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicU32, Ordering};
use std::{hash::Hash, num::NonZeroU32};
use std::{hash::Hash, num::NonZero};
use self::{irradiance_volume::IrradianceVolume, prelude::EnvironmentMapLight};
@@ -978,7 +978,7 @@ impl AtomicMaterialBindGroupId {
/// See also: [`AtomicU32::store`].
pub fn set(&self, id: MaterialBindGroupId) {
let id = if let Some(id) = id.0 {
NonZeroU32::from(id).get()
NonZero::<u32>::from(id).get()
} else {
0
};
@@ -990,7 +990,9 @@ impl AtomicMaterialBindGroupId {
///
/// See also: [`AtomicU32::load`].
pub fn get(&self) -> MaterialBindGroupId {
MaterialBindGroupId(NonZeroU32::new(self.0.load(Ordering::Relaxed)).map(BindGroupId::from))
MaterialBindGroupId(
NonZero::<u32>::new(self.0.load(Ordering::Relaxed)).map(BindGroupId::from),
)
}
}

---

@@ -6,7 +6,7 @@ use bevy_render::{
renderer::{RenderDevice, RenderQueue},
};
use range_alloc::RangeAllocator;
use std::{num::NonZeroU64, ops::Range};
use std::{num::NonZero, ops::Range};
/// Wrapper for a GPU buffer holding a large amount of data that persists across frames.
pub struct PersistentGpuBuffer<T: PersistentGpuBufferable> {
@@ -66,7 +66,8 @@ impl<T: PersistentGpuBufferable> PersistentGpuBuffer<T> {
let queue_count = self.write_queue.len();
for (data, metadata, buffer_slice) in self.write_queue.drain(..) {
let buffer_slice_size = NonZeroU64::new(buffer_slice.end - buffer_slice.start).unwrap();
let buffer_slice_size =
NonZero::<u64>::new(buffer_slice.end - buffer_slice.start).unwrap();
let mut buffer_view = render_queue
.write_buffer_with(&self.buffer, buffer_slice.start, buffer_slice_size)
.unwrap();

---

@@ -6,7 +6,7 @@
//! [`MeshInputUniform`]s instead and use the GPU to calculate the remaining
//! derived fields in [`MeshUniform`].
use std::num::NonZeroU64;
use std::num::NonZero;
use bevy_app::{App, Plugin};
use bevy_asset::{load_internal_asset, Handle};
@@ -408,7 +408,7 @@ pub fn prepare_preprocess_bind_groups(
// Don't use `as_entire_binding()` here; the shader reads the array
// length and the underlying buffer may be longer than the actual size
// of the vector.
let index_buffer_size = NonZeroU64::try_from(
let index_buffer_size = NonZero::<u64>::try_from(
index_buffer_vec.buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()),
)
.ok();

---

@@ -1,4 +1,4 @@
use std::{array, num::NonZeroU64, sync::Arc};
use std::{array, num::NonZero, sync::Arc};
use bevy_core_pipeline::{
core_3d::ViewTransmissionTexture,
@@ -164,7 +164,7 @@ impl From<Option<&ViewPrepassTextures>> for MeshPipelineViewLayoutKey {
fn buffer_layout(
buffer_binding_type: BufferBindingType,
has_dynamic_offset: bool,
min_binding_size: Option<NonZeroU64>,
min_binding_size: Option<NonZero<u64>>,
) -> BindGroupLayoutEntryBuilder {
match buffer_binding_type {
BufferBindingType::Uniform => uniform_buffer_sized(has_dynamic_offset, min_binding_size),

---

@@ -12,7 +12,7 @@ use core::{
fmt::{self, Formatter, Pointer},
marker::PhantomData,
mem::{align_of, ManuallyDrop},
num::NonZeroUsize,
num::NonZero,
ptr::NonNull,
};
@@ -535,10 +535,10 @@ impl<'a, T> From<&'a [T]> for ThinSlicePtr<'a, T> {
/// Creates a dangling pointer with specified alignment.
/// See [`NonNull::dangling`].
pub fn dangling_with_align(align: NonZeroUsize) -> NonNull<u8> {
pub fn dangling_with_align(align: NonZero<usize>) -> NonNull<u8> {
debug_assert!(align.is_power_of_two(), "Alignment must be power of two.");
// SAFETY: The pointer will not be null, since it was created
// from the address of a `NonZeroUsize`.
// from the address of a `NonZero<usize>`.
unsafe { NonNull::new_unchecked(align.get() as *mut u8) }
}

---

@@ -2444,11 +2444,11 @@ mod tests {
#[test]
fn nonzero_usize_impl_reflect_from_reflect() {
let a: &dyn PartialReflect = &std::num::NonZeroUsize::new(42).unwrap();
let b: &dyn PartialReflect = &std::num::NonZeroUsize::new(42).unwrap();
let a: &dyn PartialReflect = &std::num::NonZero::<usize>::new(42).unwrap();
let b: &dyn PartialReflect = &std::num::NonZero::<usize>::new(42).unwrap();
assert!(a.reflect_partial_eq(b).unwrap_or_default());
let forty_two: std::num::NonZeroUsize = FromReflect::from_reflect(a).unwrap();
assert_eq!(forty_two, std::num::NonZeroUsize::new(42).unwrap());
let forty_two: std::num::NonZero<usize> = FromReflect::from_reflect(a).unwrap();
assert_eq!(forty_two, std::num::NonZero::<usize>::new(42).unwrap());
}
#[test]

---

@@ -8,7 +8,7 @@ use encase::{
ShaderType,
};
use nonmax::NonMaxU32;
use std::{marker::PhantomData, num::NonZeroU64};
use std::{marker::PhantomData, num::NonZero};
use wgpu::{BindingResource, Limits};
// 1MB else we will make really large arrays on macOS which reports very large
@@ -69,7 +69,7 @@ impl<T: GpuArrayBufferable> BatchedUniformBuffer<T> {
}
#[inline]
pub fn size(&self) -> NonZeroU64 {
pub fn size(&self) -> NonZero<u64> {
self.temp.size()
}
@@ -141,7 +141,7 @@
const METADATA: Metadata<Self::ExtraMetadata> = T::METADATA;
fn size(&self) -> NonZeroU64 {
fn size(&self) -> NonZero<u64> {
Self::METADATA.stride().mul(self.1.max(1) as u64).0
}
}

---

@@ -1,5 +1,5 @@
use bevy_utils::all_tuples_with_size;
use std::num::NonZeroU32;
use std::num::NonZero;
use wgpu::{BindGroupLayoutEntry, BindingType, ShaderStages};
/// Helper for constructing bind group layouts.
@@ -130,7 +130,7 @@ use wgpu::{BindGroupLayoutEntry, BindingType, ShaderStages};
pub struct BindGroupLayoutEntryBuilder {
ty: BindingType,
visibility: Option<ShaderStages>,
count: Option<NonZeroU32>,
count: Option<NonZero<u32>>,
}
impl BindGroupLayoutEntryBuilder {
@@ -139,7 +139,7 @@ impl BindGroupLayoutEntryBuilder {
self
}
pub fn count(mut self, count: NonZeroU32) -> Self {
pub fn count(mut self, count: NonZero<u32>) -> Self {
self.count = Some(count);
self
}
@@ -353,7 +353,7 @@ pub mod binding_types {
BufferBindingType, SamplerBindingType, TextureSampleType, TextureViewDimension,
};
use encase::ShaderType;
use std::num::NonZeroU64;
use std::num::NonZero;
use wgpu::{StorageTextureAccess, TextureFormat};
use super::*;
@@ -364,7 +364,7 @@
pub fn storage_buffer_sized(
has_dynamic_offset: bool,
min_binding_size: Option<NonZeroU64>,
min_binding_size: Option<NonZero<u64>>,
) -> BindGroupLayoutEntryBuilder {
BindingType::Buffer {
ty: BufferBindingType::Storage { read_only: false },
@@ -382,7 +382,7 @@
pub fn storage_buffer_read_only_sized(
has_dynamic_offset: bool,
min_binding_size: Option<NonZeroU64>,
min_binding_size: Option<NonZero<u64>>,
) -> BindGroupLayoutEntryBuilder {
BindingType::Buffer {
ty: BufferBindingType::Storage { read_only: true },
@@ -398,7 +398,7 @@
pub fn uniform_buffer_sized(
has_dynamic_offset: bool,
min_binding_size: Option<NonZeroU64>,
min_binding_size: Option<NonZero<u64>>,
) -> BindGroupLayoutEntryBuilder {
BindingType::Buffer {
ty: BufferBindingType::Uniform,

---

@@ -149,7 +149,7 @@ macro_rules! render_resource_wrapper {
macro_rules! define_atomic_id {
($atomic_id_type:ident) => {
#[derive(Copy, Clone, Hash, Eq, PartialEq, PartialOrd, Ord, Debug)]
pub struct $atomic_id_type(core::num::NonZeroU32);
pub struct $atomic_id_type(core::num::NonZero<u32>);
// We use new instead of default to indicate that each ID created will be unique.
#[allow(clippy::new_without_default)]
@@ -160,7 +160,7 @@ macro_rules! define_atomic_id {
static COUNTER: AtomicU32 = AtomicU32::new(1);
let counter = COUNTER.fetch_add(1, Ordering::Relaxed);
Self(core::num::NonZeroU32::new(counter).unwrap_or_else(|| {
Self(core::num::NonZero::<u32>::new(counter).unwrap_or_else(|| {
panic!(
"The system ran out of unique `{}`s.",
stringify!($atomic_id_type)
@@ -169,14 +169,14 @@
}
}
impl From<$atomic_id_type> for core::num::NonZeroU32 {
impl From<$atomic_id_type> for core::num::NonZero<u32> {
fn from(value: $atomic_id_type) -> Self {
value.0
}
}
impl From<core::num::NonZeroU32> for $atomic_id_type {
fn from(value: core::num::NonZeroU32) -> Self {
impl From<core::num::NonZero<u32>> for $atomic_id_type {
fn from(value: core::num::NonZero<u32>) -> Self {
Self(value)
}
}

---

@@ -1,4 +1,4 @@
use std::{marker::PhantomData, num::NonZeroU64};
use std::{marker::PhantomData, num::NonZero};
use crate::{
render_resource::Buffer,
@@ -309,7 +309,7 @@ impl<T: ShaderType + WriteInto> DynamicUniformBuffer<T> {
if let Some(buffer) = self.buffer.as_deref() {
let buffer_view = queue
.write_buffer_with(buffer, 0, NonZeroU64::new(buffer.size())?)
.write_buffer_with(buffer, 0, NonZero::<u64>::new(buffer.size())?)
.unwrap();
Some(DynamicUniformBufferWriter {
buffer: encase::DynamicUniformBuffer::new_with_alignment(

---

@@ -13,7 +13,7 @@ use bevy_window::{
};
use bevy_winit::CustomCursorCache;
use std::{
num::NonZeroU32,
num::NonZero,
ops::{Deref, DerefMut},
};
use wgpu::{
@@ -63,7 +63,7 @@ pub struct ExtractedWindow {
pub physical_width: u32,
pub physical_height: u32,
pub present_mode: PresentMode,
pub desired_maximum_frame_latency: Option<NonZeroU32>,
pub desired_maximum_frame_latency: Option<NonZero<u32>>,
/// Note: this will not always be the swap chain texture view. When taking a screenshot,
/// this will point to an alternative texture instead to allow for copying the render result
/// to CPU memory.
@@ -395,7 +395,7 @@ pub fn create_surfaces(
},
desired_maximum_frame_latency: window
.desired_maximum_frame_latency
.map(NonZeroU32::get)
.map(NonZero::<u32>::get)
.unwrap_or(DEFAULT_DESIRED_MAXIMUM_FRAME_LATENCY),
alpha_mode: match window.alpha_mode {
CompositeAlphaMode::Auto => wgpu::CompositeAlphaMode::Auto,

---

@@ -55,7 +55,7 @@ pub mod prelude {
};
}
use std::num::NonZeroUsize;
use std::num::NonZero;
/// Gets the logical CPU core count available to the current process.
///
@@ -65,6 +65,6 @@ use std::num::NonZeroUsize;
/// This will always return at least 1.
pub fn available_parallelism() -> usize {
std::thread::available_parallelism()
.map(NonZeroUsize::get)
.map(NonZero::<usize>::get)
.unwrap_or(1)
}

---

@@ -12,7 +12,7 @@ use bevy_transform::prelude::GlobalTransform;
use bevy_utils::warn_once;
use bevy_window::{PrimaryWindow, WindowRef};
use smallvec::SmallVec;
use std::num::{NonZeroI16, NonZeroU16};
use std::num::NonZero;
use thiserror::Error;
/// Base component for a UI node, which also provides the computed size of the node.
@@ -1481,15 +1481,15 @@ pub struct GridPlacement {
/// Lines are 1-indexed.
/// Negative indexes count backwards from the end of the grid.
/// Zero is not a valid index.
pub(crate) start: Option<NonZeroI16>,
pub(crate) start: Option<NonZero<i16>>,
/// How many grid tracks the item should span.
/// Defaults to 1.
pub(crate) span: Option<NonZeroU16>,
pub(crate) span: Option<NonZero<u16>>,
/// The grid line at which the item should end.
/// Lines are 1-indexed.
/// Negative indexes count backwards from the end of the grid.
/// Zero is not a valid index.
pub(crate) end: Option<NonZeroI16>,
pub(crate) end: Option<NonZero<i16>>,
}
impl GridPlacement {
@@ -1497,7 +1497,7 @@ impl GridPlacement {
pub const DEFAULT: Self = Self {
start: None,
// SAFETY: This is trivially safe as 1 is non-zero.
span: Some(unsafe { NonZeroU16::new_unchecked(1) }),
span: Some(unsafe { NonZero::<u16>::new_unchecked(1) }),
end: None,
};
@@ -1614,17 +1614,17 @@ impl GridPlacement {
/// Returns the grid line at which the item should start, or `None` if not set.
pub fn get_start(self) -> Option<i16> {
self.start.map(NonZeroI16::get)
self.start.map(NonZero::<i16>::get)
}
/// Returns the grid line at which the item should end, or `None` if not set.
pub fn get_end(self) -> Option<i16> {
self.end.map(NonZeroI16::get)
self.end.map(NonZero::<i16>::get)
}
/// Returns span for this grid item, or `None` if not set.
pub fn get_span(self) -> Option<u16> {
self.span.map(NonZeroU16::get)
self.span.map(NonZero::<u16>::get)
}
}
@@ -1634,17 +1634,17 @@ impl Default for GridPlacement {
}
}
/// Convert an `i16` to `NonZeroI16`, fails on `0` and returns the `InvalidZeroIndex` error.
fn try_into_grid_index(index: i16) -> Result<Option<NonZeroI16>, GridPlacementError> {
/// Convert an `i16` to `NonZero<i16>`, fails on `0` and returns the `InvalidZeroIndex` error.
fn try_into_grid_index(index: i16) -> Result<Option<NonZero<i16>>, GridPlacementError> {
Ok(Some(
NonZeroI16::new(index).ok_or(GridPlacementError::InvalidZeroIndex)?,
NonZero::<i16>::new(index).ok_or(GridPlacementError::InvalidZeroIndex)?,
))
}
/// Convert a `u16` to `NonZeroU16`, fails on `0` and returns the `InvalidZeroSpan` error.
fn try_into_grid_span(span: u16) -> Result<Option<NonZeroU16>, GridPlacementError> {
/// Convert a `u16` to `NonZero<u16>`, fails on `0` and returns the `InvalidZeroSpan` error.
fn try_into_grid_span(span: u16) -> Result<Option<NonZero<u16>>, GridPlacementError> {
Ok(Some(
NonZeroU16::new(span).ok_or(GridPlacementError::InvalidZeroSpan)?,
NonZero::<u16>::new(span).ok_or(GridPlacementError::InvalidZeroSpan)?,
))
}

---

@@ -1,4 +1,4 @@
use std::num::NonZeroU32;
use std::num::NonZero;
use bevy_ecs::{
entity::{Entity, EntityMapper, MapEntities},
@@ -279,7 +279,7 @@ pub struct Window {
///
/// [`wgpu::SurfaceConfiguration::desired_maximum_frame_latency`]:
/// https://docs.rs/wgpu/latest/wgpu/type.SurfaceConfiguration.html#structfield.desired_maximum_frame_latency
pub desired_maximum_frame_latency: Option<NonZeroU32>,
pub desired_maximum_frame_latency: Option<NonZero<u32>>,
/// Sets whether this window recognizes [`PinchGesture`](https://docs.rs/bevy/latest/bevy/input/gestures/struct.PinchGesture.html)
///
/// ## Platform-specific

---

@@ -381,7 +381,7 @@ impl render_graph::Node for ImageCopyDriver {
layout: ImageDataLayout {
offset: 0,
bytes_per_row: Some(
std::num::NonZeroU32::new(padded_bytes_per_row as u32)
std::num::NonZero::<u32>::new(padded_bytes_per_row as u32)
.unwrap()
.into(),
),

---

@@ -17,7 +17,7 @@ use bevy::{
RenderApp,
},
};
use std::{num::NonZeroU32, process::exit};
use std::{num::NonZero, process::exit};
/// This example uses a shader source file from the assets subdirectory
const SHADER_ASSET_PATH: &str = "shaders/texture_binding_array.wgsl";
@@ -166,7 +166,7 @@ impl AsBindGroup for BindlessMaterial {
(
0,
texture_2d(TextureSampleType::Float { filterable: true })
.count(NonZeroU32::new(MAX_TEXTURE_COUNT as u32).unwrap()),
.count(NonZero::<u32>::new(MAX_TEXTURE_COUNT as u32).unwrap()),
),
// Sampler
//
@@ -177,7 +177,7 @@ impl AsBindGroup for BindlessMaterial {
//
// ```
// sampler(SamplerBindingType::Filtering)
// .count(NonZeroU32::new(MAX_TEXTURE_COUNT as u32).unwrap()),
// .count(NonZero::<u32>::new(MAX_TEXTURE_COUNT as u32).unwrap()),
// ```
//
// One may need to pay attention to the limit of sampler binding