Remove redundant information and optimize dynamic allocations in Table (#12929)

# Objective

- fix #12853
- Make `Table::allocate` faster

## Solution
The PR consists of multiple steps:

1) For the component data: create a new data structure that's similar to
`BlobVec` but doesn't store `len` & `capacity` inside of it: `BlobArray`
(name suggestions welcome)
2) For the `Tick` data: create a new data structure that's similar to
`ThinSlicePtr` but supports dynamic reallocation: `ThinArrayPtr` (name
suggestions welcome)
3) Create a new data structure that's very similar to `Column` but
doesn't store `len` & `capacity` inside of it: `ThinColumn`
4) Adjust the `Table` implementation to use `ThinColumn` instead of
`Column`

The result is that only one set of `len` & `capacity` is stored in
`Table`: the one in `Table::entities`.
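
Roughly, the new layout looks like the following sketch (simplified and illustrative only; the real definitions live in `bevy_ecs::storage` and carry extra fields such as the drop function, the debug-only capacity, and the `track_change_detection` data):

```rust
use std::alloc::Layout;
use std::cell::UnsafeCell;
use std::ptr::NonNull;

// Stand-ins so the sketch compiles on its own.
struct Tick(u32);
struct Entity(u64);

// Type-erased component storage: just a layout and a pointer, no len/capacity.
struct BlobArray {
    item_layout: Layout,
    data: NonNull<u8>, // allocated with array_layout(item_layout, capacity)
}

// Typed storage for ticks: again, only a pointer.
struct ThinArrayPtr<T> {
    data: NonNull<T>,
}

// A column is three thin arrays that all share the table's len/capacity.
struct ThinColumn {
    data: BlobArray,
    added_ticks: ThinArrayPtr<UnsafeCell<Tick>>,
    changed_ticks: ThinArrayPtr<UnsafeCell<Tick>>,
}

// Only the table knows the shared len & capacity, via `entities`.
struct Table {
    columns: Vec<ThinColumn>, // keyed by ComponentId in the real code
    entities: Vec<Entity>,    // the one and only len & capacity
}
```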

### Notes Regarding Performance
Apart from shaving off some excess memory in `Table`, the changes also
bring a noteworthy performance improvement: the previous implementation
relied on `Vec::reserve` & `BlobVec::reserve`, which redundantly
repeated the same capacity check (`capacity == len`) once per column.
Now that check can be made a single time at the `Table` level, because
the capacity and length of all of its columns are synchronized, saving
N branches (one per column) per allocation. The result is a respectable
performance improvement for every `Table::reserve` (and subsequently
`Table::allocate`) call.
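
To make the branch-counting argument concrete, here is a minimal sketch of a single-check reserve, using a toy table whose columns are plain `Vec<u8>` stand-ins (the real code grows type-erased `ThinColumn`s via `alloc`/`realloc`):

```rust
// Illustrative sketch only; not the actual Table::reserve implementation.
struct ToyTable {
    columns: Vec<Vec<u8>>, // stand-ins for ThinColumns (no per-column len/capacity)
    len: usize,
    capacity: usize, // the single capacity shared by every column
}

impl ToyTable {
    fn reserve(&mut self, additional: usize) {
        // One branch for the whole table, instead of one branch per column:
        if self.len + additional > self.capacity {
            let new_capacity = (self.len + additional).next_power_of_two();
            for column in &mut self.columns {
                // Grow each column's storage unconditionally; the real code
                // calls ThinColumn::alloc / ThinColumn::realloc here.
                column.resize(new_capacity, 0);
            }
            self.capacity = new_capacity;
        }
    }
}
```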

I'm hesitant to give exact numbers because I don't have a lot of
experience in profiling and benchmarking, but these are the results I
got so far:

*`add_remove_big/table` benchmark after the implementation:*


![after_add_remove_big_table](https://github.com/bevyengine/bevy/assets/46227443/b667da29-1212-4020-8bb0-ec0f15bb5f8a)

*`add_remove_big/table` benchmark in main branch (measured in comparison
to the implementation):*


![main_add_remove_big_table](https://github.com/bevyengine/bevy/assets/46227443/41abb92f-3112-4e01-b935-99696eb2fe58)

*`add_remove_very_big/table` benchmark after the implementation:*


![after_add_remove_very_big](https://github.com/bevyengine/bevy/assets/46227443/f268a155-295b-4f55-ab02-f8a9dcc64fc2)

*`add_remove_very_big/table` benchmark in main branch (measured in
comparison to the implementation):*


![main_add_remove_very_big](https://github.com/bevyengine/bevy/assets/46227443/78b4e3a6-b255-47c9-baee-1a24c25b9aea)

cc @james7132 to verify

---

## Changelog

- New data structure that's similar to `BlobVec` but doesn't store `len`
& `capacity` inside of it: `BlobArray`
- New data structure that's similar to `ThinSlicePtr` but supports
dynamic reallocation: `ThinArrayPtr`
- New data structure that's very similar to `Column` but doesn't store
`len` & `capacity` inside of it: `ThinColumn`
- Adjust the `Table` implementation to use `ThinColumn` instead of
`Column`
- New benchmark: `add_remove_very_big` to benchmark the performance of
spawning a lot of entities with a lot of components (15) each

## Migration Guide

`Table` now uses `ThinColumn` instead of `Column`. That means that
methods that previously returned `Column` will now return `ThinColumn`
instead.

`ThinColumn` has a much more limited and low-level API, but you can
still achieve the same things in `ThinColumn` as you did in `Column`.
For example, instead of calling `Column::get_added_tick`, you'd call
`ThinColumn::get_added_ticks_slice` and index it to get the specific
added tick.
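
A rough sketch of that migration, assuming a `column: &ThinColumn`, its owning `table: &Table`, and a `row: TableRow` are already in scope (exact call sites will differ):

```rust
// Before, with `Column` (the column knew its own length):
// let added = column.get_added_tick(row);

// After, with `ThinColumn`: fetch the whole slice and index into it.
// SAFETY (sketch): `table.entity_count()` is the true length of this column,
// and `row` is in bounds for this table.
let added_tick = unsafe {
    let ticks = column.get_added_ticks_slice(table.entity_count());
    *ticks[row.as_usize()].get() // read the Tick out of its UnsafeCell
};
```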

---------

Co-authored-by: James Liu <contact@jamessliu.com>
18 changed files with 2630 additions and 1210 deletions

.gitignore

@ -6,6 +6,7 @@ Cargo.lock
.cargo/config.toml
/.idea
/.vscode
.zed
/benches/target
/tools/compile_fail_utils/target
dxcompiler.dll


@ -0,0 +1,111 @@
#![allow(dead_code)]
use bevy_ecs::prelude::*;
use glam::*;
#[derive(Component, Copy, Clone)]
struct A<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct B<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct C<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct D<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct E<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct F<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct Z<const N: usize>;
pub struct Benchmark(World, Vec<Entity>);
impl Benchmark {
pub fn new() -> Self {
let mut world = World::default();
let mut entities = Vec::with_capacity(10_000);
for _ in 0..10_000 {
entities.push(
world
.spawn((
(
A::<1>(Mat4::from_scale(Vec3::ONE)),
B::<1>(Mat4::from_scale(Vec3::ONE)),
C::<1>(Mat4::from_scale(Vec3::ONE)),
D::<1>(Mat4::from_scale(Vec3::ONE)),
E::<1>(Mat4::from_scale(Vec3::ONE)),
A::<2>(Mat4::from_scale(Vec3::ONE)),
B::<2>(Mat4::from_scale(Vec3::ONE)),
C::<2>(Mat4::from_scale(Vec3::ONE)),
D::<2>(Mat4::from_scale(Vec3::ONE)),
E::<2>(Mat4::from_scale(Vec3::ONE)),
),
(
A::<3>(Mat4::from_scale(Vec3::ONE)),
B::<3>(Mat4::from_scale(Vec3::ONE)),
C::<3>(Mat4::from_scale(Vec3::ONE)),
D::<3>(Mat4::from_scale(Vec3::ONE)),
E::<3>(Mat4::from_scale(Vec3::ONE)),
A::<4>(Mat4::from_scale(Vec3::ONE)),
B::<4>(Mat4::from_scale(Vec3::ONE)),
C::<4>(Mat4::from_scale(Vec3::ONE)),
D::<4>(Mat4::from_scale(Vec3::ONE)),
E::<4>(Mat4::from_scale(Vec3::ONE)),
),
(
A::<5>(Mat4::from_scale(Vec3::ONE)),
B::<5>(Mat4::from_scale(Vec3::ONE)),
C::<5>(Mat4::from_scale(Vec3::ONE)),
D::<5>(Mat4::from_scale(Vec3::ONE)),
E::<5>(Mat4::from_scale(Vec3::ONE)),
A::<6>(Mat4::from_scale(Vec3::ONE)),
B::<6>(Mat4::from_scale(Vec3::ONE)),
C::<6>(Mat4::from_scale(Vec3::ONE)),
D::<6>(Mat4::from_scale(Vec3::ONE)),
E::<6>(Mat4::from_scale(Vec3::ONE)),
),
(
A::<7>(Mat4::from_scale(Vec3::ONE)),
B::<7>(Mat4::from_scale(Vec3::ONE)),
C::<7>(Mat4::from_scale(Vec3::ONE)),
D::<7>(Mat4::from_scale(Vec3::ONE)),
E::<7>(Mat4::from_scale(Vec3::ONE)),
Z::<1>,
Z::<2>,
Z::<3>,
Z::<4>,
Z::<5>,
Z::<6>,
Z::<7>,
),
))
.id(),
);
}
Self(world, entities)
}
pub fn run(&mut self) {
for entity in &self.1 {
self.0.entity_mut(*entity).insert((
F::<1>(Mat4::from_scale(Vec3::ONE)),
F::<2>(Mat4::from_scale(Vec3::ONE)),
F::<3>(Mat4::from_scale(Vec3::ONE)),
F::<4>(Mat4::from_scale(Vec3::ONE)),
F::<5>(Mat4::from_scale(Vec3::ONE)),
F::<6>(Mat4::from_scale(Vec3::ONE)),
F::<7>(Mat4::from_scale(Vec3::ONE)),
));
}
for entity in &self.1 {
self.0
.entity_mut(*entity)
.remove::<(F<1>, F<2>, F<3>, F<4>, F<5>, F<6>, F<7>)>();
self.0
.entity_mut(*entity)
.remove::<(Z<1>, Z<2>, Z<3>, Z<4>, Z<5>, Z<6>, Z<7>)>();
}
}
}


@ -4,6 +4,7 @@ mod add_remove_big_sparse_set;
mod add_remove_big_table;
mod add_remove_sparse_set;
mod add_remove_table;
mod add_remove_very_big_table;
mod archetype_updates;
mod insert_simple;
mod insert_simple_unbatched;
@ -14,6 +15,7 @@ criterion_group!(
components_benches,
add_remove,
add_remove_big,
add_remove_very_big,
insert_simple,
no_archetypes,
added_archetypes,
@ -49,6 +51,17 @@ fn add_remove_big(c: &mut Criterion) {
group.finish();
}
fn add_remove_very_big(c: &mut Criterion) {
let mut group = c.benchmark_group("add_remove_very_big");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.bench_function("table", |b| {
let mut bench = add_remove_very_big_table::Benchmark::new();
b.iter(move || bench.run());
});
group.finish();
}
fn insert_simple(c: &mut Criterion) {
let mut group = c.benchmark_group("insert_simple");
group.warm_up_time(std::time::Duration::from_millis(500));


@ -517,33 +517,30 @@ impl BundleInfo {
let component_id = *self.component_ids.get_unchecked(bundle_component);
match storage_type {
StorageType::Table => {
let column =
// SAFETY: If component_id is in self.component_ids, BundleInfo::new requires that
// the target table contains the component.
unsafe { table.get_column_mut(component_id).debug_checked_unwrap() };
// SAFETY: bundle_component is a valid index for this bundle
let status = unsafe { bundle_component_status.get_status(bundle_component) };
// SAFETY: If component_id is in self.component_ids, BundleInfo::new requires that
// the target table contains the component.
let column = table.get_column_mut(component_id).debug_checked_unwrap();
match (status, insert_mode) {
(ComponentStatus::Added, _) => {
column.initialize(
(ComponentStatus::Added, _) => column.initialize(
table_row,
component_ptr,
change_tick,
#[cfg(feature = "track_change_detection")]
caller,
);
}
(ComponentStatus::Existing, InsertMode::Replace) => {
column.replace(
),
(ComponentStatus::Existing, InsertMode::Replace) => column.replace(
table_row,
component_ptr,
change_tick,
#[cfg(feature = "track_change_detection")]
caller,
);
}
),
(ComponentStatus::Existing, InsertMode::Keep) => {
column.drop(component_ptr);
if let Some(drop_fn) = table.get_drop_for(component_id) {
drop_fn(component_ptr);
}
}
}
}


@ -623,7 +623,6 @@ mod tests {
.collect::<HashSet<_>>(),
HashSet::from([(e1, A(1), B(3)), (e2, A(2), B(4))])
);
assert_eq!(world.entity_mut(e1).take::<A>(), Some(A(1)));
assert_eq!(
world


@ -982,9 +982,8 @@ unsafe impl<T: Component> WorldQuery for &T {
) {
fetch.table_components = Some(
table
.get_column(component_id)
.get_data_slice_for(component_id)
.debug_checked_unwrap()
.get_data_slice()
.into(),
);
}
@ -1147,11 +1146,11 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> {
) {
let column = table.get_column(component_id).debug_checked_unwrap();
fetch.table_data = Some((
column.get_data_slice().into(),
column.get_added_ticks_slice().into(),
column.get_changed_ticks_slice().into(),
column.get_data_slice(table.entity_count()).into(),
column.get_added_ticks_slice(table.entity_count()).into(),
column.get_changed_ticks_slice(table.entity_count()).into(),
#[cfg(feature = "track_change_detection")]
column.get_changed_by_slice().into(),
column.get_changed_by_slice(table.entity_count()).into(),
#[cfg(not(feature = "track_change_detection"))]
(),
));
@ -1346,11 +1345,11 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T {
) {
let column = table.get_column(component_id).debug_checked_unwrap();
fetch.table_data = Some((
column.get_data_slice().into(),
column.get_added_ticks_slice().into(),
column.get_changed_ticks_slice().into(),
column.get_data_slice(table.entity_count()).into(),
column.get_added_ticks_slice(table.entity_count()).into(),
column.get_changed_ticks_slice(table.entity_count()).into(),
#[cfg(feature = "track_change_detection")]
column.get_changed_by_slice().into(),
column.get_changed_by_slice(table.entity_count()).into(),
#[cfg(not(feature = "track_change_detection"))]
(),
));


@ -3,7 +3,7 @@ use crate::{
component::{Component, ComponentId, Components, StorageType, Tick},
entity::Entity,
query::{DebugCheckedUnwrap, FilteredAccess, WorldQuery},
storage::{Column, ComponentSparseSet, Table, TableRow},
storage::{ComponentSparseSet, Table, TableRow},
world::{unsafe_world_cell::UnsafeWorldCell, World},
};
use bevy_ptr::{ThinSlicePtr, UnsafeCellDeref};
@ -686,7 +686,9 @@ unsafe impl<T: Component> WorldQuery for Added<T> {
table: &'w Table,
) {
fetch.table_ticks = Some(
Column::get_added_ticks_slice(table.get_column(component_id).debug_checked_unwrap())
table
.get_added_ticks_slice_for(component_id)
.debug_checked_unwrap()
.into(),
);
}
@ -902,7 +904,9 @@ unsafe impl<T: Component> WorldQuery for Changed<T> {
table: &'w Table,
) {
fetch.table_ticks = Some(
Column::get_changed_ticks_slice(table.get_column(component_id).debug_checked_unwrap())
table
.get_changed_ticks_slice_for(component_id)
.debug_checked_unwrap()
.into(),
);
}


@ -53,6 +53,40 @@ impl<T> DebugCheckedUnwrap for Option<T> {
}
}
// These two impls are explicitly split to ensure that the unreachable! macro
// does not cause inlining to fail when compiling in release mode.
#[cfg(debug_assertions)]
impl<T, U> DebugCheckedUnwrap for Result<T, U> {
type Item = T;
#[inline(always)]
#[track_caller]
unsafe fn debug_checked_unwrap(self) -> Self::Item {
if let Ok(inner) = self {
inner
} else {
unreachable!()
}
}
}
// These two impls are explicitly split to ensure that the unreachable! macro
// does not cause inlining to fail when compiling in release mode.
#[cfg(not(debug_assertions))]
impl<T, U> DebugCheckedUnwrap for Result<T, U> {
type Item = T;
#[inline(always)]
#[track_caller]
unsafe fn debug_checked_unwrap(self) -> Self::Item {
if let Ok(inner) = self {
inner
} else {
std::hint::unreachable_unchecked()
}
}
}
#[cfg(not(debug_assertions))]
impl<T> DebugCheckedUnwrap for Option<T> {
type Item = T;
@ -69,13 +103,12 @@ impl<T> DebugCheckedUnwrap for Option<T> {
#[cfg(test)]
mod tests {
use bevy_ecs_macros::{QueryData, QueryFilter};
use crate::prelude::{AnyOf, Changed, Entity, Or, QueryState, With, Without};
use crate::query::{ArchetypeFilter, Has, QueryCombinationIter, ReadOnlyQueryData};
use crate::schedule::{IntoSystemConfigs, Schedule};
use crate::system::{IntoSystem, Query, System, SystemState};
use crate::{self as bevy_ecs, component::Component, world::World};
use bevy_ecs_macros::{QueryData, QueryFilter};
use std::any::type_name;
use std::collections::HashSet;
use std::fmt::Debug;


@ -0,0 +1,495 @@
use super::blob_vec::array_layout;
use crate::storage::blob_vec::array_layout_unchecked;
use bevy_ptr::{OwningPtr, Ptr, PtrMut};
use bevy_utils::OnDrop;
use std::{
alloc::{handle_alloc_error, Layout},
cell::UnsafeCell,
num::NonZeroUsize,
ptr::NonNull,
};
/// A flat, type-erased data storage type similar to a [`BlobVec`](super::blob_vec::BlobVec), but with the length and capacity cut out
/// for performance reasons. This type is reliant on its owning type to store the capacity and length information.
///
/// Used to densely store homogeneous ECS data. A blob is usually just an arbitrary block of contiguous memory without any identity, and
/// could be used to represent any arbitrary data (i.e. string, arrays, etc). This type only stores meta-data about the Blob that it stores,
/// and a pointer to the location of the start of the array, similar to a C array.
pub(super) struct BlobArray {
item_layout: Layout,
// the `data` ptr's layout is always `array_layout(item_layout, capacity)`
data: NonNull<u8>,
// None if the underlying type doesn't need to be dropped
pub drop: Option<unsafe fn(OwningPtr<'_>)>,
#[cfg(debug_assertions)]
capacity: usize,
}
impl BlobArray {
/// Create a new [`BlobArray`] with a specified `capacity`.
/// If `capacity` is 0, no allocations will be made.
///
/// `drop` is an optional function pointer that is meant to be invoked when any element in the [`BlobArray`]
/// should be dropped. For all Rust-based types, this should match 1:1 with the implementation of [`Drop`]
/// if present, and should be `None` if `T: !Drop`. For non-Rust based types, this should match any cleanup
/// processes typically associated with the stored element.
///
/// # Safety
/// `drop` should be safe to call with an [`OwningPtr`] pointing to any item that's been placed into this [`BlobArray`].
/// If `drop` is `None`, the items will be leaked. This should generally be set as None based on [`needs_drop`].
///
/// [`needs_drop`]: core::mem::needs_drop
pub unsafe fn with_capacity(
item_layout: Layout,
drop_fn: Option<unsafe fn(OwningPtr<'_>)>,
capacity: usize,
) -> Self {
if capacity == 0 {
let align = NonZeroUsize::new(item_layout.align()).expect("alignment must be > 0");
let data = bevy_ptr::dangling_with_align(align);
Self {
item_layout,
drop: drop_fn,
data,
#[cfg(debug_assertions)]
capacity,
}
} else {
let mut arr = Self::with_capacity(item_layout, drop_fn, 0);
// SAFETY: `capacity` > 0
unsafe { arr.alloc(NonZeroUsize::new_unchecked(capacity)) }
arr
}
}
/// Returns the [`Layout`] of the element type stored in the vector.
#[inline]
pub fn layout(&self) -> Layout {
self.item_layout
}
/// Return `true` if this [`BlobArray`] stores `ZSTs`.
pub fn is_zst(&self) -> bool {
self.item_layout.size() == 0
}
/// Returns a reference to the element at `index`, without doing bounds checking.
///
/// *`len` refers to the length of the array, the number of elements that have been initialized, and are safe to read.
/// Just like [`Vec::len`], or [`BlobVec::len`](super::blob_vec::BlobVec::len).*
///
/// # Safety
/// - The element at index `index` is safe to access.
/// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`)
#[inline]
pub unsafe fn get_unchecked(&self, index: usize) -> Ptr<'_> {
#[cfg(debug_assertions)]
debug_assert!(index < self.capacity);
let size = self.item_layout.size();
// SAFETY:
// - The caller ensures that `index` fits in this array,
// so this operation will not overflow the original allocation.
// - `size` is a multiple of the erased type's alignment,
// so adding a multiple of `size` will preserve alignment.
unsafe { self.get_ptr().byte_add(index * size) }
}
/// Returns a mutable reference to the element at `index`, without doing bounds checking.
///
/// *`len` refers to the length of the array, the number of elements that have been initialized, and are safe to read.
/// Just like [`Vec::len`], or [`BlobVec::len`](super::blob_vec::BlobVec::len).*
///
/// # Safety
/// - The element at index `index` is safe to access.
/// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`)
#[inline]
pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> PtrMut<'_> {
#[cfg(debug_assertions)]
debug_assert!(index < self.capacity);
let size = self.item_layout.size();
// SAFETY:
// - The caller ensures that `index` fits in this vector,
// so this operation will not overflow the original allocation.
// - `size` is a multiple of the erased type's alignment,
// so adding a multiple of `size` will preserve alignment.
unsafe { self.get_ptr_mut().byte_add(index * size) }
}
/// Gets a [`Ptr`] to the start of the array
#[inline]
pub fn get_ptr(&self) -> Ptr<'_> {
// SAFETY: the inner data will remain valid for as long as 'self.
unsafe { Ptr::new(self.data) }
}
/// Gets a [`PtrMut`] to the start of the array
#[inline]
pub fn get_ptr_mut(&mut self) -> PtrMut<'_> {
// SAFETY: the inner data will remain valid for as long as 'self.
unsafe { PtrMut::new(self.data) }
}
/// Get a slice of the first `slice_len` elements in [`BlobArray`] as if it were an array with elements of type `T`
/// To get a slice of the entire array, the caller must pass `len` as `slice_len`.
///
/// *`len` refers to the length of the array, the number of elements that have been initialized, and are safe to read.
/// Just like [`Vec::len`], or [`BlobVec::len`](super::blob_vec::BlobVec::len).*
///
/// # Safety
/// - The type `T` must be the type of the items in this [`BlobArray`].
/// - `slice_len` <= `len`
pub unsafe fn get_sub_slice<T>(&self, slice_len: usize) -> &[UnsafeCell<T>] {
#[cfg(debug_assertions)]
debug_assert!(slice_len <= self.capacity);
// SAFETY: the inner data will remain valid for as long as 'self.
unsafe { std::slice::from_raw_parts(self.data.as_ptr() as *const UnsafeCell<T>, slice_len) }
}
/// Clears the array, i.e. removing (and dropping) all of the elements.
/// Note that this method has no effect on the allocated capacity of the vector.
///
/// Note that this method will behave exactly the same as [`Vec::clear`].
///
/// # Safety
/// - For every element with index `i`, if `i` < `len`: It must be safe to call [`Self::get_unchecked_mut`] with `i`.
/// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `len` is correct.)
pub unsafe fn clear(&mut self, len: usize) {
#[cfg(debug_assertions)]
debug_assert!(self.capacity >= len);
if let Some(drop) = self.drop {
// We set `self.drop` to `None` before dropping elements for unwind safety. This ensures we don't
// accidentally drop elements twice in the event of a drop impl panicking.
self.drop = None;
let size = self.item_layout.size();
for i in 0..len {
// SAFETY:
// * 0 <= `i` < `len`, so `i * size` must be in bounds for the allocation.
// * `size` is a multiple of the erased type's alignment,
// so adding a multiple of `size` will preserve alignment.
// * The item is left unreachable so it can be safely promoted to an `OwningPtr`.
let item = unsafe { self.get_ptr_mut().byte_add(i * size).promote() };
// SAFETY: `item` was obtained from this `BlobArray`, so its underlying type must match `drop`.
unsafe { drop(item) };
}
self.drop = Some(drop);
}
}
/// Because this method needs parameters, it can't be the implementation of the `Drop` trait.
/// The owner of this [`BlobArray`] must call this method with the correct information.
///
/// # Safety
/// - `cap` and `len` are indeed the capacity and length of this [`BlobArray`]
/// - This [`BlobArray`] mustn't be used after calling this method.
pub unsafe fn drop(&mut self, cap: usize, len: usize) {
#[cfg(debug_assertions)]
debug_assert_eq!(self.capacity, cap);
if cap != 0 {
self.clear(len);
if !self.is_zst() {
let layout =
array_layout(&self.item_layout, cap).expect("array layout should be valid");
std::alloc::dealloc(self.data.as_ptr().cast(), layout);
}
#[cfg(debug_assertions)]
{
self.capacity = 0;
}
}
}
/// Drops the last element in this [`BlobArray`].
///
/// # Safety
/// - `last_element_index` must correspond to the last element in the array.
/// - After this method is called, the last element must not be used
/// unless [`Self::initialize_unchecked`] is called to set the value of the last element.
pub unsafe fn drop_last_element(&mut self, last_element_index: usize) {
#[cfg(debug_assertions)]
debug_assert!(self.capacity > last_element_index);
if let Some(drop) = self.drop {
// We set `self.drop` to `None` before dropping elements for unwind safety. This ensures we don't
// accidentally drop elements twice in the event of a drop impl panicking.
self.drop = None;
// SAFETY: the caller guarantees that `last_element_index` is in bounds and initialized,
// and that this element won't be used again unless it is re-initialized.
let item = self.get_unchecked_mut(last_element_index).promote();
// SAFETY: `item` was obtained from this `BlobArray`, so its underlying type must match `drop`.
unsafe { drop(item) };
self.drop = Some(drop);
}
}
/// Allocate a block of memory for the array. This should be used to initialize the array; do not use this
/// method if there are already elements stored in the array - use [`Self::realloc`] instead.
pub(super) fn alloc(&mut self, capacity: NonZeroUsize) {
#[cfg(debug_assertions)]
debug_assert_eq!(self.capacity, 0);
if !self.is_zst() {
let new_layout = array_layout(&self.item_layout, capacity.get())
.expect("array layout should be valid");
// SAFETY: layout has non-zero size because capacity > 0, and the blob isn't ZST (`self.is_zst` == false)
let new_data = unsafe { std::alloc::alloc(new_layout) };
self.data = NonNull::new(new_data).unwrap_or_else(|| handle_alloc_error(new_layout));
}
#[cfg(debug_assertions)]
{
self.capacity = capacity.into();
}
}
/// Reallocate memory for this array.
/// For example, if the length (number of stored elements) reached the capacity (number of elements the current allocation can store),
/// you might want to use this method to increase the allocation, so more data can be stored in the array.
///
/// # Safety
/// - `current_capacity` is indeed the current capacity of this array.
/// - After calling this method, the caller must update their saved capacity to reflect the change.
pub(super) unsafe fn realloc(
&mut self,
current_capacity: NonZeroUsize,
new_capacity: NonZeroUsize,
) {
#[cfg(debug_assertions)]
debug_assert_eq!(self.capacity, current_capacity.into());
if !self.is_zst() {
// SAFETY: `new_capacity` can't overflow usize
let new_layout =
unsafe { array_layout_unchecked(&self.item_layout, new_capacity.get()) };
// SAFETY:
// - ptr was allocated via this allocator
// - the layout used to previously allocate this array is equivalent to `array_layout(&self.item_layout, current_capacity.get())`
// - `item_layout.size() > 0` (`self.is_zst`==false) and `new_capacity > 0`, so the layout size is non-zero
// - "new_size, when rounded up to the nearest multiple of layout.align(), must not overflow (i.e., the rounded value must be less than usize::MAX)",
// since the item size is always a multiple of its align, the rounding cannot happen
// here and the overflow is handled in `array_layout`
let new_data = unsafe {
std::alloc::realloc(
self.get_ptr_mut().as_ptr(),
// SAFETY: This is the Layout of the current array; it must be valid, because if it weren't, a previous allocation would already have panicked
array_layout_unchecked(&self.item_layout, current_capacity.get()),
new_layout.size(),
)
};
self.data = NonNull::new(new_data).unwrap_or_else(|| handle_alloc_error(new_layout));
}
#[cfg(debug_assertions)]
{
self.capacity = new_capacity.into();
}
}
/// Initializes the value at `index` to `value`. This function does not do any bounds checking.
///
/// # Safety
/// - `index` must be in bounds (`index` < capacity)
/// - The [`Layout`] of the value must match the layout of the blobs stored in this array,
/// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`.
/// - `value` must not point to the same value that is being initialized.
#[inline]
pub unsafe fn initialize_unchecked(&mut self, index: usize, value: OwningPtr<'_>) {
#[cfg(debug_assertions)]
debug_assert!(self.capacity > index);
let size = self.item_layout.size();
let dst = self.get_unchecked_mut(index);
std::ptr::copy::<u8>(value.as_ptr(), dst.as_ptr(), size);
}
/// Replaces the value at `index` with `value`. This function does not do any bounds checking.
///
/// # Safety
/// - Index must be in-bounds (`index` < `len`)
/// - `value`'s [`Layout`] must match this [`BlobArray`]'s `item_layout`,
/// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`.
/// - `value` must not point to the same value that is being replaced.
pub unsafe fn replace_unchecked(&mut self, index: usize, value: OwningPtr<'_>) {
#[cfg(debug_assertions)]
debug_assert!(self.capacity > index);
// Pointer to the value in the vector that will get replaced.
// SAFETY: The caller ensures that `index` fits in this vector.
let destination = NonNull::from(unsafe { self.get_unchecked_mut(index) });
let source = value.as_ptr();
if let Some(drop) = self.drop {
// We set `self.drop` to `None` before dropping elements for unwind safety. This ensures we don't
// accidentally drop elements twice in the event of a drop impl panicking.
self.drop = None;
// Transfer ownership of the old value out of the vector, so it can be dropped.
// SAFETY:
// - `destination` was obtained from a `PtrMut` in this vector, which ensures it is non-null,
// well-aligned for the underlying type, and has proper provenance.
// - The storage location will get overwritten with `value` later, which ensures
// that the element will not get observed or double dropped later.
// - If a panic occurs, `self.len` will remain `0`, which ensures a double-drop
// does not occur. Instead, all elements will be forgotten.
let old_value = unsafe { OwningPtr::new(destination) };
// This closure will run in case `drop()` panics,
// which ensures that `value` does not get forgotten.
let on_unwind = OnDrop::new(|| drop(value));
drop(old_value);
// If the above code does not panic, make sure that `value` doesn't get dropped.
core::mem::forget(on_unwind);
self.drop = Some(drop);
}
// Copy the new value into the vector, overwriting the previous value.
// SAFETY:
// - `source` and `destination` were obtained from `OwningPtr`s, which ensures they are
// valid for both reads and writes.
// - The value behind `source` will only be dropped if the above branch panics,
// so it must still be initialized and it is safe to transfer ownership into the vector.
// - `source` and `destination` were obtained from different memory locations,
// both of which we have exclusive access to, so they are guaranteed not to overlap.
unsafe {
std::ptr::copy_nonoverlapping::<u8>(
source,
destination.as_ptr(),
self.item_layout.size(),
);
}
}
/// This method will swap two elements in the array, and return the one at `index_to_remove`.
/// It is the caller's responsibility to drop the returned pointer, if that is desirable.
///
/// # Safety
/// - `index_to_keep` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` != `index_to_keep`
/// - The caller should address the inconsistent state of the array that has occurred after the swap, either:
/// 1) initialize a different value in `index_to_keep`
/// 2) update the saved length of the array if `index_to_keep` was the last element.
#[inline]
#[must_use = "The returned pointer should be used to drop the removed element"]
pub unsafe fn swap_remove_unchecked(
&mut self,
index_to_remove: usize,
index_to_keep: usize,
) -> OwningPtr<'_> {
#[cfg(debug_assertions)]
{
debug_assert!(self.capacity > index_to_keep);
debug_assert!(self.capacity > index_to_remove);
}
if index_to_remove != index_to_keep {
return self.swap_remove_unchecked_nonoverlapping(index_to_remove, index_to_keep);
}
// The element that used to be at index `index_to_remove` is now at index `index_to_keep` (after the swap)
// If we are storing ZSTs then the index doesn't actually matter because the size is 0.
self.get_unchecked_mut(index_to_keep).promote()
}
/// The same as [`Self::swap_remove_unchecked`] but the two elements must not overlap.
///
/// # Safety
/// - `index_to_keep` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` != `index_to_keep`
/// - The caller should address the inconsistent state of the array that has occurred after the swap, either:
/// 1) initialize a different value in `index_to_keep`
/// 2) update the saved length of the array if `index_to_keep` was the last element.
#[inline]
pub unsafe fn swap_remove_unchecked_nonoverlapping(
&mut self,
index_to_remove: usize,
index_to_keep: usize,
) -> OwningPtr<'_> {
#[cfg(debug_assertions)]
{
debug_assert!(self.capacity > index_to_keep);
debug_assert!(self.capacity > index_to_remove);
debug_assert_ne!(index_to_keep, index_to_remove);
}
debug_assert_ne!(index_to_keep, index_to_remove);
std::ptr::swap_nonoverlapping::<u8>(
self.get_unchecked_mut(index_to_keep).as_ptr(),
self.get_unchecked_mut(index_to_remove).as_ptr(),
self.item_layout.size(),
);
// The element that used to be at index `index_to_remove` is now at index `index_to_keep` (after the swap)
// If we are storing ZSTs then the index doesn't actually matter because the size is 0.
self.get_unchecked_mut(index_to_keep).promote()
}
/// This method will call [`Self::swap_remove_unchecked`] and drop the result.
///
/// # Safety
/// - `index_to_keep` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` != `index_to_keep`
/// - The caller should address the inconsistent state of the array that has occurred after the swap, either:
/// 1) initialize a different value in `index_to_keep`
/// 2) update the saved length of the array if `index_to_keep` was the last element.
#[inline]
pub unsafe fn swap_remove_and_drop_unchecked(
&mut self,
index_to_remove: usize,
index_to_keep: usize,
) {
#[cfg(debug_assertions)]
{
debug_assert!(self.capacity > index_to_keep);
debug_assert!(self.capacity > index_to_remove);
}
let drop = self.drop;
let value = self.swap_remove_unchecked(index_to_remove, index_to_keep);
if let Some(drop) = drop {
drop(value);
}
}
/// The same as [`Self::swap_remove_and_drop_unchecked`] but the two elements must not overlap.
///
/// # Safety
/// - `index_to_keep` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` != `index_to_keep`
/// - The caller should address the inconsistent state of the array that has occurred after the swap, either:
/// 1) initialize a different value in `index_to_keep`
/// 2) update the saved length of the array if `index_to_keep` was the last element.
#[inline]
pub unsafe fn swap_remove_and_drop_unchecked_nonoverlapping(
&mut self,
index_to_remove: usize,
index_to_keep: usize,
) {
#[cfg(debug_assertions)]
{
debug_assert!(self.capacity > index_to_keep);
debug_assert!(self.capacity > index_to_remove);
debug_assert_ne!(index_to_keep, index_to_remove);
}
let drop = self.drop;
let value = self.swap_remove_unchecked_nonoverlapping(index_to_remove, index_to_keep);
if let Some(drop) = drop {
drop(value);
}
}
}
#[cfg(test)]
mod tests {
use crate as bevy_ecs;
use bevy_ecs::prelude::*;
#[derive(Component)]
struct PanicOnDrop;
impl Drop for PanicOnDrop {
fn drop(&mut self) {
panic!("PanicOnDrop is being Dropped");
}
}
#[test]
#[should_panic(expected = "PanicOnDrop is being Dropped")]
fn make_sure_zst_components_get_dropped() {
let mut world = World::new();
world.spawn(PanicOnDrop);
}
}


@ -1,3 +1,5 @@
use bevy_ptr::{OwningPtr, Ptr, PtrMut};
use bevy_utils::OnDrop;
use std::{
alloc::{handle_alloc_error, Layout},
cell::UnsafeCell,
@ -5,9 +7,6 @@ use std::{
ptr::NonNull,
};
use bevy_ptr::{OwningPtr, Ptr, PtrMut};
use bevy_utils::OnDrop;
/// A flat, type-erased data storage type
///
/// Used to densely store homogeneous ECS data. A blob is usually just an arbitrary block of contiguous memory without any identity, and
@ -93,12 +92,6 @@ impl BlobVec {
self.len == 0
}
/// Returns the total number of elements the vector can hold without reallocating.
#[inline]
pub fn capacity(&self) -> usize {
self.capacity
}
/// Returns the [`Layout`] of the element type stored in the vector.
#[inline]
pub fn layout(&self) -> Layout {
@ -271,26 +264,12 @@ impl BlobVec {
self.initialize_unchecked(index, value);
}
/// Forces the length of the vector to `len`.
///
/// # Safety
/// `len` must be <= `capacity`. if length is decreased, "out of bounds" items must be dropped.
/// Newly added items must be immediately populated with valid values and length must be
/// increased. For better unwind safety, call [`BlobVec::set_len`] _after_ populating a new
/// value.
#[inline]
pub unsafe fn set_len(&mut self, len: usize) {
debug_assert!(len <= self.capacity());
self.len = len;
}
/// Performs a "swap remove" at the given `index`, which removes the item at `index` and moves
/// the last item in the [`BlobVec`] to `index` (if `index` is not the last item). It is the
/// caller's responsibility to drop the returned pointer, if that is desirable.
///
/// # Safety
/// It is the caller's responsibility to ensure that `index` is less than `self.len()`.
#[inline]
#[must_use = "The returned pointer should be used to dropped the removed element"]
pub unsafe fn swap_remove_and_forget_unchecked(&mut self, index: usize) -> OwningPtr<'_> {
debug_assert!(index < self.len());
@ -318,28 +297,6 @@ impl BlobVec {
unsafe { p.promote() }
}
/// Removes the value at `index` and copies the value stored into `ptr`.
/// Does not do any bounds checking on `index`.
/// The removed element is replaced by the last element of the `BlobVec`.
///
/// # Safety
/// It is the caller's responsibility to ensure that `index` is < `self.len()`
/// and that `self[index]` has been properly initialized.
#[inline]
pub unsafe fn swap_remove_unchecked(&mut self, index: usize, ptr: PtrMut<'_>) {
debug_assert!(index < self.len());
let last = self.get_unchecked_mut(self.len - 1).as_ptr();
let target = self.get_unchecked_mut(index).as_ptr();
// Copy the item at the index into the provided ptr
std::ptr::copy_nonoverlapping::<u8>(target, ptr.as_ptr(), self.item_layout.size());
// Recompress the storage by moving the previous last element into the
// now-free row overwriting the previous data. The removed row may be the last
// one so a non-overlapping copy must not be used here.
std::ptr::copy::<u8>(last, target, self.item_layout.size());
// Invalidate the data stored in the last row, as it has been moved
self.len -= 1;
}
/// Removes the value at `index` and drops it.
/// Does not do any bounds checking on `index`.
/// The removed element is replaced by the last element of the `BlobVec`.
@ -438,14 +395,6 @@ impl BlobVec {
}
}
}
/// Get the `drop` argument that was passed to `BlobVec::new`.
///
/// Callers can use this if they have a type-erased pointer of the correct
/// type to add to this [`BlobVec`], which they just want to drop instead.
pub fn get_drop(&self) -> Option<unsafe fn(OwningPtr<'_>)> {
self.drop
}
}
impl Drop for BlobVec {
@ -463,7 +412,7 @@ impl Drop for BlobVec {
}
/// From <https://doc.rust-lang.org/beta/src/core/alloc/layout.rs.html>
fn array_layout(layout: &Layout, n: usize) -> Option<Layout> {
pub(super) fn array_layout(layout: &Layout, n: usize) -> Option<Layout> {
let (array_layout, offset) = repeat_layout(layout, n)?;
debug_assert_eq!(layout.size(), offset);
Some(array_layout)
@ -489,6 +438,40 @@ fn repeat_layout(layout: &Layout, n: usize) -> Option<(Layout, usize)> {
}
}
/// From <https://doc.rust-lang.org/beta/src/core/alloc/layout.rs.html>
/// # Safety
/// The caller must ensure that:
/// - The resulting [`Layout`] is valid, by ensuring that `(layout.size() + padding_needed_for(layout, layout.align())) * n` doesn't overflow.
pub(super) unsafe fn array_layout_unchecked(layout: &Layout, n: usize) -> Layout {
let (array_layout, offset) = repeat_layout_unchecked(layout, n);
debug_assert_eq!(layout.size(), offset);
array_layout
}
// TODO: replace with `Layout::repeat` if/when it stabilizes
/// From <https://doc.rust-lang.org/beta/src/core/alloc/layout.rs.html>
/// # Safety
/// The caller must ensure that:
/// - The resulting [`Layout`] is valid, by ensuring that `(layout.size() + padding_needed_for(layout, layout.align())) * n` doesn't overflow.
unsafe fn repeat_layout_unchecked(layout: &Layout, n: usize) -> (Layout, usize) {
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
// > must not overflow (i.e., the rounded value must be less than
// > `usize::MAX`)
let padded_size = layout.size() + padding_needed_for(layout, layout.align());
// This may overflow in release builds, that's why this function is unsafe.
let alloc_size = padded_size * n;
// SAFETY: self.align is already known to be valid and alloc_size has been
// padded already.
unsafe {
(
Layout::from_size_align_unchecked(alloc_size, layout.align()),
padded_size,
)
}
}
/// From <https://doc.rust-lang.org/beta/src/core/alloc/layout.rs.html>
const fn padding_needed_for(layout: &Layout, align: usize) -> usize {
let len = layout.size();
@ -571,7 +554,7 @@ mod tests {
}
assert_eq!(blob_vec.len(), 1_000);
assert_eq!(blob_vec.capacity(), 1_024);
assert_eq!(blob_vec.capacity, 1_024);
}
#[derive(Debug, Eq, PartialEq, Clone)]
@ -595,7 +578,7 @@ mod tests {
let drop = drop_ptr::<Foo>;
// SAFETY: drop is able to drop a value of its `item_layout`
let mut blob_vec = unsafe { BlobVec::new(item_layout, Some(drop), 2) };
assert_eq!(blob_vec.capacity(), 2);
assert_eq!(blob_vec.capacity, 2);
// SAFETY: the following code only deals with values of type `Foo`, which satisfies the safety requirement of `push`, `get_mut` and `swap_remove` that the
// values have a layout compatible to the blob vec's `item_layout`.
// Every index is in range.
@ -616,7 +599,7 @@ mod tests {
};
push::<Foo>(&mut blob_vec, foo2.clone());
assert_eq!(blob_vec.len(), 2);
assert_eq!(blob_vec.capacity(), 2);
assert_eq!(blob_vec.capacity, 2);
assert_eq!(get_mut::<Foo>(&mut blob_vec, 0), &foo1);
assert_eq!(get_mut::<Foo>(&mut blob_vec, 1), &foo2);
@ -631,19 +614,19 @@ mod tests {
push(&mut blob_vec, foo3.clone());
assert_eq!(blob_vec.len(), 3);
assert_eq!(blob_vec.capacity(), 4);
assert_eq!(blob_vec.capacity, 4);
let last_index = blob_vec.len() - 1;
let value = swap_remove::<Foo>(&mut blob_vec, last_index);
assert_eq!(foo3, value);
assert_eq!(blob_vec.len(), 2);
assert_eq!(blob_vec.capacity(), 4);
assert_eq!(blob_vec.capacity, 4);
let value = swap_remove::<Foo>(&mut blob_vec, 0);
assert_eq!(foo1, value);
assert_eq!(blob_vec.len(), 1);
assert_eq!(blob_vec.capacity(), 4);
assert_eq!(blob_vec.capacity, 4);
foo2.a = 8;
assert_eq!(get_mut::<Foo>(&mut blob_vec, 0), &foo2);
@ -667,14 +650,12 @@ mod tests {
// SAFETY: no drop is correct drop for `()`.
let mut blob_vec = unsafe { BlobVec::new(Layout::new::<()>(), None, 0) };
assert_eq!(usize::MAX, blob_vec.capacity(), "Self-check");
assert_eq!(usize::MAX, blob_vec.capacity, "Self-check");
// SAFETY: Because `()` is a ZST trivial drop type, and because `BlobVec` capacity
// is always `usize::MAX` for ZSTs, we can arbitrarily set the length
// and still be sound.
unsafe {
blob_vec.set_len(usize::MAX);
}
blob_vec.len = usize::MAX;
// SAFETY: `BlobVec` was initialized for `()`, so it is safe to push `()` to it.
unsafe {
@ -691,7 +672,7 @@ mod tests {
// SAFETY: no drop is correct drop for `u32`.
let mut blob_vec = unsafe { BlobVec::new(Layout::new::<u32>(), None, 0) };
assert_eq!(0, blob_vec.capacity(), "Self-check");
assert_eq!(0, blob_vec.capacity, "Self-check");
OwningPtr::make(17u32, |ptr| {
// SAFETY: we push the value of correct type.


@ -20,10 +20,12 @@
//! [`World`]: crate::world::World
//! [`World::storages`]: crate::world::World::storages
mod blob_array;
mod blob_vec;
mod resource;
mod sparse_set;
mod table;
mod thin_array_ptr;
pub use resource::*;
pub use sparse_set::*;

File diff suppressed because it is too large.


@ -0,0 +1,688 @@
use super::*;
use crate::{
component::TickCells,
storage::{blob_array::BlobArray, thin_array_ptr::ThinArrayPtr},
};
use bevy_ptr::PtrMut;
/// Very similar to a normal [`Column`], but with the capacities and lengths cut out for performance reasons.
/// This type is used by [`Table`], because all of the capacities and lengths of the [`Table`]'s columns must match.
///
/// Like many other low-level storage types, [`ThinColumn`] has a limited and highly unsafe
/// interface. It's highly advised to use higher level types and their safe abstractions
/// instead of working directly with [`ThinColumn`].
pub struct ThinColumn {
pub(super) data: BlobArray,
pub(super) added_ticks: ThinArrayPtr<UnsafeCell<Tick>>,
pub(super) changed_ticks: ThinArrayPtr<UnsafeCell<Tick>>,
#[cfg(feature = "track_change_detection")]
pub(super) changed_by: ThinArrayPtr<UnsafeCell<&'static Location<'static>>>,
}
impl ThinColumn {
/// Create a new [`ThinColumn`] with the given `capacity`.
pub fn with_capacity(component_info: &ComponentInfo, capacity: usize) -> Self {
Self {
// SAFETY: The components stored in this columns will match the information in `component_info`
data: unsafe {
BlobArray::with_capacity(component_info.layout(), component_info.drop(), capacity)
},
added_ticks: ThinArrayPtr::with_capacity(capacity),
changed_ticks: ThinArrayPtr::with_capacity(capacity),
#[cfg(feature = "track_change_detection")]
changed_by: ThinArrayPtr::with_capacity(capacity),
}
}
/// Swap-remove and drop the removed element, but the component at `row` must not be the last element.
///
/// # Safety
/// - `row.as_usize()` < `len`
/// - `last_element_index` = `len - 1`
/// - `last_element_index` != `row.as_usize()`
/// - The caller should update the `len` to `len - 1`, or immediately initialize another element in the `last_element_index`
pub(crate) unsafe fn swap_remove_and_drop_unchecked_nonoverlapping(
&mut self,
last_element_index: usize,
row: TableRow,
) {
self.data
.swap_remove_and_drop_unchecked_nonoverlapping(row.as_usize(), last_element_index);
self.added_ticks
.swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index);
self.changed_ticks
.swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index);
#[cfg(feature = "track_change_detection")]
self.changed_by
.swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index);
}
/// Swap-remove and drop the removed element.
///
/// # Safety
/// - `last_element_index` must be the index of the last element (stored in the highest place in memory).
/// - `row.as_usize()` <= `last_element_index`
/// - The caller should update their saved length to reflect the change (decrement it by 1).
pub(crate) unsafe fn swap_remove_and_drop_unchecked(
&mut self,
last_element_index: usize,
row: TableRow,
) {
self.data
.swap_remove_and_drop_unchecked(row.as_usize(), last_element_index);
self.added_ticks
.swap_remove_and_drop_unchecked(row.as_usize(), last_element_index);
self.changed_ticks
.swap_remove_and_drop_unchecked(row.as_usize(), last_element_index);
#[cfg(feature = "track_change_detection")]
self.changed_by
.swap_remove_and_drop_unchecked(row.as_usize(), last_element_index);
}
/// Swap-remove and forget the removed element.
///
/// # Safety
/// - `last_element_index` must be the index of the last element (stored in the highest place in memory).
/// - `row.as_usize()` <= `last_element_index`
/// - The caller should update their saved length to reflect the change (decrement it by 1).
pub(crate) unsafe fn swap_remove_and_forget_unchecked(
&mut self,
last_element_index: usize,
row: TableRow,
) {
let _ = self
.data
.swap_remove_unchecked(row.as_usize(), last_element_index);
self.added_ticks
.swap_remove_unchecked(row.as_usize(), last_element_index);
self.changed_ticks
.swap_remove_unchecked(row.as_usize(), last_element_index);
#[cfg(feature = "track_change_detection")]
self.changed_by
.swap_remove_unchecked(row.as_usize(), last_element_index);
}
/// Call [`realloc`](std::alloc::realloc) to expand / shrink the memory allocation for this [`ThinColumn`]
///
/// # Safety
/// - `current_capacity` must be the current capacity of this column (the capacity of `self.data`, `self.added_ticks`, `self.changed_ticks`)
/// - The caller should make sure their saved `capacity` value is updated to `new_capacity` after this operation.
pub(crate) unsafe fn realloc(
&mut self,
current_capacity: NonZeroUsize,
new_capacity: NonZeroUsize,
) {
self.data.realloc(current_capacity, new_capacity);
self.added_ticks.realloc(current_capacity, new_capacity);
self.changed_ticks.realloc(current_capacity, new_capacity);
#[cfg(feature = "track_change_detection")]
self.changed_by.realloc(current_capacity, new_capacity);
}
/// Call [`alloc`](std::alloc::alloc) to allocate memory for this [`ThinColumn`]
/// The caller should make sure their saved `capacity` value is updated to `new_capacity` after this operation.
pub(crate) fn alloc(&mut self, new_capacity: NonZeroUsize) {
self.data.alloc(new_capacity);
self.added_ticks.alloc(new_capacity);
self.changed_ticks.alloc(new_capacity);
#[cfg(feature = "track_change_detection")]
self.changed_by.alloc(new_capacity);
}
/// Writes component data to the column at the given row.
/// Assumes the slot is uninitialized, drop is not called.
/// To overwrite existing initialized value, use [`Self::replace`] instead.
///
/// # Safety
/// - `row.as_usize()` must be in bounds.
/// - `data` holds a component that matches the `component_id`
#[inline]
pub(crate) unsafe fn initialize(
&mut self,
row: TableRow,
data: OwningPtr<'_>,
tick: Tick,
#[cfg(feature = "track_change_detection")] caller: &'static Location<'static>,
) {
self.data.initialize_unchecked(row.as_usize(), data);
*self.added_ticks.get_unchecked_mut(row.as_usize()).get_mut() = tick;
*self
.changed_ticks
.get_unchecked_mut(row.as_usize())
.get_mut() = tick;
#[cfg(feature = "track_change_detection")]
{
*self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller;
}
}
/// Writes component data to the column at given row. Assumes the slot is initialized, drops the previous value.
///
/// # Safety
/// - `row.as_usize()` must be in bounds.
/// - `data` holds a component that matches the `component_id`
#[inline]
pub(crate) unsafe fn replace(
&mut self,
row: TableRow,
data: OwningPtr<'_>,
change_tick: Tick,
#[cfg(feature = "track_change_detection")] caller: &'static Location<'static>,
) {
self.data.replace_unchecked(row.as_usize(), data);
*self
.changed_ticks
.get_unchecked_mut(row.as_usize())
.get_mut() = change_tick;
#[cfg(feature = "track_change_detection")]
{
*self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller;
}
}
/// Removes the element from `other` at `src_row` and inserts it
/// into the current column to initialize the values at `dst_row`.
/// Does not do any bounds checking.
///
/// # Safety
/// - `other` must have the same data layout as `self`
/// - `src_row` must be in bounds for `other`
/// - `dst_row` must be in bounds for `self`
/// - `other[src_row]` must be initialized to a valid value.
/// - `self[dst_row]` must not be initialized yet.
#[inline]
pub(crate) unsafe fn initialize_from_unchecked(
&mut self,
other: &mut ThinColumn,
other_last_element_index: usize,
src_row: TableRow,
dst_row: TableRow,
) {
debug_assert!(self.data.layout() == other.data.layout());
// Init the data
let src_val = other
.data
.swap_remove_unchecked(src_row.as_usize(), other_last_element_index);
self.data.initialize_unchecked(dst_row.as_usize(), src_val);
// Init added_ticks
let added_tick = other
.added_ticks
.swap_remove_unchecked(src_row.as_usize(), other_last_element_index);
self.added_ticks
.initialize_unchecked(dst_row.as_usize(), added_tick);
// Init changed_ticks
let changed_tick = other
.changed_ticks
.swap_remove_unchecked(src_row.as_usize(), other_last_element_index);
self.changed_ticks
.initialize_unchecked(dst_row.as_usize(), changed_tick);
#[cfg(feature = "track_change_detection")]
let changed_by = other
.changed_by
.swap_remove_unchecked(src_row.as_usize(), other_last_element_index);
#[cfg(feature = "track_change_detection")]
self.changed_by
.initialize_unchecked(dst_row.as_usize(), changed_by);
}
/// Call [`Tick::check_tick`] on all of the ticks stored in this column.
///
/// # Safety
/// `len` is the actual length of this column
#[inline]
pub(crate) unsafe fn check_change_ticks(&mut self, len: usize, change_tick: Tick) {
for i in 0..len {
// SAFETY:
// - `i` < `len`
// we have a mutable reference to `self`
unsafe { self.added_ticks.get_unchecked_mut(i) }
.get_mut()
.check_tick(change_tick);
// SAFETY:
// - `i` < `len`
// we have a mutable reference to `self`
unsafe { self.changed_ticks.get_unchecked_mut(i) }
.get_mut()
.check_tick(change_tick);
}
}
/// Clear all the components from this column.
///
/// # Safety
/// - `len` must match the actual length of the column
/// - The caller must not use this column's data until [`initializing`](Self::initialize) it again (set `len` to 0).
pub(crate) unsafe fn clear(&mut self, len: usize) {
self.added_ticks.clear_elements(len);
self.changed_ticks.clear_elements(len);
self.data.clear(len);
#[cfg(feature = "track_change_detection")]
self.changed_by.clear_elements(len);
}
/// Because this method needs parameters, it can't be the implementation of the `Drop` trait.
/// The owner of this [`ThinColumn`] must call this method with the correct information.
///
/// # Safety
/// - `len` is indeed the length of the column
/// - `cap` is indeed the capacity of the column
/// - the data stored in `self` will never be used again
pub(crate) unsafe fn drop(&mut self, cap: usize, len: usize) {
self.added_ticks.drop(cap, len);
self.changed_ticks.drop(cap, len);
self.data.drop(cap, len);
#[cfg(feature = "track_change_detection")]
self.changed_by.drop(cap, len);
}
/// Drops the last component in this column.
///
/// # Safety
/// - `last_element_index` is indeed the index of the last element
/// - the data stored in `last_element_index` will never be used unless properly initialized again.
pub(crate) unsafe fn drop_last_component(&mut self, last_element_index: usize) {
std::ptr::drop_in_place(self.added_ticks.get_unchecked_raw(last_element_index));
std::ptr::drop_in_place(self.changed_ticks.get_unchecked_raw(last_element_index));
#[cfg(feature = "track_change_detection")]
std::ptr::drop_in_place(self.changed_by.get_unchecked_raw(last_element_index));
self.data.drop_last_element(last_element_index);
}
/// Get a slice to the data stored in this [`ThinColumn`].
///
/// # Safety
/// - `T` must match the type of data that's stored in this [`ThinColumn`]
/// - `len` must match the actual length of this column (number of elements stored)
pub unsafe fn get_data_slice<T>(&self, len: usize) -> &[UnsafeCell<T>] {
self.data.get_sub_slice(len)
}
/// Get a slice to the added [`ticks`](Tick) in this [`ThinColumn`].
///
/// # Safety
/// - `len` must match the actual length of this column (number of elements stored)
pub unsafe fn get_added_ticks_slice(&self, len: usize) -> &[UnsafeCell<Tick>] {
self.added_ticks.as_slice(len)
}
/// Get a slice to the changed [`ticks`](Tick) in this [`ThinColumn`].
///
/// # Safety
/// - `len` must match the actual length of this column (number of elements stored)
pub unsafe fn get_changed_ticks_slice(&self, len: usize) -> &[UnsafeCell<Tick>] {
self.changed_ticks.as_slice(len)
}
/// Get a slice to the calling locations that last changed each value in this [`ThinColumn`]
///
/// # Safety
/// - `len` must match the actual length of this column (number of elements stored)
#[cfg(feature = "track_change_detection")]
pub unsafe fn get_changed_by_slice(
&self,
len: usize,
) -> &[UnsafeCell<&'static Location<'static>>] {
self.changed_by.as_slice(len)
}
}
/// A type-erased contiguous container for data of a homogeneous type.
///
/// Conceptually, a [`Column`] is very similar to a type-erased `Vec<T>`.
/// It also stores the change detection ticks for its components, kept in two separate
/// contiguous buffers internally. An element shares its data across these buffers by using the
/// same index (i.e. the entity at row 3 has its data at index 3 and its change detection ticks at index 3).
///
/// Like many other low-level storage types, [`Column`] has a limited and highly unsafe
/// interface. It's highly advised to use higher level types and their safe abstractions
/// instead of working directly with [`Column`].
#[derive(Debug)]
pub struct Column {
pub(super) data: BlobVec,
pub(super) added_ticks: Vec<UnsafeCell<Tick>>,
pub(super) changed_ticks: Vec<UnsafeCell<Tick>>,
#[cfg(feature = "track_change_detection")]
changed_by: Vec<UnsafeCell<&'static Location<'static>>>,
}
impl Column {
/// Constructs a new [`Column`], configured with a component's layout and an initial `capacity`.
#[inline]
pub(crate) fn with_capacity(component_info: &ComponentInfo, capacity: usize) -> Self {
Column {
// SAFETY: component_info.drop() is valid for the types that will be inserted.
data: unsafe { BlobVec::new(component_info.layout(), component_info.drop(), capacity) },
added_ticks: Vec::with_capacity(capacity),
changed_ticks: Vec::with_capacity(capacity),
#[cfg(feature = "track_change_detection")]
changed_by: Vec::with_capacity(capacity),
}
}
/// Fetches the [`Layout`] for the underlying type.
#[inline]
pub fn item_layout(&self) -> Layout {
self.data.layout()
}
/// Writes component data to the column at given row.
/// Assumes the slot is initialized, calls drop.
///
/// # Safety
/// Assumes data has already been allocated for the given row.
#[inline]
pub(crate) unsafe fn replace(
&mut self,
row: TableRow,
data: OwningPtr<'_>,
change_tick: Tick,
#[cfg(feature = "track_change_detection")] caller: &'static Location<'static>,
) {
debug_assert!(row.as_usize() < self.len());
self.data.replace_unchecked(row.as_usize(), data);
*self
.changed_ticks
.get_unchecked_mut(row.as_usize())
.get_mut() = change_tick;
#[cfg(feature = "track_change_detection")]
{
*self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller;
}
}
/// Gets the current number of elements stored in the column.
#[inline]
pub fn len(&self) -> usize {
self.data.len()
}
/// Checks if the column is empty. Returns `true` if there are no elements, `false` otherwise.
#[inline]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Removes an element from the [`Column`].
///
/// - The value will be dropped if it implements [`Drop`].
/// - This does not preserve ordering, but is O(1).
/// - This does not do any bounds checking.
/// - The element is replaced with the last element in the [`Column`].
///
/// # Safety
/// `row` must be within the range `[0, self.len())`.
///
#[inline]
pub(crate) unsafe fn swap_remove_unchecked(&mut self, row: TableRow) {
self.data.swap_remove_and_drop_unchecked(row.as_usize());
self.added_ticks.swap_remove(row.as_usize());
self.changed_ticks.swap_remove(row.as_usize());
#[cfg(feature = "track_change_detection")]
self.changed_by.swap_remove(row.as_usize());
}
/// Removes an element from the [`Column`] and returns it and its change detection ticks.
/// This does not preserve ordering, but is O(1) and does not do any bounds checking.
///
/// The element is replaced with the last element in the [`Column`].
///
/// It's the caller's responsibility to ensure that the removed value is dropped or used.
    /// Failure to do so may result in resources not being released (e.g. file handles not being
/// released, memory leaks, etc.)
///
/// # Safety
/// `row` must be within the range `[0, self.len())`.
#[inline]
#[must_use = "The returned pointer should be used to dropped the removed component"]
pub(crate) unsafe fn swap_remove_and_forget_unchecked(
&mut self,
row: TableRow,
) -> (OwningPtr<'_>, ComponentTicks, MaybeLocation) {
let data = self.data.swap_remove_and_forget_unchecked(row.as_usize());
let added = self.added_ticks.swap_remove(row.as_usize()).into_inner();
let changed = self.changed_ticks.swap_remove(row.as_usize()).into_inner();
#[cfg(feature = "track_change_detection")]
let caller = self.changed_by.swap_remove(row.as_usize()).into_inner();
#[cfg(not(feature = "track_change_detection"))]
let caller = ();
(data, ComponentTicks { added, changed }, caller)
}
/// Pushes a new value onto the end of the [`Column`].
///
/// # Safety
/// `ptr` must point to valid data of this column's component type
pub(crate) unsafe fn push(
&mut self,
ptr: OwningPtr<'_>,
ticks: ComponentTicks,
#[cfg(feature = "track_change_detection")] caller: &'static Location<'static>,
) {
self.data.push(ptr);
self.added_ticks.push(UnsafeCell::new(ticks.added));
self.changed_ticks.push(UnsafeCell::new(ticks.changed));
#[cfg(feature = "track_change_detection")]
self.changed_by.push(UnsafeCell::new(caller));
}
/// Fetches the data pointer to the first element of the [`Column`].
///
/// The pointer is type erased, so using this function to fetch anything
/// other than the first element will require computing the offset using
/// [`Column::item_layout`].
#[inline]
pub fn get_data_ptr(&self) -> Ptr<'_> {
self.data.get_ptr()
}
/// Fetches the slice to the [`Column`]'s data cast to a given type.
///
/// Note: The values stored within are [`UnsafeCell`].
/// Users of this API must ensure that accesses to each individual element
/// adhere to the safety invariants of [`UnsafeCell`].
///
/// # Safety
/// The type `T` must be the type of the items in this column.
pub unsafe fn get_data_slice<T>(&self) -> &[UnsafeCell<T>] {
self.data.get_slice()
}
/// Fetches the slice to the [`Column`]'s "added" change detection ticks.
///
/// Note: The values stored within are [`UnsafeCell`].
/// Users of this API must ensure that accesses to each individual element
/// adhere to the safety invariants of [`UnsafeCell`].
#[inline]
pub fn get_added_ticks_slice(&self) -> &[UnsafeCell<Tick>] {
&self.added_ticks
}
/// Fetches the slice to the [`Column`]'s "changed" change detection ticks.
///
/// Note: The values stored within are [`UnsafeCell`].
/// Users of this API must ensure that accesses to each individual element
/// adhere to the safety invariants of [`UnsafeCell`].
#[inline]
pub fn get_changed_ticks_slice(&self) -> &[UnsafeCell<Tick>] {
&self.changed_ticks
}
/// Fetches a reference to the data and change detection ticks at `row`.
///
/// Returns `None` if `row` is out of bounds.
#[inline]
pub fn get(&self, row: TableRow) -> Option<(Ptr<'_>, TickCells<'_>)> {
(row.as_usize() < self.data.len())
// SAFETY: The row is length checked before fetching the pointer. This is being
// accessed through a read-only reference to the column.
.then(|| unsafe {
(
self.data.get_unchecked(row.as_usize()),
TickCells {
added: self.added_ticks.get_unchecked(row.as_usize()),
changed: self.changed_ticks.get_unchecked(row.as_usize()),
},
)
})
}
/// Fetches a read-only reference to the data at `row`.
///
/// Returns `None` if `row` is out of bounds.
#[inline]
pub fn get_data(&self, row: TableRow) -> Option<Ptr<'_>> {
(row.as_usize() < self.data.len()).then(|| {
// SAFETY: The row is length checked before fetching the pointer. This is being
// accessed through a read-only reference to the column.
unsafe { self.data.get_unchecked(row.as_usize()) }
})
}
/// Fetches a read-only reference to the data at `row`. Unlike [`Column::get`] this does not
/// do any bounds checking.
///
/// # Safety
/// - `row` must be within the range `[0, self.len())`.
/// - no other mutable reference to the data of the same row can exist at the same time
#[inline]
pub unsafe fn get_data_unchecked(&self, row: TableRow) -> Ptr<'_> {
debug_assert!(row.as_usize() < self.data.len());
self.data.get_unchecked(row.as_usize())
}
/// Fetches a mutable reference to the data at `row`.
///
/// Returns `None` if `row` is out of bounds.
#[inline]
pub fn get_data_mut(&mut self, row: TableRow) -> Option<PtrMut<'_>> {
(row.as_usize() < self.data.len()).then(|| {
// SAFETY: The row is length checked before fetching the pointer. This is being
// accessed through an exclusive reference to the column.
unsafe { self.data.get_unchecked_mut(row.as_usize()) }
})
}
/// Fetches the "added" change detection tick for the value at `row`.
///
/// Returns `None` if `row` is out of bounds.
///
/// Note: The values stored within are [`UnsafeCell`].
/// Users of this API must ensure that accesses to each individual element
/// adhere to the safety invariants of [`UnsafeCell`].
#[inline]
pub fn get_added_tick(&self, row: TableRow) -> Option<&UnsafeCell<Tick>> {
self.added_ticks.get(row.as_usize())
}
/// Fetches the "changed" change detection tick for the value at `row`.
///
/// Returns `None` if `row` is out of bounds.
///
/// Note: The values stored within are [`UnsafeCell`].
/// Users of this API must ensure that accesses to each individual element
/// adhere to the safety invariants of [`UnsafeCell`].
#[inline]
pub fn get_changed_tick(&self, row: TableRow) -> Option<&UnsafeCell<Tick>> {
self.changed_ticks.get(row.as_usize())
}
/// Fetches the change detection ticks for the value at `row`.
///
/// Returns `None` if `row` is out of bounds.
#[inline]
pub fn get_ticks(&self, row: TableRow) -> Option<ComponentTicks> {
if row.as_usize() < self.data.len() {
// SAFETY: The size of the column has already been checked.
Some(unsafe { self.get_ticks_unchecked(row) })
} else {
None
}
}
/// Fetches the "added" change detection tick for the value at `row`. Unlike [`Column::get_added_tick`]
/// this function does not do any bounds checking.
///
/// # Safety
/// `row` must be within the range `[0, self.len())`.
#[inline]
pub unsafe fn get_added_tick_unchecked(&self, row: TableRow) -> &UnsafeCell<Tick> {
debug_assert!(row.as_usize() < self.added_ticks.len());
self.added_ticks.get_unchecked(row.as_usize())
}
/// Fetches the "changed" change detection tick for the value at `row`. Unlike [`Column::get_changed_tick`]
/// this function does not do any bounds checking.
///
/// # Safety
/// `row` must be within the range `[0, self.len())`.
#[inline]
pub unsafe fn get_changed_tick_unchecked(&self, row: TableRow) -> &UnsafeCell<Tick> {
debug_assert!(row.as_usize() < self.changed_ticks.len());
self.changed_ticks.get_unchecked(row.as_usize())
}
/// Fetches the change detection ticks for the value at `row`. Unlike [`Column::get_ticks`]
/// this function does not do any bounds checking.
///
/// # Safety
/// `row` must be within the range `[0, self.len())`.
#[inline]
pub unsafe fn get_ticks_unchecked(&self, row: TableRow) -> ComponentTicks {
debug_assert!(row.as_usize() < self.added_ticks.len());
debug_assert!(row.as_usize() < self.changed_ticks.len());
ComponentTicks {
added: self.added_ticks.get_unchecked(row.as_usize()).read(),
changed: self.changed_ticks.get_unchecked(row.as_usize()).read(),
}
}
/// Clears the column, removing all values.
///
    /// Note that this function has no effect on the allocated capacity of the [`Column`].
pub fn clear(&mut self) {
self.data.clear();
self.added_ticks.clear();
self.changed_ticks.clear();
#[cfg(feature = "track_change_detection")]
self.changed_by.clear();
}
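    /// Calls [`Tick::check_tick`] on the "added" and "changed" ticks of every element in this column.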
#[inline]
pub(crate) fn check_change_ticks(&mut self, change_tick: Tick) {
for component_ticks in &mut self.added_ticks {
component_ticks.get_mut().check_tick(change_tick);
}
for component_ticks in &mut self.changed_ticks {
component_ticks.get_mut().check_tick(change_tick);
}
}
/// Fetches the calling location that last changed the value at `row`.
///
/// Returns `None` if `row` is out of bounds.
///
/// Note: The values stored within are [`UnsafeCell`].
/// Users of this API must ensure that accesses to each individual element
/// adhere to the safety invariants of [`UnsafeCell`].
#[inline]
#[cfg(feature = "track_change_detection")]
pub fn get_changed_by(&self, row: TableRow) -> Option<&UnsafeCell<&'static Location<'static>>> {
self.changed_by.get(row.as_usize())
}
/// Fetches the calling location that last changed the value at `row`.
///
/// Unlike [`Column::get_changed_by`] this function does not do any bounds checking.
///
/// # Safety
/// `row` must be within the range `[0, self.len())`.
#[inline]
#[cfg(feature = "track_change_detection")]
pub unsafe fn get_changed_by_unchecked(
&self,
row: TableRow,
) -> &UnsafeCell<&'static Location<'static>> {
debug_assert!(row.as_usize() < self.changed_by.len());
self.changed_by.get_unchecked(row.as_usize())
}
}


@ -0,0 +1,861 @@
use crate::{
change_detection::MaybeLocation,
component::{ComponentId, ComponentInfo, ComponentTicks, Components, Tick},
entity::Entity,
query::DebugCheckedUnwrap,
storage::{blob_vec::BlobVec, ImmutableSparseSet, SparseSet},
};
use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref};
use bevy_utils::HashMap;
pub use column::*;
#[cfg(feature = "track_change_detection")]
use std::panic::Location;
use std::{alloc::Layout, num::NonZeroUsize};
use std::{
cell::UnsafeCell,
ops::{Index, IndexMut},
};
mod column;
/// An opaque unique ID for a [`Table`] within a [`World`].
///
/// Can be used with [`Tables::get`] to fetch the corresponding
/// table.
///
/// Each [`Archetype`] always points to a table via [`Archetype::table_id`].
/// Multiple archetypes can point to the same table so long as the components
/// stored in the table are identical, but do not share the same sparse set
/// components.
///
/// [`World`]: crate::world::World
/// [`Archetype`]: crate::archetype::Archetype
/// [`Archetype::table_id`]: crate::archetype::Archetype::table_id
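///
/// # Example
///
/// A minimal, illustrative round-trip (not a compiling doctest); real IDs should be
/// obtained from a [`World`]'s tables rather than constructed by hand:
///
/// ```ignore
/// let id = TableId::from_u32(7);
/// assert_eq!(id.as_usize(), 7);
/// assert_eq!(TableId::empty(), TableId::from_u32(0));
/// ```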
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// SAFETY: Must be repr(transparent) due to the safety requirements on EntityLocation
#[repr(transparent)]
pub struct TableId(u32);
impl TableId {
pub(crate) const INVALID: TableId = TableId(u32::MAX);
/// Creates a new [`TableId`].
///
/// `index` *must* be retrieved from calling [`TableId::as_u32`] on a `TableId` you got
/// from a table of a given [`World`] or the created ID may be invalid.
///
/// [`World`]: crate::world::World
#[inline]
pub const fn from_u32(index: u32) -> Self {
Self(index)
}
/// Creates a new [`TableId`].
///
/// `index` *must* be retrieved from calling [`TableId::as_usize`] on a `TableId` you got
/// from a table of a given [`World`] or the created ID may be invalid.
///
/// [`World`]: crate::world::World
///
/// # Panics
///
/// Will panic if the provided value does not fit within a [`u32`].
#[inline]
pub const fn from_usize(index: usize) -> Self {
debug_assert!(index as u32 as usize == index);
Self(index as u32)
}
/// Gets the underlying table index from the ID.
#[inline]
pub const fn as_u32(self) -> u32 {
self.0
}
/// Gets the underlying table index from the ID.
#[inline]
pub const fn as_usize(self) -> usize {
// usize is at least u32 in Bevy
self.0 as usize
}
/// The [`TableId`] of the [`Table`] without any components.
#[inline]
pub const fn empty() -> Self {
Self(0)
}
}
/// An opaque newtype for rows in [`Table`]s. Specifies a single row in a specific table.
///
/// Values of this type are retrievable from [`Archetype::entity_table_row`] and can be
/// used alongside [`Archetype::table_id`] to fetch the exact table and row where an
/// [`Entity`]'s components are stored.
///
/// Values of this type are only valid so long as entities have not moved around.
/// Adding or removing components from an entity, or despawning it, can invalidate
/// any table row in the table the entity was previously stored in. Users
/// should *always* fetch the appropriate row from the entity's [`Archetype`] before
/// fetching the entity's components.
///
/// [`Archetype`]: crate::archetype::Archetype
/// [`Archetype::entity_table_row`]: crate::archetype::Archetype::entity_table_row
/// [`Archetype::table_id`]: crate::archetype::Archetype::table_id
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// SAFETY: Must be repr(transparent) due to the safety requirements on EntityLocation
#[repr(transparent)]
pub struct TableRow(u32);
impl TableRow {
pub(crate) const INVALID: TableRow = TableRow(u32::MAX);
/// Creates a `TableRow`.
#[inline]
pub const fn from_u32(index: u32) -> Self {
Self(index)
}
/// Creates a `TableRow` from a [`usize`] index.
///
/// # Panics
///
/// Will panic if the provided value does not fit within a [`u32`].
#[inline]
pub const fn from_usize(index: usize) -> Self {
debug_assert!(index as u32 as usize == index);
Self(index as u32)
}
/// Gets the index of the row as a [`usize`].
#[inline]
pub const fn as_usize(self) -> usize {
// usize is at least u32 in Bevy
self.0 as usize
}
/// Gets the index of the row as a [`usize`].
#[inline]
pub const fn as_u32(self) -> u32 {
self.0
}
}
/// A builder type for constructing [`Table`]s.
///
/// - Use [`with_capacity`] to initialize the builder.
/// - Repeatedly call [`add_column`] to add columns for components.
/// - Finalize with [`build`] to get the constructed [`Table`].
///
/// [`with_capacity`]: Self::with_capacity
/// [`add_column`]: Self::add_column
/// [`build`]: Self::build
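///
/// # Example
///
/// A minimal sketch of the builder flow (illustrative, not a compiling doctest);
/// `info_a` and `info_b` stand in for `&ComponentInfo` references fetched from the
/// [`Components`] registry:
///
/// ```ignore
/// let table = TableBuilder::with_capacity(64, 2) // room for 64 rows, 2 columns
///     .add_column(info_a)
///     .add_column(info_b)
///     .build();
/// assert_eq!(table.entity_count(), 0);
/// ```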
pub(crate) struct TableBuilder {
columns: SparseSet<ComponentId, ThinColumn>,
capacity: usize,
}
impl TableBuilder {
    /// Start building a new [`Table`] with a specified `capacity` (how many entities/rows to reserve space for in each column) and a `column_capacity` (how many columns to reserve space for).
pub fn with_capacity(capacity: usize, column_capacity: usize) -> Self {
Self {
columns: SparseSet::with_capacity(column_capacity),
capacity,
}
}
/// Add a new column to the [`Table`]. Specify the component which will be stored in the [`column`](ThinColumn) using its [`ComponentId`]
#[must_use]
pub fn add_column(mut self, component_info: &ComponentInfo) -> Self {
self.columns.insert(
component_info.id(),
ThinColumn::with_capacity(component_info, self.capacity),
);
self
}
    /// Build the [`Table`]; after this operation no more columns can be added. The resulting [`Table`] is ready to use.
#[must_use]
pub fn build(self) -> Table {
Table {
columns: self.columns.into_immutable(),
entities: Vec::with_capacity(self.capacity),
}
}
}
/// A column-oriented [structure-of-arrays] based storage for [`Component`]s of entities
/// in a [`World`].
///
/// Conceptually, a `Table` can be thought of as a `HashMap<ComponentId, ThinColumn>`, where
/// each [`ThinColumn`] is a type-erased `Vec<T: Component>`. Each row corresponds to a single entity
/// (i.e. index 3 in Column A and index 3 in Column B point to different components on the same
/// entity). Fetching components from a table involves fetching the associated column for a
/// component type (via its [`ComponentId`]), then fetching the entity's row within that column.
///
/// [structure-of-arrays]: https://en.wikipedia.org/wiki/AoS_and_SoA#Structure_of_arrays
/// [`Component`]: crate::component::Component
/// [`World`]: crate::world::World
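///
/// # Example
///
/// An illustrative sketch of reading a component (not a compiling doctest);
/// `table`, `component_id`, and `row` are hypothetical values that would normally
/// come from an entity's [`Archetype`](crate::archetype::Archetype):
///
/// ```ignore
/// if table.has_column(component_id) {
///     // SAFETY: `row` is a live row of this table, i.e. `row.as_usize() < table.entity_count()`.
///     let data = unsafe { table.get_component(component_id, row) };
/// }
/// ```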
pub struct Table {
columns: ImmutableSparseSet<ComponentId, ThinColumn>,
entities: Vec<Entity>,
}
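/// A guard that aborts the process if it is dropped while unwinding: its [`Drop`] impl
/// panics, and a panic during an ongoing unwind forces an abort. Used below to keep a
/// failed column (re)allocation from leaving the table in an inconsistent state.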
struct AbortOnPanic;
impl Drop for AbortOnPanic {
fn drop(&mut self) {
// Panicking while unwinding will force an abort.
panic!("Aborting due to allocator error");
}
}
impl Table {
/// Fetches a read-only slice of the entities stored within the [`Table`].
#[inline]
pub fn entities(&self) -> &[Entity] {
&self.entities
}
/// Get the capacity of this table, in entities.
    /// Note that if an allocation is in progress, this might not match the actual capacity of the columns, but it will once the allocation completes.
#[inline]
pub fn capacity(&self) -> usize {
self.entities.capacity()
}
/// Removes the entity at the given row and returns the entity swapped in to replace it (if an
/// entity was swapped in)
///
/// # Safety
/// `row` must be in-bounds (`row.as_usize()` < `self.len()`)
pub(crate) unsafe fn swap_remove_unchecked(&mut self, row: TableRow) -> Option<Entity> {
debug_assert!(row.as_usize() < self.entity_count());
let last_element_index = self.entity_count() - 1;
if row.as_usize() != last_element_index {
// Instead of checking this condition on every `swap_remove` call, we
// check it here and use `swap_remove_nonoverlapping`.
for col in self.columns.values_mut() {
// SAFETY:
// - `row` < `len`
// - `last_element_index` = `len` - 1
// - `row` != `last_element_index`
// - the `len` is kept within `self.entities`, it will update accordingly.
unsafe {
col.swap_remove_and_drop_unchecked_nonoverlapping(last_element_index, row);
};
}
} else {
            // If `row.as_usize()` == `last_element_index`, then there's no point in removing the component
// at `row`, but we still need to drop it.
for col in self.columns.values_mut() {
col.drop_last_component(last_element_index);
}
}
let is_last = row.as_usize() == last_element_index;
self.entities.swap_remove(row.as_usize());
if is_last {
None
} else {
Some(self.entities[row.as_usize()])
}
}
/// Moves the `row` column values to `new_table`, for the columns shared between both tables.
/// Returns the index of the new row in `new_table` and the entity in this table swapped in
    /// to replace it (if an entity was swapped in). Missing columns will be "forgotten". It is
    /// the caller's responsibility to drop them. Failure to do so may result in resources not
    /// being released (e.g. file handles not being released, memory leaks, etc.)
///
/// # Safety
/// - `row` must be in-bounds
pub(crate) unsafe fn move_to_and_forget_missing_unchecked(
&mut self,
row: TableRow,
new_table: &mut Table,
) -> TableMoveResult {
debug_assert!(row.as_usize() < self.entity_count());
let last_element_index = self.entity_count() - 1;
let is_last = row.as_usize() == last_element_index;
let new_row = new_table.allocate(self.entities.swap_remove(row.as_usize()));
for (component_id, column) in self.columns.iter_mut() {
if let Some(new_column) = new_table.get_column_mut(*component_id) {
new_column.initialize_from_unchecked(column, last_element_index, row, new_row);
} else {
                // It's the caller's responsibility to drop these components.
column.swap_remove_and_forget_unchecked(last_element_index, row);
}
}
TableMoveResult {
new_row,
swapped_entity: if is_last {
None
} else {
Some(self.entities[row.as_usize()])
},
}
}
/// Moves the `row` column values to `new_table`, for the columns shared between both tables.
/// Returns the index of the new row in `new_table` and the entity in this table swapped in
/// to replace it (if an entity was swapped in).
///
/// # Safety
/// row must be in-bounds
pub(crate) unsafe fn move_to_and_drop_missing_unchecked(
&mut self,
row: TableRow,
new_table: &mut Table,
) -> TableMoveResult {
debug_assert!(row.as_usize() < self.entity_count());
let last_element_index = self.entity_count() - 1;
let is_last = row.as_usize() == last_element_index;
let new_row = new_table.allocate(self.entities.swap_remove(row.as_usize()));
for (component_id, column) in self.columns.iter_mut() {
if let Some(new_column) = new_table.get_column_mut(*component_id) {
new_column.initialize_from_unchecked(column, last_element_index, row, new_row);
} else {
column.swap_remove_and_drop_unchecked(last_element_index, row);
}
}
TableMoveResult {
new_row,
swapped_entity: if is_last {
None
} else {
Some(self.entities[row.as_usize()])
},
}
}
/// Moves the `row` column values to `new_table`, for the columns shared between both tables.
/// Returns the index of the new row in `new_table` and the entity in this table swapped in
/// to replace it (if an entity was swapped in).
///
/// # Safety
/// - `row` must be in-bounds
/// - `new_table` must contain every component this table has
pub(crate) unsafe fn move_to_superset_unchecked(
&mut self,
row: TableRow,
new_table: &mut Table,
) -> TableMoveResult {
debug_assert!(row.as_usize() < self.entity_count());
let last_element_index = self.entity_count() - 1;
let is_last = row.as_usize() == last_element_index;
let new_row = new_table.allocate(self.entities.swap_remove(row.as_usize()));
for (component_id, column) in self.columns.iter_mut() {
new_table
.get_column_mut(*component_id)
.debug_checked_unwrap()
.initialize_from_unchecked(column, last_element_index, row, new_row);
}
TableMoveResult {
new_row,
swapped_entity: if is_last {
None
} else {
Some(self.entities[row.as_usize()])
},
}
}
/// Get the data of the column matching `component_id` as a slice.
///
/// # Safety
    /// - `T` must match the `component_id`
pub unsafe fn get_data_slice_for<T>(
&self,
component_id: ComponentId,
) -> Option<&[UnsafeCell<T>]> {
self.get_column(component_id)
.map(|col| col.get_data_slice(self.entity_count()))
}
/// Get the added ticks of the column matching `component_id` as a slice.
pub fn get_added_ticks_slice_for(
&self,
component_id: ComponentId,
) -> Option<&[UnsafeCell<Tick>]> {
self.get_column(component_id)
// SAFETY: `self.len()` is guaranteed to be the len of the ticks array
.map(|col| unsafe { col.get_added_ticks_slice(self.entity_count()) })
}
/// Get the changed ticks of the column matching `component_id` as a slice.
pub fn get_changed_ticks_slice_for(
&self,
component_id: ComponentId,
) -> Option<&[UnsafeCell<Tick>]> {
self.get_column(component_id)
// SAFETY: `self.len()` is guaranteed to be the len of the ticks array
.map(|col| unsafe { col.get_changed_ticks_slice(self.entity_count()) })
}
    /// Fetches the calling locations that last changed each component in the column matching `component_id`, as a slice.
#[cfg(feature = "track_change_detection")]
pub fn get_changed_by_slice_for(
&self,
component_id: ComponentId,
) -> Option<&[UnsafeCell<&'static Location<'static>>]> {
self.get_column(component_id)
// SAFETY: `self.len()` is guaranteed to be the len of the locations array
.map(|col| unsafe { col.get_changed_by_slice(self.entity_count()) })
}
/// Get the specific [`change tick`](Tick) of the component matching `component_id` in `row`.
pub fn get_changed_tick(
&self,
component_id: ComponentId,
row: TableRow,
) -> Option<&UnsafeCell<Tick>> {
        if row.as_usize() < self.entity_count() {
            // SAFETY: `row.as_usize()` < `len`, so the element at `row` is initialized
            Some(unsafe {
                self.get_column(component_id)?
                    .changed_ticks
                    .get_unchecked(row.as_usize())
            })
        } else {
            None
        }
}
/// Get the specific [`added tick`](Tick) of the component matching `component_id` in `row`.
pub fn get_added_tick(
&self,
component_id: ComponentId,
row: TableRow,
) -> Option<&UnsafeCell<Tick>> {
        if row.as_usize() < self.entity_count() {
            // SAFETY: `row.as_usize()` < `len`, so the element at `row` is initialized
            Some(unsafe {
                self.get_column(component_id)?
                    .added_ticks
                    .get_unchecked(row.as_usize())
            })
        } else {
            None
        }
}
/// Get the specific calling location that changed the component matching `component_id` in `row`
#[cfg(feature = "track_change_detection")]
pub fn get_changed_by(
&self,
component_id: ComponentId,
row: TableRow,
) -> Option<&UnsafeCell<&'static Location<'static>>> {
        if row.as_usize() < self.entity_count() {
            // SAFETY: `row.as_usize()` < `len`, so the element at `row` is initialized
            Some(unsafe {
                self.get_column(component_id)?
                    .changed_by
                    .get_unchecked(row.as_usize())
            })
        } else {
            None
        }
}
/// Get the [`ComponentTicks`] of the component matching `component_id` in `row`.
///
/// # Safety
/// - `row.as_usize()` < `self.len()`
pub unsafe fn get_ticks_unchecked(
&self,
component_id: ComponentId,
row: TableRow,
) -> Option<ComponentTicks> {
self.get_column(component_id).map(|col| ComponentTicks {
added: col.added_ticks.get_unchecked(row.as_usize()).read(),
changed: col.changed_ticks.get_unchecked(row.as_usize()).read(),
})
}
/// Fetches a read-only reference to the [`ThinColumn`] for a given [`Component`] within the table.
///
/// Returns `None` if the corresponding component does not belong to the table.
///
/// [`Component`]: crate::component::Component
#[inline]
pub fn get_column(&self, component_id: ComponentId) -> Option<&ThinColumn> {
self.columns.get(component_id)
}
/// Fetches a mutable reference to the [`ThinColumn`] for a given [`Component`] within the
/// table.
///
/// Returns `None` if the corresponding component does not belong to the table.
///
/// [`Component`]: crate::component::Component
#[inline]
pub(crate) fn get_column_mut(&mut self, component_id: ComponentId) -> Option<&mut ThinColumn> {
self.columns.get_mut(component_id)
}
/// Checks if the table contains a [`ThinColumn`] for a given [`Component`].
///
/// Returns `true` if the column is present, `false` otherwise.
///
/// [`Component`]: crate::component::Component
#[inline]
pub fn has_column(&self, component_id: ComponentId) -> bool {
self.columns.contains(component_id)
}
/// Reserves `additional` elements worth of capacity within the table.
pub(crate) fn reserve(&mut self, additional: usize) {
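        // The columns don't track their own length or capacity; both are driven by
        // `self.entities`, so a single capacity check here covers every column.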
if self.capacity() - self.entity_count() < additional {
let column_cap = self.capacity();
self.entities.reserve(additional);
// use entities vector capacity as driving capacity for all related allocations
let new_capacity = self.entities.capacity();
if column_cap == 0 {
// SAFETY: the current capacity is 0
unsafe { self.alloc_columns(NonZeroUsize::new_unchecked(new_capacity)) };
} else {
// SAFETY:
// - `column_cap` is indeed the columns' capacity
unsafe {
self.realloc_columns(
NonZeroUsize::new_unchecked(column_cap),
NonZeroUsize::new_unchecked(new_capacity),
);
};
}
}
}
/// Allocate memory for the columns in the [`Table`]
///
    /// The current capacity of the columns must be 0; if it isn't, the previous allocation will be overwritten and leaked.
fn alloc_columns(&mut self, new_capacity: NonZeroUsize) {
// If any of these allocations trigger an unwind, the wrong capacity will be used while dropping this table - UB.
// To avoid this, we use `AbortOnPanic`. If the allocation triggered a panic, the `AbortOnPanic`'s Drop impl will be
// called, and abort the program.
let _guard = AbortOnPanic;
for col in self.columns.values_mut() {
col.alloc(new_capacity);
}
core::mem::forget(_guard); // The allocation was successful, so we don't drop the guard.
}
/// Reallocate memory for the columns in the [`Table`]
///
/// # Safety
/// - `current_column_capacity` is indeed the capacity of the columns
unsafe fn realloc_columns(
&mut self,
current_column_capacity: NonZeroUsize,
new_capacity: NonZeroUsize,
) {
// If any of these allocations trigger an unwind, the wrong capacity will be used while dropping this table - UB.
// To avoid this, we use `AbortOnPanic`. If the allocation triggered a panic, the `AbortOnPanic`'s Drop impl will be
// called, and abort the program.
let _guard = AbortOnPanic;
// SAFETY:
// - There's no overflow
        // - `current_column_capacity` is indeed the columns' capacity - safety requirement
// - current capacity > 0
for col in self.columns.values_mut() {
col.realloc(current_column_capacity, new_capacity);
}
core::mem::forget(_guard); // The allocation was successful, so we don't drop the guard.
}
/// Allocates space for a new entity
///
/// # Safety
/// the allocated row must be written to immediately with valid values in each column
pub(crate) unsafe fn allocate(&mut self, entity: Entity) -> TableRow {
self.reserve(1);
let len = self.entity_count();
self.entities.push(entity);
for col in self.columns.values_mut() {
col.added_ticks
.initialize_unchecked(len, UnsafeCell::new(Tick::new(0)));
col.changed_ticks
.initialize_unchecked(len, UnsafeCell::new(Tick::new(0)));
#[cfg(feature = "track_change_detection")]
col.changed_by
.initialize_unchecked(len, UnsafeCell::new(Location::caller()));
}
TableRow::from_usize(len)
}
/// Gets the number of entities currently being stored in the table.
#[inline]
pub fn entity_count(&self) -> usize {
self.entities.len()
}
/// Get the drop function for some component that is stored in this table.
#[inline]
pub fn get_drop_for(&self, component_id: ComponentId) -> Option<unsafe fn(OwningPtr<'_>)> {
self.get_column(component_id)?.data.drop
}
/// Gets the number of components being stored in the table.
#[inline]
pub fn component_count(&self) -> usize {
self.columns.len()
}
/// Gets the maximum number of entities the table can currently store
/// without reallocating the underlying memory.
#[inline]
pub fn entity_capacity(&self) -> usize {
self.entities.capacity()
}
/// Checks if the [`Table`] is empty or not.
///
/// Returns `true` if the table contains no entities, `false` otherwise.
#[inline]
pub fn is_empty(&self) -> bool {
self.entities.is_empty()
}
/// Call [`Tick::check_tick`] on all of the ticks in the [`Table`]
pub(crate) fn check_change_ticks(&mut self, change_tick: Tick) {
let len = self.entity_count();
for col in self.columns.values_mut() {
// SAFETY: `len` is the actual length of the column
unsafe { col.check_change_ticks(len, change_tick) };
}
}
/// Iterates over the [`ThinColumn`]s of the [`Table`].
pub fn iter_columns(&self) -> impl Iterator<Item = &ThinColumn> {
self.columns.values()
}
/// Clears all of the stored components in the [`Table`].
pub(crate) fn clear(&mut self) {
let len = self.entity_count();
        // We must clear the entities first: if a component's drop function panics while the columns are cleared below,
        // the table's `Drop` impl would otherwise see the old length and double-free the columns.
self.entities.clear();
for column in self.columns.values_mut() {
            // SAFETY: `len` was captured before `self.entities` was cleared,
            // so it still matches the columns' length
unsafe { column.clear(len) };
}
}
/// Moves component data out of the [`Table`].
///
    /// This function leaves the underlying memory unchanged, but the component behind the
    /// returned pointer is semantically owned by the caller and will not be dropped in its original location.
    /// The caller is responsible for dropping the component data behind the returned pointer.
///
/// # Safety
/// - This table must hold the component matching `component_id`
/// - `row` must be in bounds
/// - The row's inconsistent state that happens after taking the component must be resolved—either initialize a new component or remove the row.
pub(crate) unsafe fn take_component(
&mut self,
component_id: ComponentId,
row: TableRow,
) -> OwningPtr<'_> {
self.get_column_mut(component_id)
.debug_checked_unwrap()
.data
.get_unchecked_mut(row.as_usize())
.promote()
}
/// Get the component at a given `row`, if the [`Table`] stores components with the given `component_id`
///
/// # Safety
/// `row.as_usize()` < `self.len()`
pub unsafe fn get_component(
&self,
component_id: ComponentId,
row: TableRow,
) -> Option<Ptr<'_>> {
self.get_column(component_id)
.map(|col| col.data.get_unchecked(row.as_usize()))
}
}
/// A collection of [`Table`] storages, indexed by [`TableId`]
///
/// Can be accessed via [`Storages`](crate::storage::Storages)
pub struct Tables {
tables: Vec<Table>,
table_ids: HashMap<Box<[ComponentId]>, TableId>,
}
impl Default for Tables {
fn default() -> Self {
let empty_table = TableBuilder::with_capacity(0, 0).build();
Tables {
tables: vec![empty_table],
table_ids: HashMap::default(),
}
}
}
pub(crate) struct TableMoveResult {
pub swapped_entity: Option<Entity>,
pub new_row: TableRow,
}
impl Tables {
/// Returns the number of [`Table`]s this collection contains
#[inline]
pub fn len(&self) -> usize {
self.tables.len()
}
/// Returns true if this collection contains no [`Table`]s
#[inline]
pub fn is_empty(&self) -> bool {
self.tables.is_empty()
}
/// Fetches a [`Table`] by its [`TableId`].
///
/// Returns `None` if `id` is invalid.
#[inline]
pub fn get(&self, id: TableId) -> Option<&Table> {
self.tables.get(id.as_usize())
}
/// Fetches mutable references to two different [`Table`]s.
///
/// # Panics
///
/// Panics if `a` and `b` are equal.
#[inline]
pub(crate) fn get_2_mut(&mut self, a: TableId, b: TableId) -> (&mut Table, &mut Table) {
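        // Split the slice at the larger of the two indices so that each half contains
        // exactly one of the requested tables, yielding two disjoint mutable borrows.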
if a.as_usize() > b.as_usize() {
let (b_slice, a_slice) = self.tables.split_at_mut(a.as_usize());
(&mut a_slice[0], &mut b_slice[b.as_usize()])
} else {
let (a_slice, b_slice) = self.tables.split_at_mut(b.as_usize());
(&mut a_slice[a.as_usize()], &mut b_slice[0])
}
}
/// Attempts to fetch a table based on the provided components,
/// creating and returning a new [`Table`] if one did not already exist.
///
/// # Safety
/// `component_ids` must contain components that exist in `components`
pub(crate) unsafe fn get_id_or_insert(
&mut self,
component_ids: &[ComponentId],
components: &Components,
) -> TableId {
let tables = &mut self.tables;
let (_key, value) = self
.table_ids
.raw_entry_mut()
.from_key(component_ids)
.or_insert_with(|| {
let mut table = TableBuilder::with_capacity(0, component_ids.len());
for component_id in component_ids {
table = table.add_column(components.get_info_unchecked(*component_id));
}
tables.push(table.build());
(component_ids.into(), TableId::from_usize(tables.len() - 1))
});
*value
}
    /// Iterates through all of the tables stored within, in [`TableId`] order.
pub fn iter(&self) -> std::slice::Iter<'_, Table> {
self.tables.iter()
}
/// Clears all data from all [`Table`]s stored within.
pub(crate) fn clear(&mut self) {
for table in &mut self.tables {
table.clear();
}
}
pub(crate) fn check_change_ticks(&mut self, change_tick: Tick) {
for table in &mut self.tables {
table.check_change_ticks(change_tick);
}
}
}
impl Index<TableId> for Tables {
type Output = Table;
#[inline]
fn index(&self, index: TableId) -> &Self::Output {
&self.tables[index.as_usize()]
}
}
impl IndexMut<TableId> for Tables {
#[inline]
fn index_mut(&mut self, index: TableId) -> &mut Self::Output {
&mut self.tables[index.as_usize()]
}
}
impl Drop for Table {
fn drop(&mut self) {
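        // The columns don't know their own length or capacity, so capture the values
        // tracked by `self.entities` before clearing it and pass them to each column's `drop`.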
let len = self.entity_count();
let cap = self.capacity();
self.entities.clear();
for col in self.columns.values_mut() {
// SAFETY: `cap` and `len` are correct
unsafe {
col.drop(cap, len);
}
}
}
}
#[cfg(test)]
mod tests {
use crate as bevy_ecs;
use crate::component::Component;
use crate::ptr::OwningPtr;
use crate::storage::Storages;
use crate::{
component::{Components, Tick},
entity::Entity,
storage::{TableBuilder, TableRow},
};
#[cfg(feature = "track_change_detection")]
use std::panic::Location;
#[derive(Component)]
struct W<T>(T);
#[test]
fn table() {
let mut components = Components::default();
let mut storages = Storages::default();
let component_id = components.init_component::<W<TableRow>>(&mut storages);
let columns = &[component_id];
let mut table = TableBuilder::with_capacity(0, columns.len())
.add_column(components.get_info(component_id).unwrap())
.build();
let entities = (0..200).map(Entity::from_raw).collect::<Vec<_>>();
for entity in &entities {
// SAFETY: we allocate and immediately set data afterwards
unsafe {
let row = table.allocate(*entity);
let value: W<TableRow> = W(row);
OwningPtr::make(value, |value_ptr| {
table.get_column_mut(component_id).unwrap().initialize(
row,
value_ptr,
Tick::new(0),
#[cfg(feature = "track_change_detection")]
Location::caller(),
);
});
};
}
assert_eq!(table.entity_capacity(), 256);
assert_eq!(table.entity_count(), 200);
}
}


@ -0,0 +1,314 @@
use crate::query::DebugCheckedUnwrap;
use std::alloc::{alloc, handle_alloc_error, realloc, Layout};
use std::mem::{needs_drop, size_of};
use std::num::NonZeroUsize;
use std::ptr::{self, NonNull};
/// Similar to [`Vec<T>`], but with the capacity and length cut out for performance reasons.
///
/// This type can be treated as a `ManuallyDrop<Box<[T]>>` without a built-in length. To avoid
/// memory leaks, [`drop`](Self::drop) must be called when no longer in use.
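///
/// # Example
///
/// A crate-internal, illustrative sketch (not a compiling doctest) in which the caller
/// tracks the length and capacity externally, as this type requires:
///
/// ```ignore
/// let mut arr: ThinArrayPtr<u32> = ThinArrayPtr::with_capacity(4);
/// let mut len = 0;
/// // SAFETY: index 0 is within the capacity of 4; `len` is updated right after.
/// unsafe { arr.initialize_unchecked(0, 42) };
/// len += 1;
/// // SAFETY: index 0 was initialized above.
/// assert_eq!(unsafe { *arr.get_unchecked(0) }, 42);
/// // SAFETY: 4 is the capacity passed to `with_capacity`, the first `len` elements are
/// // initialized, and `arr` is not used afterwards.
/// unsafe { arr.drop(4, len) };
/// ```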
pub struct ThinArrayPtr<T> {
data: NonNull<T>,
#[cfg(debug_assertions)]
capacity: usize,
}
impl<T> ThinArrayPtr<T> {
fn empty() -> Self {
#[cfg(debug_assertions)]
{
Self {
data: NonNull::dangling(),
capacity: 0,
}
}
#[cfg(not(debug_assertions))]
{
Self {
data: NonNull::dangling(),
}
}
}
#[inline(always)]
fn set_capacity(&mut self, _capacity: usize) {
#[cfg(debug_assertions)]
{
self.capacity = _capacity;
}
}
    /// Create a new [`ThinArrayPtr`] with a given capacity. If the `capacity` is 0, this will not allocate any memory.
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
let mut arr = Self::empty();
if capacity > 0 {
// SAFETY:
// - The `current_capacity` is 0 because it was just created
unsafe { arr.alloc(NonZeroUsize::new_unchecked(capacity)) };
}
arr
}
    /// Allocate memory for the array. This should only be used if no previous allocation has been made for this array (capacity = 0).
/// The caller should update their saved `capacity` value to reflect the fact that it was changed
///
/// # Panics
/// - Panics if the new capacity overflows `usize`
pub fn alloc(&mut self, capacity: NonZeroUsize) {
self.set_capacity(capacity.get());
if size_of::<T>() != 0 {
let new_layout = Layout::array::<T>(capacity.get())
.expect("layout should be valid (arithmetic overflow)");
// SAFETY:
// - layout has non-zero size, `capacity` > 0, `size` > 0 (`size_of::<T>() != 0`)
self.data = NonNull::new(unsafe { alloc(new_layout) })
.unwrap_or_else(|| handle_alloc_error(new_layout))
.cast();
}
}
    /// Reallocate memory for the array. This should only be used if a previous allocation has been made for this array (capacity > 0).
///
/// # Panics
/// - Panics if the new capacity overflows `usize`
///
/// # Safety
/// - The current capacity is indeed greater than 0
/// - The caller should update their saved `capacity` value to reflect the fact that it was changed
pub unsafe fn realloc(&mut self, current_capacity: NonZeroUsize, new_capacity: NonZeroUsize) {
#[cfg(debug_assertions)]
assert_eq!(self.capacity, current_capacity.into());
self.set_capacity(new_capacity.get());
if size_of::<T>() != 0 {
let new_layout =
Layout::array::<T>(new_capacity.get()).expect("overflow while allocating memory");
// SAFETY:
            // - ptr was allocated via this allocator
// - the layout of the array is the same as `Layout::array::<T>(current_capacity)`
// - the size of `T` is non 0, and `new_capacity` > 0
// - "new_size, when rounded up to the nearest multiple of layout.align(), must not overflow (i.e., the rounded value must be less than usize::MAX)",
// since the item size is always a multiple of its align, the rounding cannot happen
// here and the overflow is handled in `Layout::array`
self.data = NonNull::new(unsafe {
realloc(
self.data.cast().as_ptr(),
// We can use `unwrap_unchecked` because this is the Layout of the current allocation, it must be valid
Layout::array::<T>(current_capacity.get()).debug_checked_unwrap(),
new_layout.size(),
)
})
.unwrap_or_else(|| handle_alloc_error(new_layout))
.cast();
}
}
/// Initializes the value at `index` to `value`. This function does not do any bounds checking.
///
/// # Safety
    /// - `index` must be in bounds, i.e. within the `capacity`.
    /// - If `index` == `len`, the caller should update their saved `len` value to reflect the fact that it was changed.
#[inline]
pub unsafe fn initialize_unchecked(&mut self, index: usize, value: T) {
// SAFETY: `index` is in bounds
let ptr = unsafe { self.get_unchecked_raw(index) };
// SAFETY: `index` is in bounds, therefore the pointer to that location in the array is valid, and aligned.
unsafe { ptr::write(ptr, value) };
}
/// Get a raw pointer to the element at `index`. This method doesn't do any bounds checking.
///
/// # Safety
/// - `index` must be safe to access.
#[inline]
pub unsafe fn get_unchecked_raw(&mut self, index: usize) -> *mut T {
// SAFETY:
// - `self.data` and the resulting pointer are in the same allocated object
// - the memory address of the last element doesn't overflow `isize`, so if `index` is in bounds, it won't overflow either
unsafe { self.data.as_ptr().add(index) }
}
/// Get a reference to the element at `index`. This method doesn't do any bounds checking.
///
/// # Safety
/// - `index` must be safe to read.
#[inline]
pub unsafe fn get_unchecked(&self, index: usize) -> &'_ T {
// SAFETY:
// - `self.data` and the resulting pointer are in the same allocated object
// - the memory address of the last element doesn't overflow `isize`, so if `index` is in bounds, it won't overflow either
let ptr = unsafe { self.data.as_ptr().add(index) };
// SAFETY:
// - The pointer is properly aligned
        // - It is dereferenceable (all in the same allocation)
        // - `index` < `len` and the element at `index` is initialized, so it is valid to read
// - We have a reference to self, so no other mutable accesses to the element can occur
unsafe {
ptr.as_ref()
                // SAFETY: The pointer is not null, so `debug_checked_unwrap` will not fail
.debug_checked_unwrap()
}
}
/// Get a mutable reference to the element at `index`. This method doesn't do any bounds checking.
///
/// # Safety
/// - `index` must be safe to write to.
#[inline]
pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &'_ mut T {
// SAFETY:
// - `self.data` and the resulting pointer are in the same allocated object
// - the memory address of the last element doesn't overflow `isize`, so if `index` is in bounds, it won't overflow either
let ptr = unsafe { self.data.as_ptr().add(index) };
// SAFETY:
// - The pointer is properly aligned
        // - It is dereferenceable (all in the same allocation)
        // - `index` < `len` and the element is safe to write to, so it is valid
// - We have a mutable reference to `self`
unsafe {
ptr.as_mut()
                // SAFETY: The pointer is not null, so `unwrap_unchecked` will not fail
.unwrap_unchecked()
}
}
/// Perform a [`swap-remove`](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.swap_remove) and return the removed value.
///
/// # Safety
/// - `index_to_keep` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` != `index_to_keep`
/// - The caller should address the inconsistent state of the array that has occurred after the swap, either:
/// 1) initialize a different value in `index_to_keep`
/// 2) update the saved length of the array if `index_to_keep` was the last element.
#[inline]
pub unsafe fn swap_remove_unchecked_nonoverlapping(
&mut self,
index_to_remove: usize,
index_to_keep: usize,
) -> T {
#[cfg(debug_assertions)]
{
debug_assert!(self.capacity > index_to_keep);
debug_assert!(self.capacity > index_to_remove);
debug_assert_ne!(index_to_keep, index_to_remove);
}
let base_ptr = self.data.as_ptr();
let value = ptr::read(base_ptr.add(index_to_remove));
ptr::copy_nonoverlapping(
base_ptr.add(index_to_keep),
base_ptr.add(index_to_remove),
1,
);
value
}
/// Perform a [`swap-remove`](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.swap_remove) and return the removed value.
///
/// # Safety
/// - `index_to_keep` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` != `index_to_keep`
/// - The caller should address the inconsistent state of the array that has occurred after the swap, either:
/// 1) initialize a different value in `index_to_keep`
/// 2) update the saved length of the array if `index_to_keep` was the last element.
#[inline]
pub unsafe fn swap_remove_unchecked(
&mut self,
index_to_remove: usize,
index_to_keep: usize,
) -> T {
if index_to_remove != index_to_keep {
return self.swap_remove_unchecked_nonoverlapping(index_to_remove, index_to_keep);
}
ptr::read(self.data.as_ptr().add(index_to_remove))
}
/// Perform a [`swap-remove`](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.swap_remove) and drop the removed value.
///
/// # Safety
/// - `index_to_keep` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` must be safe to access (within the bounds of the length of the array).
/// - `index_to_remove` != `index_to_keep`
/// - The caller should address the inconsistent state of the array that has occurred after the swap, either:
/// 1) initialize a different value in `index_to_keep`
/// 2) update the saved length of the array if `index_to_keep` was the last element.
#[inline]
pub unsafe fn swap_remove_and_drop_unchecked(
&mut self,
index_to_remove: usize,
index_to_keep: usize,
) {
let val = &mut self.swap_remove_unchecked(index_to_remove, index_to_keep);
ptr::drop_in_place(ptr::from_mut(val));
}
/// Get a raw pointer to the last element of the array, return `None` if the length is 0
///
/// # Safety
/// - ensure that `current_len` is indeed the len of the array
#[inline]
unsafe fn last_element(&mut self, current_len: usize) -> Option<*mut T> {
(current_len != 0).then_some(self.data.as_ptr().add(current_len - 1))
}
    /// Clears the array, removing (and dropping) all elements. Note that this method has no effect on the allocated capacity of the array.
///
/// # Safety
/// - `current_len` is indeed the length of the array
/// - The caller should update their saved length value
pub unsafe fn clear_elements(&mut self, mut current_len: usize) {
if needs_drop::<T>() {
while let Some(to_drop) = self.last_element(current_len) {
ptr::drop_in_place(to_drop);
current_len -= 1;
}
}
}
/// Drop the entire array and all its elements.
///
/// # Safety
/// - `current_len` is indeed the length of the array
/// - `current_capacity` is indeed the capacity of the array
/// - The caller must not use this `ThinArrayPtr` in any way after calling this function
pub unsafe fn drop(&mut self, current_capacity: usize, current_len: usize) {
#[cfg(debug_assertions)]
assert_eq!(self.capacity, current_capacity);
if current_capacity != 0 {
self.clear_elements(current_len);
let layout = Layout::array::<T>(current_capacity).expect("layout should be valid");
std::alloc::dealloc(self.data.as_ptr().cast(), layout);
}
self.set_capacity(0);
}
/// Get the [`ThinArrayPtr`] as a slice with a given length.
///
/// # Safety
/// - `slice_len` must match the actual length of the array
#[inline]
pub unsafe fn as_slice(&self, slice_len: usize) -> &[T] {
// SAFETY:
        // - the data is valid - allocated with the same allocator
        // - non-null and well-aligned
        // - we have a shared reference to self - the data will not be mutated for the duration of the returned borrow
unsafe { std::slice::from_raw_parts(self.data.as_ptr(), slice_len) }
}
}
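/// Converts a boxed slice into a [`ThinArrayPtr`], reusing the existing allocation.
/// The slice's length is discarded, so from this point on the caller is responsible
/// for tracking both the length and the capacity (which equal the original slice's length).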
impl<T> From<Box<[T]>> for ThinArrayPtr<T> {
fn from(value: Box<[T]>) -> Self {
let _len = value.len();
let slice_ptr = Box::<[T]>::into_raw(value);
// SAFETY: We just got the pointer from a reference
let first_element_ptr = unsafe { (*slice_ptr).as_mut_ptr() };
Self {
// SAFETY: The pointer can't be null, it came from a reference
data: unsafe { NonNull::new_unchecked(first_element_ptr) },
#[cfg(debug_assertions)]
capacity: _len,
}
}
}


@ -2578,16 +2578,11 @@ pub(crate) unsafe fn take_component<'a>(
match component_info.storage_type() {
StorageType::Table => {
let table = &mut storages.tables[location.table_id];
let components = table.get_column_mut(component_id).unwrap();
// SAFETY:
// - archetypes only store valid table_rows
// - index is in bounds as promised by caller
// - promote is safe because the caller promises to remove the table row without dropping it immediately afterwards
unsafe {
components
.get_data_unchecked_mut(location.table_row)
.promote()
}
unsafe { table.take_component(component_id, location.table_row) }
}
StorageType::SparseSet => storages
.sparse_sets


@ -13,7 +13,7 @@ use crate::{
prelude::Component,
query::{DebugCheckedUnwrap, ReadOnlyQueryData},
removal_detection::RemovedComponentEvents,
storage::{Column, ComponentSparseSet, Storages},
storage::{ComponentSparseSet, Storages, Table},
system::{Res, Resource},
world::RawCommandQueue,
};
@ -1003,17 +1003,13 @@ impl<'w> UnsafeEntityCell<'w> {
impl<'w> UnsafeWorldCell<'w> {
#[inline]
/// # Safety:
/// - the returned `Column` is only used in ways that this [`UnsafeWorldCell`] has permission for.
/// - the returned `Column` is only used in ways that would not conflict with any existing
/// borrows of world data.
unsafe fn fetch_table(
self,
location: EntityLocation,
component_id: ComponentId,
) -> Option<&'w Column> {
// SAFETY: caller ensures returned data is not misused and we have not created any borrows
// of component/resource data
unsafe { self.storages() }.tables[location.table_id].get_column(component_id)
/// - the returned `Table` is only used in ways that this [`UnsafeWorldCell`] has permission for.
/// - the returned `Table` is only used in ways that would not conflict with any existing borrows of world data.
unsafe fn fetch_table(self, location: EntityLocation) -> Option<&'w Table> {
// SAFETY:
// - caller ensures returned data is not misused and we have not created any borrows of component/resource data
// - `location` contains a valid `TableId`, so getting the table won't fail
unsafe { self.storages().tables.get(location.table_id) }
}
#[inline]
@ -1048,9 +1044,9 @@ unsafe fn get_component(
// SAFETY: component_id exists and is therefore valid
match storage_type {
StorageType::Table => {
let components = world.fetch_table(location, component_id)?;
let table = world.fetch_table(location)?;
// SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules
Some(components.get_data_unchecked(location.table_row))
table.get_component(component_id, location.table_row)
}
StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get(entity),
}
@ -1074,17 +1070,23 @@ unsafe fn get_component_and_ticks(
) -> Option<(Ptr<'_>, TickCells<'_>, MaybeUnsafeCellLocation<'_>)> {
match storage_type {
StorageType::Table => {
let components = world.fetch_table(location, component_id)?;
let table = world.fetch_table(location)?;
// SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules
Some((
components.get_data_unchecked(location.table_row),
table.get_component(component_id, location.table_row)?,
TickCells {
added: components.get_added_tick_unchecked(location.table_row),
changed: components.get_changed_tick_unchecked(location.table_row),
added: table
.get_added_tick(component_id, location.table_row)
.debug_checked_unwrap(),
changed: table
.get_changed_tick(component_id, location.table_row)
.debug_checked_unwrap(),
},
#[cfg(feature = "track_change_detection")]
components.get_changed_by_unchecked(location.table_row),
table
.get_changed_by(component_id, location.table_row)
.debug_checked_unwrap(),
#[cfg(not(feature = "track_change_detection"))]
(),
))
@ -1112,9 +1114,9 @@ unsafe fn get_ticks(
) -> Option<ComponentTicks> {
match storage_type {
StorageType::Table => {
let components = world.fetch_table(location, component_id)?;
let table = world.fetch_table(location)?;
// SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules
Some(components.get_ticks_unchecked(location.table_row))
table.get_ticks_unchecked(component_id, location.table_row)
}
StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get_ticks(entity),
}


@ -11,8 +11,8 @@ use core::{
cell::UnsafeCell,
fmt::{self, Formatter, Pointer},
marker::PhantomData,
mem::{align_of, ManuallyDrop},
num::NonZero,
mem::ManuallyDrop,
num::NonZeroUsize,
ptr::NonNull,
};
@ -535,7 +535,7 @@ impl<'a, T> From<&'a [T]> for ThinSlicePtr<'a, T> {
/// Creates a dangling pointer with specified alignment.
/// See [`NonNull::dangling`].
pub fn dangling_with_align(align: NonZero<usize>) -> NonNull<u8> {
pub const fn dangling_with_align(align: NonZeroUsize) -> NonNull<u8> {
debug_assert!(align.is_power_of_two(), "Alignment must be power of two.");
// SAFETY: The pointer will not be null, since it was created
// from the address of a `NonZero<usize>`.
@ -603,6 +603,7 @@ trait DebugEnsureAligned {
impl<T: Sized> DebugEnsureAligned for *mut T {
#[track_caller]
fn debug_ensure_aligned(self) -> Self {
use core::mem::align_of;
let align = align_of::<T>();
// Implementation shamelessly borrowed from the currently unstable
// ptr.is_aligned_to.