small ecs cleanup and remove_bundle drop bugfix (#2172)

- simplified the code around archetype generations a bit, as the usize::MAX special-case sentinel is not actually needed (see the sketch below)
- removed an unnecessary UnsafeCell around a pointer value that is never updated through shared references
- fixed drop behaviour when removing sparse-set components through the remove_bundle command, and added a test for it
Paweł Grabarz 2021-05-18 19:25:57 +00:00
parent 4563e69e06
commit 93cc7219bc
8 changed files with 85 additions and 76 deletions
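A condensed, illustrative sketch of the generation bookkeeping that the diffs below converge on. ArchetypeGeneration and initial() match the diff; Cache and update() are made-up stand-ins for QueryState and the two executors, and the sketch assumes the world's generation() reports the current archetype count, which the range indexing in the new code relies on:

```rust
// Made-up, self-contained sketch of the pattern introduced below: instead of a
// usize::MAX sentinel plus a special-case branch, the cached generation starts
// at 0 and `std::mem::replace` yields the half-open range of archetypes added
// since the last update.
#[derive(Clone, Copy, PartialEq, Eq)]
struct ArchetypeGeneration(usize);

impl ArchetypeGeneration {
    const fn initial() -> Self {
        ArchetypeGeneration(0)
    }

    fn value(self) -> usize {
        self.0
    }
}

// Hypothetical stand-in for QueryState / the executors, which all cache a generation.
struct Cache {
    archetype_generation: ArchetypeGeneration,
}

impl Cache {
    // Assumes `new_generation` equals the current number of archetypes, so
    // `old..new` indexes exactly the archetypes created since the last call.
    fn update(&mut self, new_generation: ArchetypeGeneration) -> std::ops::Range<usize> {
        let old = std::mem::replace(&mut self.archetype_generation, new_generation);
        old.value()..new_generation.value()
    }
}

fn main() {
    let mut cache = Cache { archetype_generation: ArchetypeGeneration::initial() };
    // First run sees every archetype; a run with no new archetypes sees an empty range.
    assert_eq!(cache.update(ArchetypeGeneration(3)), 0..3);
    assert_eq!(cache.update(ArchetypeGeneration(3)), 3..3);
}
```

Starting at 0 instead of usize::MAX means the first update naturally covers every existing archetype, so the special-case branch disappears.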

View file

@ -317,8 +317,8 @@ pub struct ArchetypeGeneration(usize);
impl ArchetypeGeneration {
#[inline]
pub fn new(generation: usize) -> Self {
ArchetypeGeneration(generation)
pub const fn initial() -> Self {
ArchetypeGeneration(0)
}
#[inline]

View file

@ -54,7 +54,7 @@ where
let mut state = Self {
world_id: world.id(),
archetype_generation: ArchetypeGeneration::new(usize::MAX),
archetype_generation: ArchetypeGeneration::initial(),
matched_table_ids: Vec::new(),
matched_archetype_ids: Vec::new(),
fetch_state,
@ -74,17 +74,10 @@ where
std::any::type_name::<Self>());
}
let archetypes = world.archetypes();
let old_generation = self.archetype_generation;
let archetype_index_range = if old_generation == archetypes.generation() {
0..0
} else {
self.archetype_generation = archetypes.generation();
if old_generation.value() == usize::MAX {
0..archetypes.len()
} else {
old_generation.value()..archetypes.len()
}
};
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
self.new_archetype(&archetypes[ArchetypeId::new(archetype_index)]);
}

View file

@ -18,8 +18,7 @@ pub struct SingleThreadedExecutor {
impl Default for SingleThreadedExecutor {
fn default() -> Self {
Self {
// MAX ensures access information will be initialized on first run.
archetype_generation: ArchetypeGeneration::new(usize::MAX),
archetype_generation: ArchetypeGeneration::initial(),
}
}
}
@ -46,24 +45,15 @@ impl SingleThreadedExecutor {
/// [update_archetypes] and updates cached archetype_component_access.
fn update_archetypes(&mut self, systems: &mut [ParallelSystemContainer], world: &World) {
let archetypes = world.archetypes();
let old_generation = self.archetype_generation;
let new_generation = archetypes.generation();
if old_generation == new_generation {
return;
}
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
let archetype_index_range = if old_generation.value() == usize::MAX {
0..archetypes.len()
} else {
old_generation.value()..archetypes.len()
};
for archetype in archetypes.archetypes[archetype_index_range].iter() {
for container in systems.iter_mut() {
let system = container.system_mut();
system.new_archetype(archetype);
}
}
self.archetype_generation = new_generation;
}
}

View file

@ -58,8 +58,7 @@ impl Default for ParallelExecutor {
fn default() -> Self {
let (finish_sender, finish_receiver) = async_channel::unbounded();
Self {
// MAX ensures access information will be initialized on first run.
archetype_generation: ArchetypeGeneration::new(usize::MAX),
archetype_generation: ArchetypeGeneration::initial(),
system_metadata: Default::default(),
finish_sender,
finish_receiver,
@ -152,17 +151,10 @@ impl ParallelExecutor {
/// [update_archetypes] and updates cached archetype_component_access.
fn update_archetypes(&mut self, systems: &mut [ParallelSystemContainer], world: &World) {
let archetypes = world.archetypes();
let old_generation = self.archetype_generation;
let new_generation = archetypes.generation();
if old_generation == new_generation {
return;
}
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
let archetype_index_range = if old_generation.value() == usize::MAX {
0..archetypes.len()
} else {
old_generation.value()..archetypes.len()
};
for archetype in archetypes.archetypes[archetype_index_range].iter() {
for (index, container) in systems.iter_mut().enumerate() {
let meta = &mut self.system_metadata[index];
@ -172,8 +164,6 @@ impl ParallelExecutor {
.extend(system.archetype_component_access());
}
}
self.archetype_generation = new_generation;
}
/// Populates `should_run` bitset, spawns tasks for systems that should run this iteration,

View file

@ -1,6 +1,5 @@
use std::{
alloc::{handle_alloc_error, Layout},
cell::UnsafeCell,
ptr::NonNull,
};
@ -9,8 +8,8 @@ pub struct BlobVec {
item_layout: Layout,
capacity: usize,
len: usize,
data: UnsafeCell<NonNull<u8>>,
swap_scratch: UnsafeCell<NonNull<u8>>,
data: NonNull<u8>,
swap_scratch: NonNull<u8>,
drop: unsafe fn(*mut u8),
}
@ -18,8 +17,8 @@ impl BlobVec {
pub fn new(item_layout: Layout, drop: unsafe fn(*mut u8), capacity: usize) -> BlobVec {
if item_layout.size() == 0 {
BlobVec {
swap_scratch: UnsafeCell::new(NonNull::dangling()),
data: UnsafeCell::new(NonNull::dangling()),
swap_scratch: NonNull::dangling(),
data: NonNull::dangling(),
capacity: usize::MAX,
len: 0,
item_layout,
@ -29,8 +28,8 @@ impl BlobVec {
let swap_scratch = NonNull::new(unsafe { std::alloc::alloc(item_layout) })
.unwrap_or_else(|| std::alloc::handle_alloc_error(item_layout));
let mut blob_vec = BlobVec {
swap_scratch: UnsafeCell::new(swap_scratch),
data: UnsafeCell::new(NonNull::dangling()),
swap_scratch,
data: NonNull::dangling(),
capacity: 0,
len: 0,
item_layout,
@ -81,9 +80,7 @@ impl BlobVec {
)
};
self.data = UnsafeCell::new(
NonNull::new(new_data).unwrap_or_else(|| handle_alloc_error(new_layout)),
);
self.data = NonNull::new(new_data).unwrap_or_else(|| handle_alloc_error(new_layout));
}
self.capacity = new_capacity;
}
@ -132,7 +129,7 @@ impl BlobVec {
pub unsafe fn swap_remove_and_forget_unchecked(&mut self, index: usize) -> *mut u8 {
debug_assert!(index < self.len());
let last = self.len - 1;
let swap_scratch = (*self.swap_scratch.get()).as_ptr();
let swap_scratch = self.swap_scratch.as_ptr();
std::ptr::copy_nonoverlapping(
self.get_unchecked(index),
swap_scratch,
@ -170,7 +167,7 @@ impl BlobVec {
/// must ensure rust mutability rules are not violated
#[inline]
pub unsafe fn get_ptr(&self) -> NonNull<u8> {
*self.data.get()
self.data
}
pub fn clear(&mut self) {
@ -199,7 +196,7 @@ impl Drop for BlobVec {
array_layout(&self.item_layout, self.capacity)
.expect("array layout should be valid"),
);
std::alloc::dealloc((*self.swap_scratch.get()).as_ptr(), self.item_layout);
std::alloc::dealloc(self.swap_scratch.as_ptr(), self.item_layout);
}
}
}
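Context for the BlobVec change above: the pointer fields are only ever reassigned through &mut self, so they never needed interior mutability in the first place. A minimal sketch of that rule, using a hypothetical PtrHolder type rather than the real BlobVec:

```rust
use std::ptr::NonNull;

// Hypothetical example type, not the real BlobVec: the pointer is only ever
// reassigned through `&mut self`, so a plain `NonNull<u8>` field is enough and
// no `UnsafeCell` (or unsafe deref) is needed to read or write it.
struct PtrHolder {
    data: NonNull<u8>,
}

impl PtrHolder {
    fn new() -> Self {
        PtrHolder { data: NonNull::dangling() }
    }

    // Reassignment requires `&mut self`, which already guarantees exclusive access.
    fn set(&mut self, ptr: NonNull<u8>) {
        self.data = ptr;
    }

    // Reading the pointer value through `&self` is fine for a plain field.
    fn get(&self) -> NonNull<u8> {
        self.data
    }
}

fn main() {
    let mut byte = 0u8;
    let ptr = NonNull::from(&mut byte);
    let mut holder = PtrHolder::new();
    holder.set(ptr);
    assert_eq!(holder.get(), ptr);
}
```

Dropping the UnsafeCell also removes the unsafe derefs that only existed to get through the wrapper, as seen in swap_remove_and_forget_unchecked and get_ptr above.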

View file

@ -180,10 +180,7 @@ impl ComponentSparseSet {
/// returned).
pub fn remove_and_forget(&mut self, entity: Entity) -> Option<*mut u8> {
self.sparse.remove(entity).map(|dense_index| {
// SAFE: unique access to ticks
unsafe {
(*self.ticks.get()).swap_remove(dense_index);
}
self.ticks.get_mut().swap_remove(dense_index);
self.entities.swap_remove(dense_index);
let is_last = dense_index == self.dense.len() - 1;
// SAFE: dense_index was just removed from `sparse`, which ensures that it is valid
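The ticks change above swaps a raw-pointer deref for UnsafeCell::get_mut, which hands out &mut T with no unsafe code whenever the caller already holds &mut self. A small illustrative sketch (Ticks and the Vec<u32> payload are stand-ins, not the real storage types):

```rust
use std::cell::UnsafeCell;

// Hypothetical stand-in for the ticks storage; the real field is not a Vec<u32>.
struct Ticks {
    inner: UnsafeCell<Vec<u32>>,
}

impl Ticks {
    fn swap_remove(&mut self, index: usize) -> u32 {
        // With `&mut self` in hand, `get_mut` returns `&mut Vec<u32>` safely,
        // replacing the old `unsafe { (*self.inner.get()).swap_remove(index) }`.
        self.inner.get_mut().swap_remove(index)
    }
}

fn main() {
    let mut ticks = Ticks { inner: UnsafeCell::new(vec![10, 20, 30]) };
    assert_eq!(ticks.swap_remove(0), 10);
    assert_eq!(*ticks.inner.get_mut(), vec![30, 20]);
}
```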

View file

@ -417,9 +417,29 @@ impl<T: Component> Command for RemoveResource<T> {
#[allow(clippy::float_cmp, clippy::approx_constant)]
mod tests {
use crate::{
component::{ComponentDescriptor, StorageType},
system::{CommandQueue, Commands},
world::World,
};
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
#[derive(Clone, Debug)]
struct DropCk(Arc<AtomicUsize>);
impl DropCk {
fn new_pair() -> (Self, Arc<AtomicUsize>) {
let atomic = Arc::new(AtomicUsize::new(0));
(DropCk(atomic.clone()), atomic)
}
}
impl Drop for DropCk {
fn drop(&mut self) {
self.0.as_ref().fetch_add(1, Ordering::Relaxed);
}
}
#[test]
fn commands() {
@ -454,10 +474,20 @@ mod tests {
#[test]
fn remove_components() {
let mut world = World::default();
struct DenseDropCk(DropCk);
world
.register_component(ComponentDescriptor::new::<DropCk>(StorageType::SparseSet))
.unwrap();
let mut command_queue = CommandQueue::default();
let (dense_dropck, dense_is_dropped) = DropCk::new_pair();
let dense_dropck = DenseDropCk(dense_dropck);
let (sparse_dropck, sparse_is_dropped) = DropCk::new_pair();
let entity = Commands::new(&mut command_queue, &world)
.spawn()
.insert_bundle((1u32, 2u64))
.insert_bundle((1u32, 2u64, dense_dropck, sparse_dropck))
.id();
command_queue.apply(&mut world);
let results_before = world
@ -471,8 +501,14 @@ mod tests {
Commands::new(&mut command_queue, &world)
.entity(entity)
.remove::<u32>()
.remove_bundle::<(u32, u64)>();
.remove_bundle::<(u32, u64, DenseDropCk, DropCk)>();
assert_eq!(dense_is_dropped.load(Ordering::Relaxed), 0);
assert_eq!(sparse_is_dropped.load(Ordering::Relaxed), 0);
command_queue.apply(&mut world);
assert_eq!(dense_is_dropped.load(Ordering::Relaxed), 1);
assert_eq!(sparse_is_dropped.load(Ordering::Relaxed), 1);
let results_after = world
.query::<(&u32, &u64)>()
.iter(&world)

View file

@ -325,7 +325,7 @@ impl<'w> EntityMut<'w> {
T::from_components(|| {
let component_id = bundle_components.next().unwrap();
// SAFE: entity location is valid and table row is removed below
remove_component(
take_component(
components,
storages,
old_archetype,
@ -406,17 +406,18 @@ impl<'w> EntityMut<'w> {
let entity = self.entity;
for component_id in bundle_info.component_ids.iter().cloned() {
if old_archetype.contains(component_id) {
// SAFE: entity location is valid and table row is removed below
unsafe {
remove_component(
components,
storages,
old_archetype,
removed_components,
component_id,
entity,
old_location,
);
removed_components
.get_or_insert_with(component_id, Vec::new)
.push(entity);
// Make sure to drop components stored in sparse sets.
// Dense components are dropped later in `move_to_and_drop_missing_unchecked`.
if let Some(StorageType::SparseSet) = old_archetype.get_storage_type(component_id) {
storages
.sparse_sets
.get_mut(component_id)
.unwrap()
.remove(entity);
}
}
}
@ -586,13 +587,18 @@ unsafe fn get_component_and_ticks(
}
}
/// Moves component data out of storage.
///
/// This function leaves the underlying memory unchanged, but the component behind the
/// returned pointer is semantically owned by the caller and will not be dropped in its original location.
/// The caller is responsible for dropping the component data behind the returned pointer.
///
/// # Safety
// `entity_location` must be within bounds of the given archetype and `entity` must exist inside the
// archetype
/// The relevant table row must be removed separately
/// `component_id` must be valid
/// - `entity_location` must be within bounds of the given archetype and `entity` must exist inside the archetype
/// - `component_id` must be valid
/// - The relevant table row **must be removed** by the caller once all components are taken
#[inline]
unsafe fn remove_component(
unsafe fn take_component(
components: &Components,
storages: &mut Storages,
archetype: &Archetype,