bevy_ecs: Use 32-bit entity ID cursor on platforms without AtomicI64 (#4452)

# Objective
- Fixes #4451

## Solution
- Conditionally compile the entity ID cursor as `AtomicIsize` (32 bits on the affected targets) instead of `AtomicI64` when the target platform does not support 64-bit atomics; see the sketch below.

- This effectively raises the MSRV to 1.60, since `#[cfg(target_has_atomic = "64")]` was only stabilized in that release. (Should this be noted in the changelog?)
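
As a rough illustration of the approach (not the exact `bevy_ecs` code), the cursor type can be selected at compile time with a pair of `cfg`-gated aliases. The `reserve_ids` function below is a hypothetical stand-in for the cursor arithmetic in `Entities::reserve_entities`; `AtomicIdCursor` and `IdCursor` mirror the names used in the diff.

```rust
use std::sync::atomic::Ordering;

// Preferred path: platforms with 64-bit atomics keep the i64 cursor.
#[cfg(target_has_atomic = "64")]
use std::sync::atomic::AtomicI64 as AtomicIdCursor;
#[cfg(target_has_atomic = "64")]
type IdCursor = i64;

// Fallback path: a pointer-sized cursor (32 bits on the affected targets).
// `cfg(target_has_atomic = "...")` is stable as of Rust 1.60, hence the MSRV bump.
#[cfg(not(target_has_atomic = "64"))]
use std::sync::atomic::AtomicIsize as AtomicIdCursor;
#[cfg(not(target_has_atomic = "64"))]
type IdCursor = isize;

/// Hypothetical stand-in for the cursor arithmetic in `Entities::reserve_entities`:
/// returns `(range_start, range_end)` for `count` newly claimed IDs.
fn reserve_ids(cursor: &AtomicIdCursor, count: u32) -> (IdCursor, IdCursor) {
    // On the 32-bit fallback this conversion can fail (and panic) if `count`
    // exceeds `IdCursor::MAX`; on 64-bit platforms it always succeeds.
    let count = IdCursor::try_from(count).unwrap();
    let range_end = cursor.fetch_sub(count, Ordering::Relaxed);
    (range_end - count, range_end)
}
```

The alias keeps the rest of the allocator identical on both kinds of platforms; only the casts change, from `as i64` to `as IdCursor`, or to a fallible `try_from` where a `u32` count could in principle exceed a 32-bit cursor.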

---

## Changelog
- Added `bevy_ecs` support for platforms without 64-bit atomic ints


## Migration Guide
N/A
Ian Chamberlain 2022-08-21 00:45:49 +00:00
parent 04538fd802
commit cde5ae8104


@@ -40,11 +40,20 @@ pub use self::serde::*;
 pub use map_entities::*;
 
 use crate::{archetype::ArchetypeId, storage::SparseSetIndex};
-use std::{
-    convert::TryFrom,
-    fmt, mem,
-    sync::atomic::{AtomicI64, Ordering},
-};
+use std::{convert::TryFrom, fmt, mem, sync::atomic::Ordering};
+
+#[cfg(target_has_atomic = "64")]
+use std::sync::atomic::AtomicI64 as AtomicIdCursor;
+#[cfg(target_has_atomic = "64")]
+type IdCursor = i64;
+
+/// Most modern platforms support 64-bit atomics, but some less-common platforms
+/// do not. This fallback allows compilation using a 32-bit cursor instead, with
+/// the caveat that some conversions may fail (and panic) at runtime.
+#[cfg(not(target_has_atomic = "64"))]
+use std::sync::atomic::AtomicIsize as AtomicIdCursor;
+#[cfg(not(target_has_atomic = "64"))]
+type IdCursor = isize;
 
 /// Lightweight identifier of an [entity](crate::entity).
 ///
@@ -291,7 +300,7 @@ pub struct Entities {
     ///
     /// Once `flush()` is done, `free_cursor` will equal `pending.len()`.
     pending: Vec<u32>,
-    free_cursor: AtomicI64,
+    free_cursor: AtomicIdCursor,
     /// Stores the number of free entities for [`len`](Entities::len)
     len: u32,
 }
@@ -304,8 +313,12 @@ impl Entities {
         // Use one atomic subtract to grab a range of new IDs. The range might be
         // entirely nonnegative, meaning all IDs come from the freelist, or entirely
         // negative, meaning they are all new IDs to allocate, or a mix of both.
-        let range_end = self.free_cursor.fetch_sub(count as i64, Ordering::Relaxed);
-        let range_start = range_end - count as i64;
+        let range_end = self
+            .free_cursor
+            // Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
+            // and use AtomicIsize instead (see note on `IdCursor`).
+            .fetch_sub(IdCursor::try_from(count).unwrap(), Ordering::Relaxed);
+        let range_start = range_end - IdCursor::try_from(count).unwrap();
 
         let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;
 
@@ -322,7 +335,7 @@ impl Entities {
             // In this example, we truncate the end to 0, leaving us with `-3..0`.
             // Then we negate these values to indicate how far beyond the end of `meta.end()`
             // to go, yielding `meta.len()+0 .. meta.len()+3`.
-            let base = self.meta.len() as i64;
+            let base = self.meta.len() as IdCursor;
 
             let new_id_end = u32::try_from(base - range_start).expect("too many entities");
 
@@ -359,7 +372,7 @@ impl Entities {
             // and farther beyond `meta.len()`.
             Entity {
                 generation: 0,
-                id: u32::try_from(self.meta.len() as i64 - n).expect("too many entities"),
+                id: u32::try_from(self.meta.len() as IdCursor - n).expect("too many entities"),
             }
         }
     }
@@ -377,7 +390,7 @@ impl Entities {
        self.verify_flushed();
        self.len += 1;
        if let Some(id) = self.pending.pop() {
-            let new_free_cursor = self.pending.len() as i64;
+            let new_free_cursor = self.pending.len() as IdCursor;
            *self.free_cursor.get_mut() = new_free_cursor;
            Entity {
                generation: self.meta[id as usize].generation,
@@ -399,14 +412,14 @@ impl Entities {
 
         let loc = if entity.id as usize >= self.meta.len() {
             self.pending.extend((self.meta.len() as u32)..entity.id);
-            let new_free_cursor = self.pending.len() as i64;
+            let new_free_cursor = self.pending.len() as IdCursor;
             *self.free_cursor.get_mut() = new_free_cursor;
             self.meta.resize(entity.id as usize + 1, EntityMeta::EMPTY);
             self.len += 1;
             None
         } else if let Some(index) = self.pending.iter().position(|item| *item == entity.id) {
             self.pending.swap_remove(index);
-            let new_free_cursor = self.pending.len() as i64;
+            let new_free_cursor = self.pending.len() as IdCursor;
             *self.free_cursor.get_mut() = new_free_cursor;
             self.len += 1;
             None
@@ -430,14 +443,14 @@ impl Entities {
 
         let result = if entity.id as usize >= self.meta.len() {
             self.pending.extend((self.meta.len() as u32)..entity.id);
-            let new_free_cursor = self.pending.len() as i64;
+            let new_free_cursor = self.pending.len() as IdCursor;
             *self.free_cursor.get_mut() = new_free_cursor;
             self.meta.resize(entity.id as usize + 1, EntityMeta::EMPTY);
             self.len += 1;
             AllocAtWithoutReplacement::DidNotExist
         } else if let Some(index) = self.pending.iter().position(|item| *item == entity.id) {
             self.pending.swap_remove(index);
-            let new_free_cursor = self.pending.len() as i64;
+            let new_free_cursor = self.pending.len() as IdCursor;
             *self.free_cursor.get_mut() = new_free_cursor;
             self.len += 1;
             AllocAtWithoutReplacement::DidNotExist
@@ -472,7 +485,7 @@ impl Entities {
 
         self.pending.push(entity.id);
 
-        let new_free_cursor = self.pending.len() as i64;
+        let new_free_cursor = self.pending.len() as IdCursor;
         *self.free_cursor.get_mut() = new_free_cursor;
         self.len -= 1;
         Some(loc)
@@ -483,7 +496,9 @@ impl Entities {
         self.verify_flushed();
 
         let freelist_size = *self.free_cursor.get_mut();
-        let shortfall = additional as i64 - freelist_size;
+        // Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
+        // and use AtomicIsize instead (see note on `IdCursor`).
+        let shortfall = IdCursor::try_from(additional).unwrap() - freelist_size;
         if shortfall > 0 {
             self.meta.reserve(shortfall as usize);
         }
@@ -540,7 +555,7 @@ impl Entities {
     }
 
     fn needs_flush(&mut self) -> bool {
-        *self.free_cursor.get_mut() != self.pending.len() as i64
+        *self.free_cursor.get_mut() != self.pending.len() as IdCursor
     }
 
     /// Allocates space for entities previously reserved with `reserve_entity` or