add dynamic plugin loading

This commit is contained in:
Carter Anderson 2020-01-20 20:10:40 -08:00
parent e8dfc0081b
commit 129a9747bb
51 changed files with 16362 additions and 72 deletions

6
.gitignore vendored
View file

@ -1,7 +1,3 @@
/target
**/*.rs.bk
Cargo.lock
tiny-town/target
**/*.rs.bk
tiny-town/Cargo.lock
Cargo.lock

View file

@ -5,7 +5,8 @@ authors = ["Carter Anderson <mcanders1@gmail.com>"]
edition = "2018"
[dependencies]
legion = { git = "https://github.com/TomGillen/legion.git", rev = "940ef3bfcb77e5d074ee3184b776ff1600da228d", features = ["serde-1"] }
# Modified to use std::any::type_name instead of std::any::TypeId
legion = { path = "bevy_legion", features = ["serde-1"] }
wgpu = { git = "https://github.com/gfx-rs/wgpu-rs.git", rev = "4a0da16fe6764c4e1dc918a31cbd7467d404df51"}
glam = "0.8.4"
winit = "0.20.0-alpha6"
@ -20,6 +21,7 @@ uuid = { version = "0.8", features = ["v4"] }
erased-serde = "0.3"
type-uuid = "0.1"
shaderc = "0.6"
libloading = "0.5.2"
bevy_derive = { path = "bevy_derive" }
bevy_transform = { path = "bevy_transform" }

View file

@ -23,3 +23,21 @@ pub fn derive_entity_archetype(input: TokenStream) -> TokenStream {
}
})
}
/// Derives a `_create_plugin` C-ABI entry point for the annotated plugin
/// struct so it can be constructed after being loaded from a dynamic library.
#[proc_macro_derive(RegisterAppPlugin)]
pub fn derive_app_plugin(input: TokenStream) -> TokenStream {
    let ast = parse_macro_input!(input as DeriveInput);
    let struct_name = &ast.ident;
    TokenStream::from(quote! {
        // NOTE(review): `*mut AppPlugin` is a fat trait-object pointer; its layout
        // across `extern "C"` is not a stable ABI — confirm the loading side uses
        // the exact same compiler/type, or pass a thin pointer instead.
        #[no_mangle]
        pub extern "C" fn _create_plugin() -> *mut AppPlugin {
            // TODO: without this the assembly does nothing. why is that the case?
            print!("");
            // make sure the constructor is the correct type.
            // NOTE(review): `#struct_name {}` only compiles for field-less structs.
            let object = #struct_name {};
            let boxed = Box::new(object);
            Box::into_raw(boxed)
        }
    })
}

21
bevy_legion/.travis.yml Normal file
View file

@ -0,0 +1,21 @@
language: rust
rust:
- stable
- beta
- nightly
matrix:
allow_failures:
- rust: nightly
fast_finish: true
cache: cargo
script:
- cargo test --all-features
- cargo test --no-default-features
- cargo test --no-default-features --features events
- cargo test --no-default-features --features par-iter
- cargo test --no-default-features --features ffi
- cargo test --release --all-features
- cargo test --release --no-default-features
- cargo test --release --no-default-features --features events
- cargo test --release --no-default-features --features par-iter
- cargo test --release --no-default-features --features ffi

62
bevy_legion/Cargo.toml Normal file
View file

@ -0,0 +1,62 @@
[package]
name = "legion"
version = "0.2.1"
description = "High performance entity component system (ECS) library"
authors = ["Thomas Gillen <thomas.gillen@googlemail.com>"]
repository = "https://github.com/TomGillen/legion"
keywords = ["ecs", "game"]
categories = ["game-engines", "data-structures"]
readme = "readme.md"
license = "MIT"
edition = "2018"
[badges]
travis-ci = { repository = "TomGillen/legion", branch = "master" }
[features]
default = ["par-iter", "par-schedule", "events", "ffi"]
par-iter = ["rayon"]
par-schedule = ["rayon", "crossbeam-queue"]
log = ["tracing/log", "tracing/log-always"]
ffi = []
serde-1 = ["serde"]
events = ["rayon"]
[dependencies]
parking_lot = "0.9"
downcast-rs = "1.0"
itertools = "0.8"
rayon = { version = "1.2", optional = true }
crossbeam-queue = { version = "0.2.0", optional = true }
crossbeam-channel = "0.4.0"
derivative = "1"
smallvec = "0.6"
bit-set = "0.5"
paste = "0.1"
tracing = "0.1"
metrics = { version = "0.12", optional = true }
serde = { version = "1", optional = true }
fxhash = "0.2"
[dev-dependencies]
criterion = "0.3"
cgmath = "0.17"
tracing-subscriber = "0.1.6"
legion = { features = ["serde-1"], path = "." }
serde_json = "1.0"
type-uuid = "0.1"
erased-serde = "0.3"
serde = { version = "1", features = ["derive"]}
uuid = { version = "0.8", features = ["v4"] }
[[bench]]
name = "benchmarks"
harness = false
[[bench]]
name = "parallel_query"
harness = false
[[bench]]
name = "transform"
harness = false

21
bevy_legion/LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Thomas Gillen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

BIN
bevy_legion/bench.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

View file

@ -0,0 +1,195 @@
use criterion::*;
use itertools::*;
use legion::prelude::*;
// Dummy benchmark component types; each wraps a single f32 so every component
// has identical size. `Tag` is used as a legion tag (archetype key), not a
// regular component.
#[derive(Copy, Clone, Debug, PartialEq)]
struct A(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct B(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct C(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct D(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct E(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct F(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct Tag(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct Position(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct Rotation(f32);

/// Inserts `count` entities into `world`. Each entity starts with `A(0.0)`
/// and is then passed through `num_components` initializer closures picked
/// from `variants`, producing a spread of different archetypes.
fn create_entities(
    world: &mut World,
    variants: &mut [Box<dyn FnMut(Entity, &mut World)>],
    num_components: usize,
    count: usize,
) {
    let len_variants = variants.len();
    // Infinite index stream: for each `step`, walk the variant indices with a
    // step-dependent permutation so successive entities receive different
    // combinations of initializers.
    let components = (0..)
        .flat_map(|step| (0..len_variants).map(move |i| (i + i * step) % len_variants))
        .chunks(num_components);
    for initializers in (&components).into_iter().take(count) {
        // `insert` returns the newly created entities; we add exactly one here.
        let entity = world.insert((), Some((A(0.0),)))[0];
        for i in initializers {
            let init = variants.get_mut(i).unwrap();
            init(entity, world);
        }
    }
}

/// Adds `count` "background noise" entities with mixed components/tags so the
/// benchmarked queries have non-matching archetypes to skip over.
fn add_background_entities(world: &mut World, count: usize) {
    create_entities(
        world,
        &mut [
            Box::new(|e, w| w.add_component(e, A(0.0))),
            Box::new(|e, w| w.add_component(e, B(0.0))),
            Box::new(|e, w| w.add_component(e, C(0.0))),
            Box::new(|e, w| w.add_tag(e, Tag(0.0))),
            Box::new(|e, w| w.add_component(e, D(0.0))),
            Box::new(|e, w| w.add_tag(e, Tag(1.0))),
            Box::new(|e, w| w.add_component(e, E(0.0))),
            Box::new(|e, w| w.add_tag(e, Tag(2.0))),
            Box::new(|e, w| w.add_component(e, F(0.0))),
            Box::new(|e, w| w.add_tag(e, Tag(3.0))),
        ],
        5,
        count,
    );
}
/// Builds a fresh world pre-populated with `n` (Position, Rotation) entities.
fn setup(n: usize) -> World {
    let universe = Universe::new();
    let mut world = universe.create_world();
    let bodies = (0..n).map(|_| (Position(0.), Rotation(0.)));
    world.insert((), bodies);
    world
}
/// Measures entity insert + delete throughput for batch sizes 0, 100, .. 900.
fn bench_create_delete(c: &mut Criterion) {
    c.bench_function_over_inputs(
        "create-delete",
        |b, count| {
            let mut world = setup(0);
            b.iter(|| {
                // Insert a batch of Position-only entities, then delete each one.
                let entities = world
                    .insert((), (0..*count).map(|_| (Position(0.),)))
                    .to_vec();
                for e in entities {
                    world.delete(e);
                }
            })
        },
        (0..10).map(|i| i * 100),
    );
}

/// Iterates a simple two-component query over 2000 matching entities plus
/// 10000 background entities.
fn bench_iter_simple(c: &mut Criterion) {
    c.bench_function("iter-simple", |b| {
        let mut world = setup(2000);
        add_background_entities(&mut world, 10000);
        let query = <(Read<Position>, Write<Rotation>)>::query();
        b.iter(|| {
            for (mut rot, pos) in query.iter_mut(&mut world) {
                rot.0 = pos.0;
            }
        });
    });
}

View file

@ -0,0 +1,177 @@
use criterion::*;
use itertools::*;
use legion::prelude::*;
use rayon::join;
// Dummy component types: every source entity has an A plus either a B or a C.
#[derive(Copy, Clone, Debug, PartialEq)]
struct A(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct B(f32);
#[derive(Copy, Clone, Debug, PartialEq)]
struct C(f32);
// Input description for the benchmark: which of the two archetypes an entity
// belongs to, together with its component values.
#[derive(Copy, Clone, Debug)]
enum Variants {
    AB(A, B),
    AC(A, C),
}
/// Maps a variant to its archetype index: AB -> 0, AC -> 1.
fn index(v: Variants) -> u8 {
    match v {
        Variants::AB(..) => 0,
        Variants::AC(..) => 1,
    }
}
/// Inverse of `index`: builds a zeroed variant for archetype `i`
/// (0 -> AB, anything else -> AC).
fn generate(i: u8) -> Variants {
    if i == 0 {
        Variants::AB(A(0.0), B(0.0))
    } else {
        Variants::AC(A(0.0), C(0.0))
    }
}
/// Produces the benchmark input: `n` AB variants followed by `n` AC variants.
fn data(n: usize) -> Vec<Variants> {
    let mut out = Vec::<Variants>::with_capacity(2 * n);
    out.extend((0..n).map(|_| generate(0)));
    out.extend((0..n).map(|_| generate(1)));
    out
}
/// Builds a world from the variant list, inserting each run of same-kind
/// variants as one batch (one archetype insertion).
fn setup(data: &[Variants]) -> World {
    let universe = Universe::new();
    let mut world = universe.create_world();
    // `group_by` only groups *consecutive* items; `data()` emits all ABs then
    // all ACs, so exactly two groups are produced here.
    for (i, group) in &data.iter().group_by(|x| index(**x)) {
        match i {
            0 => world.insert(
                (),
                group.map(|x| {
                    if let Variants::AB(a, b) = x {
                        (*a, *b)
                    } else {
                        // Unreachable: group 0 contains only AB by construction.
                        panic!();
                    }
                }),
            ),
            _ => world.insert(
                (),
                group.map(|x| {
                    if let Variants::AC(a, c) = x {
                        (*a, *c)
                    } else {
                        panic!();
                    }
                }),
            ),
        };
    }
    world
}
/// Splits the mixed variant list into two dense arrays — the "ideal" layout
/// an ECS tries to approximate.
fn setup_ideal(data: &[Variants]) -> (Vec<(A, B)>, Vec<(A, C)>) {
    let mut ab_pairs = Vec::<(A, B)>::new();
    let mut ac_pairs = Vec::<(A, C)>::new();
    for v in data {
        match *v {
            Variants::AB(a, b) => ab_pairs.push((a, b)),
            Variants::AC(a, c) => ac_pairs.push((a, c)),
        }
    }
    (ab_pairs, ac_pairs)
}
/// Baseline workload: update both dense arrays directly, no ECS involved.
fn ideal(ab: &mut Vec<(A, B)>, ac: &mut Vec<(A, C)>) {
    ab.iter_mut().for_each(|(a, b)| b.0 = a.0);
    ac.iter_mut().for_each(|(a, c)| c.0 = a.0);
}
/// Runs both queries one after the other on the calling thread.
fn sequential(world: &mut World) {
    let b_from_a = <(Write<B>, Read<A>)>::query();
    for (mut b, a) in b_from_a.iter_mut(world) {
        b.0 = a.0;
    }
    let c_from_a = <(Write<C>, Read<A>)>::query();
    for (mut c, a) in c_from_a.iter_mut(world) {
        c.0 = a.0;
    }
}
/// Runs the two queries concurrently via `rayon::join`.
/// NOTE(review): `iter_unchecked` bypasses legion's runtime borrow checking;
/// this appears to rely on the two closures writing disjoint components
/// (B vs C) — confirm that invariant before touching this code.
fn parallel(world: &mut World) {
    join(
        || unsafe {
            for (mut b, a) in <(Write<B>, Read<A>)>::query().iter_unchecked(&world) {
                b.0 = a.0;
            }
        },
        || unsafe {
            for (mut c, a) in <(Write<C>, Read<A>)>::query().iter_unchecked(&world) {
                c.0 = a.0;
            }
        },
    );
}

/// Same as `parallel`, but additionally parallelizes *within* each query
/// using `par_for_each_unchecked`. Same disjoint-write caveat applies.
fn par_for_each_mut(world: &mut World) {
    join(
        || unsafe {
            <(Write<B>, Read<A>)>::query().par_for_each_unchecked(&world, |(mut b, a)| {
                b.0 = a.0;
            });
        },
        || unsafe {
            <(Write<C>, Read<A>)>::query().par_for_each_unchecked(&world, |(mut c, a)| {
                c.0 = a.0;
            });
        },
    );
}
/// Compares the four strategies (dense baseline, sequential ECS, query-level
/// parallelism, intra-query parallelism) for input sizes 1000..=10000.
fn bench_ordered(c: &mut Criterion) {
    c.bench(
        "concurrent queries",
        ParameterizedBenchmark::new(
            "sequential ideal",
            |b, n| {
                let data = data(*n);
                let (mut ab, mut ac) = setup_ideal(&data);
                b.iter(|| ideal(&mut ab, &mut ac));
            },
            (1..11).map(|i| i * 1000),
        )
        .with_function("sequential", |b, n| {
            let data = data(*n);
            let mut world = setup(&data);
            b.iter(|| sequential(&mut world));
        })
        .with_function("parallel", |b, n| {
            let data = data(*n);
            let mut world = setup(&data);
            // NOTE(review): the empty join presumably runs the timed closure
            // inside the rayon pool — confirm intent.
            join(|| {}, || b.iter(|| parallel(&mut world)));
        })
        .with_function("par_for_each_mut", |b, n| {
            let data = data(*n);
            let mut world = setup(&data);
            join(|| {}, || b.iter(|| par_for_each_mut(&mut world)));
        }),
    );
}

criterion_group!(iterate, bench_ordered);
criterion_main!(iterate);

View file

@ -0,0 +1,111 @@
use criterion::*;
use cgmath::prelude::*;
use cgmath::{vec3, Matrix4, Quaternion, Vector3};
use legion::prelude::*;
use rayon::join;
// Transform components: a full position/orientation/scale decomposition plus
// the composed model matrix that the benchmark recomputes.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Position(Vector3<f32>);
#[derive(Copy, Clone, Debug, PartialEq)]
struct Orientation(Quaternion<f32>);
#[derive(Copy, Clone, Debug, PartialEq)]
struct Scale(Vector3<f32>);
#[derive(Copy, Clone, Debug, PartialEq)]
struct Transform(Matrix4<f32>);

/// Builds `n` identical tuples: zero position/scale, identity orientation and
/// transform.
fn data(n: usize) -> Vec<(Position, Orientation, Scale, Transform)> {
    let mut v = Vec::<(Position, Orientation, Scale, Transform)>::new();
    for _ in 0..n {
        v.push((
            Position(vec3(0.0, 0.0, 0.0)),
            Orientation(Quaternion::new(1.0, 0.0, 0.0, 0.0)),
            Scale(vec3(0.0, 0.0, 0.0)),
            Transform(Matrix4::identity()),
        ));
    }
    v
}
/// Creates a world containing one entity per tuple in `data`.
fn setup(data: Vec<(Position, Orientation, Scale, Transform)>) -> World {
    let universe = Universe::new();
    let mut world = universe.create_world();
    world.insert((), data);
    world
}
/// Composes a model matrix from the decomposed transform.
/// NOTE(review): the order here is scale * rotation * translation, i.e. the
/// translation is applied first — unusual versus the common T*R*S convention.
/// The benchmark only measures the arithmetic, but confirm before reusing.
fn process(
    position: &Vector3<f32>,
    orientation: &Quaternion<f32>,
    scale: &Vector3<f32>,
) -> Matrix4<f32> {
    let rot: Matrix4<f32> = (*orientation).into();
    Matrix4::from_nonuniform_scale(scale.x, scale.y, scale.z)
        * rot
        * Matrix4::from_translation(*position)
}
/// Baseline: recompute every transform directly over the dense Vec.
fn ideal(data: &mut Vec<(Position, Orientation, Scale, Transform)>) {
    data.iter_mut().for_each(|(pos, orient, scale, trans)| {
        trans.0 = process(&pos.0, &orient.0, &scale.0);
    });
}
/// Recomputes every transform sequentially through a four-component query.
fn sequential(world: &mut World) {
    for (pos, orient, scale, mut trans) in <(
        Read<Position>,
        Read<Orientation>,
        Read<Scale>,
        Write<Transform>,
    )>::query()
    .iter_mut(world)
    {
        trans.0 = process(&pos.0, &orient.0, &scale.0);
    }
}

/// Recomputes every transform using legion's internally parallel iteration.
fn par_for_each_mut(world: &mut World) {
    <(
        Read<Position>,
        Read<Orientation>,
        Read<Scale>,
        Write<Transform>,
    )>::query()
    .par_for_each_mut(world, |(pos, orient, scale, mut trans)| {
        trans.0 = process(&pos.0, &orient.0, &scale.0);
    });
}

/// Benchmarks the dense-Vec baseline against sequential and parallel ECS
/// iteration for input sizes 1000..=10000.
fn bench_transform(c: &mut Criterion) {
    c.bench(
        "update transform (experimental)",
        ParameterizedBenchmark::new(
            "ideal sequential",
            |b, n| {
                let mut data = data(*n);
                b.iter(|| ideal(&mut data));
            },
            (1..11).map(|i| i * 1000),
        )
        .with_function("sequential", |b, n| {
            let data = data(*n);
            let mut world = setup(data);
            b.iter(|| sequential(&mut world));
        })
        .with_function("par_for_each_mut", |b, n| {
            let data = data(*n);
            let mut world = setup(data);
            // NOTE(review): the empty join presumably runs the timed closure
            // inside the rayon pool — confirm intent.
            join(|| {}, || b.iter(|| par_for_each_mut(&mut world)));
        }),
    );
}

criterion_group!(iterate, bench_transform);
criterion_main!(iterate);

1
bevy_legion/clippy.toml Normal file
View file

@ -0,0 +1 @@
type-complexity-threshold = 500

View file

@ -0,0 +1,5 @@
[book]
title = "The Legion Book"
multilingual = false
author = "Walter Pearce"
description = "Introduction to Legion ECS"

View file

@ -0,0 +1,33 @@
# Introduction
Welcome to the Legion book! This book is intended to be a summary overview of legion, including:
- An overview of how to use it
- Some examples of different use case scenarios
- How it is different from other Entity-Component-Systems in the Rust ecosystem
- Overviews of some pertinent internals
This book assumes a general understanding of the concepts of the Entity-Component-System design and data composition as a design pattern. If you need a summary of what an ECS is, please see the [Wikipedia article on ECS].
## Design
Legion's internal architecture is heavily inspired by the new Unity ECS architecture [^1], while the publicly facing API is strongly built upon specs [^2], expanding on it and learning from many of the faults found in that API.
#### Quick Version
The core concept of Legion design is based around the concept of `Entities`, `Archetypes` and `Chunks`. These three core concepts are the building blocks of legion, and its entity component system.
##### Entities
Entities are strictly IDs, allocated within a given `Universe` of legion, which allow for uniquely referencing component instances. IDs may be reused generationally, but legion guarantees that they are unique in any given universe; this is accomplished by providing each `World` in a `Universe` its own Entity Allocator, which will be unique in that universe.
##### Archetypes
An Archetype is considered a "Grouping of Components and Tags". Entities may have varying numbers and types of components; any combination of these tags and components is considered an `Archetype`. In legion, entity storage and parallelization of system execution are all centered on this concept of Archetypes, or like-entities.
## Other resources
[^1]: https://docs.unity3d.com/Packages/com.unity.entities@0.1/manual/ecs_core.html
[^2]: https://github.com/amethyst/specs
[Wikipedia article on ECS]: https://en.wikipedia.org/wiki/Entity_component_system

View file

@ -0,0 +1,5 @@
# The Legion Book
- [Introduction](01_introduction.md)
- [Hello World](02_hello_world.md)

View file

@ -0,0 +1,114 @@
use legion::prelude::*;
// Simple position/velocity component types used by the example.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Pos(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Vel(f32, f32, f32);
// Example resources: world-global singleton data that systems declare access to.
#[derive(Clone)]
pub struct ExampleResource1(String);
#[derive(Clone)]
pub struct ExampleResource2(String);
/// Walks through the core legion workflow: resources, entity insertion,
/// direct queries, systems (with resource access and command buffers),
/// thread-local systems, and schedule execution.
fn main() {
    let _ = tracing_subscriber::fmt::try_init();
    // create world
    let universe = Universe::new();
    let mut world = universe.create_world();
    // Insert resources into the world.
    // Resources are also dynamically scheduled just like components, so the
    // accesses declared within a SystemBuilder are correct.
    // Any resource accessed by systems *must be* manually inserted beforehand, otherwise it will panic.
    world
        .resources
        .insert(ExampleResource1("ExampleResource1".to_string()));
    world
        .resources
        .insert(ExampleResource2("ExampleResource2".to_string()));
    // create entities
    // An insert call is used to insert matching entities into the world.
    let entities = world
        .insert(
            (),
            vec![
                (Pos(1., 2., 3.), Vel(1., 2., 3.)),
                (Pos(1., 2., 3.), Vel(1., 2., 3.)),
                (Pos(1., 2., 3.), Vel(1., 2., 3.)),
                (Pos(1., 2., 3.), Vel(1., 2., 3.)),
            ],
        )
        .to_vec();
    // update positions
    // This example shows the use of `iter`, which is default mutable, across a query.
    let query = <(Write<Pos>, Read<Vel>)>::query();
    for (mut pos, vel) in query.iter_mut(&mut world) {
        pos.0 += vel.0;
        pos.1 += vel.1;
        pos.2 += vel.2;
    }
    // update positions using a system
    let update_positions = SystemBuilder::new("update_positions")
        .write_resource::<ExampleResource1>()
        .read_resource::<ExampleResource2>()
        .with_query(<(Write<Pos>, Read<Vel>)>::query())
        .build(|_, mut world, (res1, res2), query| {
            res1.0 = res2.0.clone(); // Write the mutable resource from the immutable resource
            for (mut pos, vel) in query.iter_mut(&mut world) {
                pos.0 += vel.0;
                pos.1 += vel.1;
                pos.2 += vel.2;
            }
        });
    // Uses the command buffer to insert an entity into the world every frame.
    let entity = entities[0];
    let command_buffer_usage = SystemBuilder::new("command_buffer_usage")
        .read_resource::<ExampleResource1>()
        .write_resource::<ExampleResource2>()
        // Read and write component definitions allow us to declare access to a component across all archetypes
        // This means we can use the SubWorld provided to the system as a `World` for that component.
        .write_component::<Pos>()
        .build(move |command_buffer, world, (res1, res2), _| {
            res2.0 = res1.0.clone(); // Write the mutable resource from the immutable resource
            // Read a component from the SubWorld.
            let _ = world.get_component_mut::<Pos>(entity).unwrap();
            // Queued insertions are applied when the command buffer is flushed.
            let _entities = command_buffer.insert(
                (),
                vec![
                    (Pos(1., 2., 3.), Vel(1., 2., 3.)),
                    (Pos(1., 2., 3.), Vel(1., 2., 3.)),
                ],
            );
        });
    let thread_local_example = Box::new(|world: &mut World| {
        // This is an example of a thread local system which has full, exclusive mutable access to the world.
        let query = <(Write<Pos>, Read<Vel>)>::query();
        for (mut pos, vel) in query.iter_mut(world) {
            pos.0 += vel.0;
            pos.1 += vel.1;
            pos.2 += vel.2;
        }
    });
    let mut schedule = Schedule::builder()
        .add_system(update_positions)
        .add_system(command_buffer_usage)
        // This flushes all command buffers of all systems.
        .flush()
        // a thread local system or function will wait for all previous systems to finish running,
        // and then take exclusive access of the world.
        .add_thread_local_fn(thread_local_example)
        .build();
    // Execute a frame of the schedule.
    schedule.execute(&mut world);
}

View file

@ -0,0 +1,464 @@
use legion::{
entity::EntityAllocator,
prelude::*,
storage::{
ArchetypeDescription, ComponentMeta, ComponentResourceSet, ComponentTypeId, TagMeta,
TagStorage, TagTypeId,
},
};
use serde::{
de::{self, DeserializeSeed, IgnoredAny, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::{any::TypeId, cell::RefCell, collections::HashMap, marker::PhantomData, ptr::NonNull};
use type_uuid::TypeUuid;
// Serializable components identified by a stable UUID, used in the serialized
// data in place of Rust's build-dependent TypeId.
#[derive(TypeUuid, Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
#[uuid = "5fd8256d-db36-4fe2-8211-c7b3446e1927"]
struct Pos(f32, f32, f32);
#[derive(TypeUuid, Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
#[uuid = "14dec17f-ae14-40a3-8e44-e487fc423287"]
struct Vel(f32, f32, f32);
// Deliberately not registered for serialization; demonstrates filtering.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Unregistered(f32, f32, f32);

/// DeserializeSeed that writes a single deserialized `T` directly into raw
/// component storage at `ptr` instead of returning it by value.
struct ComponentDeserializer<'de, T: Deserialize<'de>> {
    ptr: *mut T,
    _marker: PhantomData<&'de T>,
}
impl<'de, T: Deserialize<'de> + 'static> DeserializeSeed<'de> for ComponentDeserializer<'de, T> {
    type Value = ();
    /// Deserializes one `T` and writes it to `self.ptr`.
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = <T as Deserialize<'de>>::deserialize(deserializer)?;
        // SAFETY(review): `ptr` must be valid, aligned, writable storage for a
        // `T`; `write` does not drop any previous value, so the slot is assumed
        // uninitialized — upheld by the caller (`visit_seq` below).
        unsafe {
            std::ptr::write(self.ptr, value);
        }
        Ok(())
    }
}
/// Deserializes a sequence of `T` components straight into chunk storage
/// slices obtained on demand from `get_next_storage_fn`.
struct ComponentSeqDeserializer<'a, T> {
    // Yields (pointer, element count) of the next storage slice to fill, or
    // None when no storage remains.
    get_next_storage_fn: &'a mut dyn FnMut() -> Option<(NonNull<u8>, usize)>,
    _marker: PhantomData<T>,
}
impl<'de, 'a, T: for<'b> Deserialize<'b> + 'static> DeserializeSeed<'de>
    for ComponentSeqDeserializer<'a, T>
{
    type Value = ();
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Delegates to Self's Visitor impl (`visit_seq`).
        deserializer.deserialize_seq(self)
    }
}
impl<'de, 'a, T: for<'b> Deserialize<'b> + 'static> Visitor<'de>
    for ComponentSeqDeserializer<'a, T>
{
    type Value = ();
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("sequence of objects")
    }
    /// Fills chunk storage slices from the serialized sequence, requesting a
    /// new slice via `get_next_storage_fn` until both the input sequence and
    /// the available storage are exhausted. Panics if they disagree in length.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: de::SeqAccess<'de>,
    {
        let size = seq.size_hint();
        for _ in 0..size.unwrap_or(std::usize::MAX) {
            match (self.get_next_storage_fn)() {
                Some((storage_ptr, storage_len)) => {
                    let storage_ptr = storage_ptr.as_ptr() as *mut T;
                    for idx in 0..storage_len {
                        // SAFETY: idx < storage_len, so the offset stays inside
                        // the slice handed out by `get_next_storage_fn`.
                        let element_ptr = unsafe { storage_ptr.add(idx) };
                        // `?.is_none()` replaces `if let None = ...` (clippy idiom).
                        if seq
                            .next_element_seed(ComponentDeserializer {
                                ptr: element_ptr,
                                _marker: PhantomData,
                            })?
                            .is_none()
                        {
                            panic!(
                                "expected {} elements in chunk but only {} found",
                                storage_len, idx
                            );
                        }
                    }
                }
                None => {
                    if seq.next_element::<IgnoredAny>()?.is_some() {
                        panic!("unexpected element when there was no storage space available");
                    } else {
                        // No more elements and no more storage - that's what we want!
                        break;
                    }
                }
            }
        }
        Ok(())
    }
}
/// Type-erased (de)serialization hooks for one tag type, keyed by its stable
/// UUID and its runtime TypeId.
#[derive(Clone)]
struct TagRegistration {
    uuid: type_uuid::Bytes,
    ty: TypeId,
    tag_serialize_fn: fn(&TagStorage, &mut dyn FnMut(&dyn erased_serde::Serialize)),
    tag_deserialize_fn: fn(
        deserializer: &mut dyn erased_serde::Deserializer,
        &mut TagStorage,
    ) -> Result<(), erased_serde::Error>,
    register_tag_fn: fn(&mut ArchetypeDescription),
}
impl TagRegistration {
    /// Builds the registration for tag type `T`, capturing monomorphized
    /// function pointers for serialization, deserialization, and archetype
    /// registration.
    fn of<
        T: TypeUuid
            + Serialize
            + for<'de> Deserialize<'de>
            + PartialEq
            + Clone
            + Send
            + Sync
            + 'static,
    >() -> Self {
        Self {
            uuid: T::UUID,
            ty: TypeId::of::<T>(),
            tag_serialize_fn: |tag_storage, serialize_fn| {
                // it's safe because we know this is the correct type due to lookup
                let slice = unsafe { tag_storage.data_slice::<T>() };
                serialize_fn(&&*slice);
            },
            tag_deserialize_fn: |deserializer, tag_storage| {
                // TODO implement visitor to avoid allocation of Vec
                let tag_vec = <Vec<T> as Deserialize>::deserialize(deserializer)?;
                for tag in tag_vec {
                    // Tag types should line up, making this safe
                    unsafe {
                        tag_storage.push(tag);
                    }
                }
                Ok(())
            },
            register_tag_fn: |desc| {
                desc.register_tag::<T>();
            },
        }
    }
}
/// Type-erased (de)serialization hooks for one component type; the component
/// analogue of `TagRegistration`.
#[derive(Clone)]
struct ComponentRegistration {
    uuid: type_uuid::Bytes,
    ty: TypeId,
    comp_serialize_fn: fn(&ComponentResourceSet, &mut dyn FnMut(&dyn erased_serde::Serialize)),
    comp_deserialize_fn: fn(
        deserializer: &mut dyn erased_serde::Deserializer,
        get_next_storage_fn: &mut dyn FnMut() -> Option<(NonNull<u8>, usize)>,
    ) -> Result<(), erased_serde::Error>,
    register_comp_fn: fn(&mut ArchetypeDescription),
}
impl ComponentRegistration {
    /// Builds the registration for component type `T`.
    fn of<T: TypeUuid + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static>() -> Self {
        Self {
            uuid: T::UUID,
            ty: TypeId::of::<T>(),
            comp_serialize_fn: |comp_storage, serialize_fn| {
                // it's safe because we know this is the correct type due to lookup
                let slice = unsafe { comp_storage.data_slice::<T>() };
                serialize_fn(&*slice);
            },
            comp_deserialize_fn: |deserializer, get_next_storage_fn| {
                // Writes components directly into chunk storage; see
                // ComponentSeqDeserializer for the storage contract.
                let comp_seq_deser = ComponentSeqDeserializer::<T> {
                    get_next_storage_fn,
                    _marker: PhantomData,
                };
                comp_seq_deser.deserialize(deserializer)?;
                Ok(())
            },
            register_comp_fn: |desc| {
                desc.register_component::<T>();
            },
        }
    }
}
/// On-disk form of an archetype: its tag and component types listed by UUID.
#[derive(Serialize, Deserialize)]
struct SerializedArchetypeDescription {
    tag_types: Vec<type_uuid::Bytes>,
    component_types: Vec<type_uuid::Bytes>,
}
/// WorldSerializer implementation: serializes only registered types and maps
/// each entity to a freshly generated UUID.
struct SerializeImpl {
    tag_types: HashMap<TypeId, TagRegistration>,
    comp_types: HashMap<TypeId, ComponentRegistration>,
    // RefCell: serialization receives &self but must record new entity UUIDs.
    entity_map: RefCell<HashMap<Entity, uuid::Bytes>>,
}
impl legion::ser::WorldSerializer for SerializeImpl {
    // A tag/component is serializable iff its TypeId was registered.
    fn can_serialize_tag(&self, ty: &TagTypeId, _meta: &TagMeta) -> bool {
        self.tag_types.get(&ty.0).is_some()
    }
    fn can_serialize_component(&self, ty: &ComponentTypeId, _meta: &ComponentMeta) -> bool {
        self.comp_types.get(&ty.0).is_some()
    }
    /// Serializes an archetype as the UUID lists of its registered tag and
    /// component types; unregistered types are silently filtered out.
    fn serialize_archetype_description<S: Serializer>(
        &self,
        serializer: S,
        archetype_desc: &ArchetypeDescription,
    ) -> Result<S::Ok, S::Error> {
        let tags_to_serialize = archetype_desc
            .tags()
            .iter()
            .filter_map(|(ty, _)| self.tag_types.get(&ty.0))
            .map(|reg| reg.uuid)
            .collect::<Vec<_>>();
        let components_to_serialize = archetype_desc
            .components()
            .iter()
            .filter_map(|(ty, _)| self.comp_types.get(&ty.0))
            .map(|reg| reg.uuid)
            .collect::<Vec<_>>();
        SerializedArchetypeDescription {
            tag_types: tags_to_serialize,
            component_types: components_to_serialize,
        }
        .serialize(serializer)
    }
    /// Serializes a component slice through the registered type-erased hook.
    /// The RefCell/Option dance smuggles the one-shot serializer `S` into the
    /// `FnMut` callback and its result back out.
    fn serialize_components<S: Serializer>(
        &self,
        serializer: S,
        component_type: &ComponentTypeId,
        _component_meta: &ComponentMeta,
        components: &ComponentResourceSet,
    ) -> Result<S::Ok, S::Error> {
        if let Some(reg) = self.comp_types.get(&component_type.0) {
            let result = RefCell::new(None);
            let serializer = RefCell::new(Some(serializer));
            {
                let mut result_ref = result.borrow_mut();
                (reg.comp_serialize_fn)(components, &mut |serialize| {
                    result_ref.replace(erased_serde::serialize(
                        serialize,
                        serializer.borrow_mut().take().unwrap(),
                    ));
                });
            }
            return result.borrow_mut().take().unwrap();
        }
        panic!(
            "received unserializable type {:?}, this should be filtered by can_serialize",
            component_type
        );
    }
    /// Tag analogue of `serialize_components`; same callback pattern.
    fn serialize_tags<S: Serializer>(
        &self,
        serializer: S,
        tag_type: &TagTypeId,
        _tag_meta: &TagMeta,
        tags: &TagStorage,
    ) -> Result<S::Ok, S::Error> {
        if let Some(reg) = self.tag_types.get(&tag_type.0) {
            let result = RefCell::new(None);
            let serializer = RefCell::new(Some(serializer));
            {
                let mut result_ref = result.borrow_mut();
                (reg.tag_serialize_fn)(tags, &mut |serialize| {
                    result_ref.replace(erased_serde::serialize(
                        serialize,
                        serializer.borrow_mut().take().unwrap(),
                    ));
                });
            }
            return result.borrow_mut().take().unwrap();
        }
        panic!(
            "received unserializable type {:?}, this should be filtered by can_serialize",
            tag_type
        );
    }
    /// Serializes entities as UUIDs, generating and recording a fresh UUID for
    /// any entity not yet in the map.
    fn serialize_entities<S: Serializer>(
        &self,
        serializer: S,
        entities: &[Entity],
    ) -> Result<S::Ok, S::Error> {
        let mut uuid_map = self.entity_map.borrow_mut();
        serializer.collect_seq(entities.iter().map(|e| {
            *uuid_map
                .entry(*e)
                .or_insert_with(|| *uuid::Uuid::new_v4().as_bytes())
        }))
    }
}
/// WorldDeserializer counterpart of `SerializeImpl`: looks registrations up
/// both by TypeId (for storage access) and by UUID (for type identities read
/// from the serialized data).
struct DeserializeImpl {
    tag_types: HashMap<TypeId, TagRegistration>,
    comp_types: HashMap<TypeId, ComponentRegistration>,
    tag_types_by_uuid: HashMap<type_uuid::Bytes, TagRegistration>,
    comp_types_by_uuid: HashMap<type_uuid::Bytes, ComponentRegistration>,
    // Maps serialized UUIDs back to the freshly allocated entities.
    entity_map: RefCell<HashMap<uuid::Bytes, Entity>>,
}
impl legion::de::WorldDeserializer for DeserializeImpl {
    /// Rebuilds an ArchetypeDescription from its serialized UUID lists; types
    /// that are no longer registered are skipped.
    fn deserialize_archetype_description<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
    ) -> Result<ArchetypeDescription, <D as Deserializer<'de>>::Error> {
        let serialized_desc =
            <SerializedArchetypeDescription as Deserialize>::deserialize(deserializer)?;
        let mut desc = ArchetypeDescription::default();
        for tag in serialized_desc.tag_types {
            if let Some(reg) = self.tag_types_by_uuid.get(&tag) {
                (reg.register_tag_fn)(&mut desc);
            }
        }
        for comp in serialized_desc.component_types {
            if let Some(reg) = self.comp_types_by_uuid.get(&comp) {
                (reg.register_comp_fn)(&mut desc);
            }
        }
        Ok(desc)
    }
    /// Deserializes a component stream directly into chunk storage via the
    /// registered hook; unknown component types are consumed with IgnoredAny.
    fn deserialize_components<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
        component_type: &ComponentTypeId,
        _component_meta: &ComponentMeta,
        get_next_storage_fn: &mut dyn FnMut() -> Option<(NonNull<u8>, usize)>,
    ) -> Result<(), <D as Deserializer<'de>>::Error> {
        if let Some(reg) = self.comp_types.get(&component_type.0) {
            let mut erased = erased_serde::Deserializer::erase(deserializer);
            (reg.comp_deserialize_fn)(&mut erased, get_next_storage_fn)
                .map_err(<<D as serde::Deserializer<'de>>::Error as serde::de::Error>::custom)?;
        } else {
            <IgnoredAny>::deserialize(deserializer)?;
        }
        Ok(())
    }
    /// Tag analogue of `deserialize_components`.
    fn deserialize_tags<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
        tag_type: &TagTypeId,
        _tag_meta: &TagMeta,
        tags: &mut TagStorage,
    ) -> Result<(), <D as Deserializer<'de>>::Error> {
        if let Some(reg) = self.tag_types.get(&tag_type.0) {
            let mut erased = erased_serde::Deserializer::erase(deserializer);
            (reg.tag_deserialize_fn)(&mut erased, tags)
                .map_err(<<D as serde::Deserializer<'de>>::Error as serde::de::Error>::custom)?;
        } else {
            <IgnoredAny>::deserialize(deserializer)?;
        }
        Ok(())
    }
    /// Allocates a fresh Entity for each serialized UUID and records the
    /// uuid -> entity mapping for later lookups.
    fn deserialize_entities<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
        entity_allocator: &EntityAllocator,
        entities: &mut Vec<Entity>,
    ) -> Result<(), <D as Deserializer<'de>>::Error> {
        let entity_uuids = <Vec<uuid::Bytes> as Deserialize>::deserialize(deserializer)?;
        let mut entity_map = self.entity_map.borrow_mut();
        for id in entity_uuids {
            let entity = entity_allocator.create_entity();
            entity_map.insert(id, entity);
            entities.push(entity);
        }
        Ok(())
    }
}
/// Round-trip demo: serialize a world to JSON, deserialize it into a new
/// world, re-serialize that, and assert the two JSON payloads are identical.
fn main() {
    // create world
    let universe = Universe::new();
    let mut world = universe.create_world();
    // Pos and Vel are both serializable, so all components in this chunkset will be serialized
    world.insert(
        (),
        vec![
            (Pos(1., 2., 3.), Vel(1., 2., 3.)),
            (Pos(1., 2., 3.), Vel(1., 2., 3.)),
            (Pos(1., 2., 3.), Vel(1., 2., 3.)),
            (Pos(1., 2., 3.), Vel(1., 2., 3.)),
        ],
    );
    // Unserializable components are not serialized, so only the Pos components should be serialized in this chunkset
    for _ in 0..1000 {
        world.insert(
            (Pos(4., 5., 6.), Unregistered(4., 5., 6.)),
            vec![
                (Pos(1., 2., 3.), Unregistered(4., 5., 6.)),
                (Pos(1., 2., 3.), Unregistered(4., 5., 6.)),
                (Pos(1., 2., 3.), Unregistered(4., 5., 6.)),
                (Pos(1., 2., 3.), Unregistered(4., 5., 6.)),
            ],
        );
    }
    // Entities with no serializable components are not serialized, so this entire chunkset should be skipped in the output
    world.insert(
        (Unregistered(4., 5., 6.),),
        vec![(Unregistered(4., 5., 6.),), (Unregistered(4., 5., 6.),)],
    );
    // Register Pos/Vel both as components and as tags for this example.
    let comp_registrations = [
        ComponentRegistration::of::<Pos>(),
        ComponentRegistration::of::<Vel>(),
    ];
    let tag_registrations = [TagRegistration::of::<Pos>(), TagRegistration::of::<Vel>()];
    use std::iter::FromIterator;
    let ser_helper = SerializeImpl {
        comp_types: HashMap::from_iter(comp_registrations.iter().map(|reg| (reg.ty, reg.clone()))),
        tag_types: HashMap::from_iter(tag_registrations.iter().map(|reg| (reg.ty, reg.clone()))),
        entity_map: RefCell::new(HashMap::new()),
    };
    let serializable = legion::ser::serializable_world(&world, &ser_helper);
    let serialized_data = serde_json::to_string(&serializable).unwrap();
    // Build the deserializer from the same registrations, adding UUID-keyed
    // lookup tables for resolving types found in the serialized data.
    let de_helper = DeserializeImpl {
        tag_types_by_uuid: HashMap::from_iter(
            ser_helper
                .tag_types
                .iter()
                .map(|reg| (reg.1.uuid, reg.1.clone())),
        ),
        comp_types_by_uuid: HashMap::from_iter(
            ser_helper
                .comp_types
                .iter()
                .map(|reg| (reg.1.uuid, reg.1.clone())),
        ),
        tag_types: ser_helper.tag_types,
        comp_types: ser_helper.comp_types,
        // re-use the entity-uuid mapping, inverted to uuid -> entity
        entity_map: RefCell::new(HashMap::from_iter(
            ser_helper
                .entity_map
                .into_inner()
                .into_iter()
                .map(|(e, uuid)| (uuid, e)),
        )),
    };
    let mut deserialized_world = universe.create_world();
    let mut deserializer = serde_json::Deserializer::from_str(&serialized_data);
    legion::de::deserialize(&mut deserialized_world, &de_helper, &mut deserializer).unwrap();
    // Serialize the freshly deserialized world with the same entity UUIDs so
    // the output is byte-comparable with the first pass.
    let ser_helper = SerializeImpl {
        tag_types: de_helper.tag_types,
        comp_types: de_helper.comp_types,
        // re-use the entity-uuid mapping, inverted back to entity -> uuid
        entity_map: RefCell::new(HashMap::from_iter(
            de_helper
                .entity_map
                .into_inner()
                .into_iter()
                .map(|(uuid, e)| (e, uuid)),
        )),
    };
    let serializable = legion::ser::serializable_world(&deserialized_world, &ser_helper);
    let roundtrip_data = serde_json::to_string(&serializable).unwrap();
    assert_eq!(roundtrip_data, serialized_data);
}

164
bevy_legion/readme.md Normal file
View file

@ -0,0 +1,164 @@
# Legion
[![Build Status][build_img]][build_lnk] [![Crates.io][crates_img]][crates_lnk] [![Docs.rs][doc_img]][doc_lnk]
[build_img]: https://img.shields.io/travis/TomGillen/legion/master.svg
[build_lnk]: https://travis-ci.org/TomGillen/legion
[crates_img]: https://img.shields.io/crates/v/legion.svg
[crates_lnk]: https://crates.io/crates/legion
[doc_img]: https://docs.rs/legion/badge.svg
[doc_lnk]: https://docs.rs/legion
Legion aims to be a feature rich high performance ECS library for Rust game projects with minimal boilerplate.
## Benchmarks
Based on the [ecs_bench](https://github.com/lschmierer/ecs_bench) project.
![](bench.png)
## Getting Started
```rust
use legion::prelude::*;
// Define our entity data types
#[derive(Clone, Copy, Debug, PartialEq)]
struct Position {
x: f32,
y: f32,
}
#[derive(Clone, Copy, Debug, PartialEq)]
struct Velocity {
dx: f32,
dy: f32,
}
#[derive(Clone, Copy, Debug, PartialEq)]
struct Model(usize);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Static;
// Create a world to store our entities
let universe = Universe::new();
let mut world = universe.create_world();
// Create entities with `Position` and `Velocity` data
world.insert(
(),
(0..999).map(|_| (Position { x: 0.0, y: 0.0 }, Velocity { dx: 0.0, dy: 0.0 }))
);
// Create entities with `Position` data and a shared `Model` data, tagged as `Static`
// Shared data values are shared across many entities,
// and enable further batch processing and filtering use cases
let entities: &[Entity] = world.insert(
(Model(5), Static),
(0..999).map(|_| (Position { x: 0.0, y: 0.0 },))
);
// Create a query which finds all `Position` and `Velocity` components
let query = <(Write<Position>, Read<Velocity>)>::query();
// Iterate through all entities that match the query in the world
for (mut pos, vel) in query.iter(&mut world) {
pos.x += vel.dx;
pos.y += vel.dy;
}
```
## Features
Legion aims to be a more feature-complete game-ready ECS than many of its predecessors.
### Advanced Query Filters
The query API can do much more than pull entity data out of the world.
Additional data type filters:
```rust
// It is possible to specify that entities must contain data beyond that being fetched
let query = Read::<Position>::query()
.filter(component::<Velocity>());
for position in query.iter(&mut world) {
// these entities also have `Velocity`
}
```
Filter boolean operations:
```rust
// Filters can be combined with boolean operators
let query = Read::<Position>::query()
.filter(tag::<Static>() | !component::<Velocity>());
for position in query.iter(&mut world) {
// these entities are also either marked as `Static`, or do *not* have a `Velocity`
}
```
Filter by shared data value:
```rust
// Filters can filter by specific shared data values
let query = Read::<Position>::query()
.filter(tag_value(&Model(3)));
for position in query.iter(&mut world) {
// these entities all have shared data value `Model(3)`
}
```
Change detection:
```rust
// Queries can perform coarse-grained change detection, rejecting entities whose data
// has not changed since the last time the query was iterated.
let query = <(Read<Position>, Shared<Model>)>::query()
.filter(changed::<Position>());
for (pos, model) in query.iter(&mut world) {
// entities who have changed position
}
```
### Content Streaming
Entities can be loaded and initialized in a background `World` on separate threads and then
when ready, merged into the main `World` near instantaneously.
```rust
let universe = Universe::new();
let mut world_a = universe.create_world();
let mut world_b = universe.create_world();
// Merge all entities from `world_b` into `world_a`
// Entity IDs are guaranteed to be unique across worlds and will
// remain unchanged across the merge.
world_a.merge(world_b);
```
### Chunk Iteration
Entity data is allocated in blocks called "chunks", each approximately containing 64KiB of data. The query API exposes each chunk via `iter_chunk`. As all entities in a chunk are guaranteed to contain the same set of entity data and shared data values, it is possible to do batch processing via the chunk API.
```rust
fn render_instanced(model: &Model, transforms: &[Transform]) {
// pass `transforms` pointer to graphics API to load into constant buffer
// issue instanced draw call with model data and transforms
}
let query = Read::<Transform>::query()
.filter(tag::<Model>());
for chunk in query.iter_chunks_mut(&mut world) {
// get the chunk's model
let model: &Model = chunk.tag().unwrap();
// get a (runtime borrow checked) slice of transforms
let transforms = chunk.components::<Transform>().unwrap();
// give the model and transform slice to our renderer
render_instanced(model, &transforms);
}
```

1
bevy_legion/rustfmt.toml Normal file
View file

@ -0,0 +1 @@
fn_single_line = true

759
bevy_legion/src/borrow.rs Normal file
View file

@ -0,0 +1,759 @@
//! Atomic runtime borrow checking module.
//! These types implement something akin to `RefCell`, but are atomically handled allowing them to
//! cross thread boundaries.
use std::cell::UnsafeCell;
use std::hash::{Hash, Hasher};
use std::any::{Any, type_name};
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::atomic::AtomicIsize;
use crate::resource::Resource;
#[cfg(not(debug_assertions))]
use std::marker::PhantomData;
// #[inline(always)]
// pub fn downcast_typename_mut<U: Any>(value: &mut dyn Any) -> &mut U {
// unsafe { &mut *(value as *mut dyn Any as *mut U) }
// }
// #[inline(always)]
// pub fn downcast_typename_ref<U: Any>(value: &dyn Any) -> &U {
// unsafe { &*(value as *const dyn Any as *const U) }
// // if type_name::<T>() == type_name::<U>() {
// // unsafe { Some(&*(value as *const dyn Any as *const U)) }
// // } else {
// // None
// // }
// }
/// Downcasting helpers for `dyn Resource` trait objects.
///
/// Mirrors the `std::any::Any` downcast API; the name reflects that this crate
/// was modified to identify types via `std::any::type_name` rather than `TypeId`.
pub trait DowncastTypename {
    /// Returns a mutable reference to the concrete type `T`, if `self` is a `T`.
    fn downcast_typename_mut<T: Any>(&mut self) -> Option<&mut T>;
    /// Returns a shared reference to the concrete type `T`, if `self` is a `T`.
    fn downcast_typename_ref<T: Any>(&self) -> Option<&T>;
    /// Reports whether `self` is (believed to be) an instance of `T`.
    fn is_typename<T: Any>(&self) -> bool;
}
// NOTE(review): `is_typename` below unconditionally returns `true` (see its
// TODO), so the "checked" pointer casts in the two downcast methods are
// effectively unchecked. Callers must only request the concrete type actually
// stored, or this is undefined behavior — confirm at call sites.
impl DowncastTypename for dyn Resource {
    #[inline(always)]
    fn downcast_typename_mut<T: Any>(&mut self) -> Option<&mut T> {
        if self.is_typename::<T>() {
            // SAFETY: just checked whether we are pointing to the correct type
            unsafe { Some(&mut *(self.as_any_mut() as *mut dyn Any as *mut T)) }
        } else {
            None
        }
    }
    #[inline(always)]
    fn downcast_typename_ref<T: Any>(&self) -> Option<&T> {
        if self.is_typename::<T>() {
            // SAFETY: just checked whether we are pointing to the correct type
            unsafe { Some(&*(self.as_any() as *const dyn Any as *const T)) }
        } else {
            None
        }
    }
    #[inline(always)]
    fn is_typename<T: Any>(&self) -> bool {
        true
        // TODO: it would be nice to add type safety here, but the type names don't match
        // println!("{} {}", type_name_of_val(self), type_name::<T>());
        // type_name_of_val(self) == type_name::<T>()
    }
}
/// Returns the compile-time type name of the value behind the reference.
///
/// Because the argument is received as `&T`, the reported name is that of the
/// static type `T`, not the dynamic type of a trait object's concrete value.
pub fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
    std::any::type_name::<T>()
}
/// A `RefCell` implementation which is thread safe. This type performs all the standard runtime
/// borrow checking which would be familiar from using `RefCell`.
///
/// `UnsafeCell` is used in this type, but borrow checking is performed using atomic values,
/// guaranteeing safe access across threads.
///
/// # Safety
/// Runtime borrow checking is only conducted in builds with `debug_assertions` enabled. Release
/// builds assume proper resource access and will cause undefined behavior with improper use.
pub struct AtomicRefCell<T> {
    // The wrapped value; interior mutability is mediated by `borrow_state`.
    value: UnsafeCell<T>,
    // Borrow counter: 0 = unborrowed, positive = number of shared borrows,
    // negative = exclusively borrowed (see `try_get` / `try_get_mut`).
    borrow_state: AtomicIsize,
}
impl<T: Default> Default for AtomicRefCell<T> {
    // Wraps `T::default()` with no outstanding borrows.
    fn default() -> Self { Self::new(T::default()) }
}
impl<T: std::fmt::Debug> std::fmt::Debug for AtomicRefCell<T> {
    // Debug output shows the raw borrow counter followed by the value.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "({:?}) {:?}", self.borrow_state, self.value)
    }
}
impl<T> AtomicRefCell<T> {
    /// Creates a new cell wrapping `value` with no outstanding borrows.
    pub fn new(value: T) -> Self {
        AtomicRefCell {
            value: UnsafeCell::from(value),
            borrow_state: AtomicIsize::from(0),
        }
    }

    /// Retrieve an immutable `Ref` wrapped reference of `&T`.
    ///
    /// # Panics
    ///
    /// This method panics if this value is already mutably borrowed.
    ///
    /// # Safety
    /// Runtime borrow checking is only conducted in builds with `debug_assertions` enabled. Release
    /// builds assume proper resource access and will cause undefined behavior with improper use.
    #[inline(always)]
    pub fn get(&self) -> Ref<T> { self.try_get().unwrap() }

    /// Unwrap the value from the RefCell and kill it, returning the value.
    pub fn into_inner(self) -> T { self.value.into_inner() }

    /// Retrieve an immutable `Ref` wrapped reference of `&T`. This is the safe version of `get`
    /// providing an error result on failure.
    ///
    /// # Returns
    ///
    /// `Ok(Ref<T>)` if the value can be retrieved.
    /// `Err` if the value is already mutably borrowed.
    #[cfg(debug_assertions)]
    pub fn try_get(&self) -> Result<Ref<T>, String> {
        loop {
            let read = self.borrow_state.load(std::sync::atomic::Ordering::SeqCst);
            if read < 0 {
                return Err(format!(
                    "resource already borrowed as mutable: {}",
                    std::any::type_name::<T>()
                ));
            }

            // Try to bump the reader count from `read` to `read + 1`.
            // `compare_exchange` replaces the deprecated `compare_and_swap`
            // with identical semantics; on failure another thread raced us,
            // so loop and re-load the counter.
            if self
                .borrow_state
                .compare_exchange(
                    read,
                    read + 1,
                    std::sync::atomic::Ordering::SeqCst,
                    std::sync::atomic::Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }

        // SAFETY: the reader count is now positive, so no exclusive borrow
        // can exist until every `Shared` token is dropped.
        Ok(Ref::new(Shared::new(&self.borrow_state), unsafe {
            &*self.value.get()
        }))
    }

    /// Retrieve an immutable `Ref` wrapped reference of `&T`. This is the safe version of `get`
    /// providing an error result on failure.
    ///
    /// # Returns
    ///
    /// `Ok(Ref<T>)` if the value can be retrieved.
    /// `Err` if the value is already mutably borrowed.
    ///
    /// # Safety
    ///
    /// This release version of this function does not perform runtime borrow checking and will
    /// cause undefined behavior if borrow rules are violated. This means they should be enforced
    /// on the use of this type.
    #[cfg(not(debug_assertions))]
    #[inline(always)]
    pub fn try_get(&self) -> Result<Ref<T>, &'static str> {
        Ok(Ref::new(Shared::new(&self.borrow_state), unsafe {
            &*self.value.get()
        }))
    }

    /// Retrieve an mutable `RefMut` wrapped reference of `&mut T`.
    ///
    /// # Panics
    ///
    /// This method panics if this value is already borrowed (mutably or immutably).
    ///
    /// # Safety
    /// Runtime borrow checking is only conducted in builds with `debug_assertions` enabled. Release
    /// builds assume proper resource access and will cause undefined behavior with improper use.
    #[inline(always)]
    pub fn get_mut(&self) -> RefMut<T> { self.try_get_mut().unwrap() }

    /// Retrieve a mutable `RefMut` wrapped reference of `&mut T`. This is the safe version of
    /// `get_mut` providing an error result on failure.
    ///
    /// # Returns
    ///
    /// `Ok(RefMut<T>)` if the value can be retrieved.
    /// `Err` if the value is already borrowed (mutably or immutably).
    #[cfg(debug_assertions)]
    pub fn try_get_mut(&self) -> Result<RefMut<T>, String> {
        // An exclusive borrow is recorded by atomically moving the counter
        // from 0 (unborrowed) to -1. `compare_exchange` replaces the
        // deprecated `compare_and_swap`; `Err` carries the observed value.
        match self.borrow_state.compare_exchange(
            0,
            -1,
            std::sync::atomic::Ordering::SeqCst,
            std::sync::atomic::Ordering::SeqCst,
        ) {
            // SAFETY: counter was 0 and is now -1, so we hold the only borrow.
            Ok(_) => Ok(RefMut::new(Exclusive::new(&self.borrow_state), unsafe {
                &mut *self.value.get()
            })),
            Err(x) if x < 0 => Err(format!(
                "resource already borrowed as mutable: {}",
                std::any::type_name::<T>()
            )),
            Err(_) => Err(format!(
                "resource already borrowed as immutable: {}",
                std::any::type_name::<T>()
            )),
        }
    }

    /// Retrieve a mutable `RefMut` wrapped reference of `&mut T`. This is the safe version of
    /// `get_mut` providing an error result on failure.
    ///
    /// # Returns
    ///
    /// `Ok(RefMut<T>)` if the value can be retrieved.
    /// `Err` if the value is already borrowed.
    ///
    /// # Safety
    ///
    /// This release version of this function does not perform runtime borrow checking and will
    /// cause undefined behavior if borrow rules are violated. This means they should be enforced
    /// on the use of this type.
    #[cfg(not(debug_assertions))]
    #[inline(always)]
    pub fn try_get_mut(&self) -> Result<RefMut<T>, &'static str> {
        Ok(RefMut::new(Exclusive::new(&self.borrow_state), unsafe {
            &mut *self.value.get()
        }))
    }
}
// SAFETY: access to the inner `UnsafeCell` is mediated by the atomic borrow
// counter, so the cell may be sent/shared across threads whenever `T` itself
// may be.
unsafe impl<T: Send> Send for AtomicRefCell<T> {}
unsafe impl<T: Sync> Sync for AtomicRefCell<T> {}
/// Type used for allowing unsafe cloning of internal types
pub trait UnsafeClone {
    /// Clone this type unsafely
    ///
    /// # Safety
    /// Types implementing this trait perform clones under an unsafe context.
    unsafe fn clone(&self) -> Self;
}
impl<A: UnsafeClone, B: UnsafeClone> UnsafeClone for (A, B) {
    // Clones both elements pairwise; inherits the safety contracts of `A` and `B`.
    unsafe fn clone(&self) -> Self { (self.0.clone(), self.1.clone()) }
}
/// A shared (read) borrow token.
///
/// In debug builds it holds a reference to the owning cell's borrow counter
/// and decrements it on drop; in release builds it is a zero-sized marker and
/// no counting is performed.
#[derive(Debug)]
pub struct Shared<'a> {
    #[cfg(debug_assertions)]
    state: &'a AtomicIsize,
    #[cfg(not(debug_assertions))]
    state: PhantomData<&'a ()>,
}
impl<'a> Shared<'a> {
    #[cfg(debug_assertions)]
    fn new(state: &'a AtomicIsize) -> Self { Self { state } }
    #[cfg(not(debug_assertions))]
    #[inline(always)]
    fn new(_: &'a AtomicIsize) -> Self { Self { state: PhantomData } }
}
#[cfg(debug_assertions)]
impl<'a> Drop for Shared<'a> {
    // Releases one shared borrow by decrementing the cell's reader count.
    fn drop(&mut self) { self.state.fetch_sub(1, std::sync::atomic::Ordering::SeqCst); }
}
impl<'a> Clone for Shared<'a> {
    #[inline(always)]
    fn clone(&self) -> Self {
        // Each clone registers an additional reader (debug builds only).
        #[cfg(debug_assertions)]
        self.state.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        Shared { state: self.state }
    }
}
impl<'a> UnsafeClone for Shared<'a> {
    // Shared tokens can always be cloned safely; delegate to `Clone`.
    unsafe fn clone(&self) -> Self { Clone::clone(&self) }
}
/// An exclusive (write) borrow token.
///
/// In debug builds it holds a reference to the owning cell's borrow counter
/// and increments it back towards zero on drop; in release builds it is a
/// zero-sized marker and no counting is performed.
#[derive(Debug)]
pub struct Exclusive<'a> {
    #[cfg(debug_assertions)]
    state: &'a AtomicIsize,
    #[cfg(not(debug_assertions))]
    state: PhantomData<&'a ()>,
}
impl<'a> Exclusive<'a> {
    #[cfg(debug_assertions)]
    fn new(state: &'a AtomicIsize) -> Self { Self { state } }
    #[cfg(not(debug_assertions))]
    #[inline(always)]
    fn new(_: &'a AtomicIsize) -> Self { Self { state: PhantomData } }
}
#[cfg(debug_assertions)]
impl<'a> Drop for Exclusive<'a> {
    // Releases one exclusive holder; the counter reaches 0 only when all
    // (unsafely cloned) exclusive tokens have been dropped.
    fn drop(&mut self) { self.state.fetch_add(1, std::sync::atomic::Ordering::SeqCst); }
}
impl<'a> UnsafeClone for Exclusive<'a> {
    #[inline(always)]
    // Cloning records an extra exclusive holder (counter moves further below
    // zero). The caller must guarantee the resulting aliased mutable access
    // is sound — hence this is only available through `UnsafeClone`.
    unsafe fn clone(&self) -> Self {
        #[cfg(debug_assertions)]
        self.state.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
        Exclusive { state: self.state }
    }
}
/// An immutable reference to a value, tied to a `Shared` borrow token which
/// releases the borrow when this wrapper is dropped.
#[derive(Debug)]
pub struct Ref<'a, T: 'a> {
    #[allow(dead_code)]
    // held for drop impl
    borrow: Shared<'a>,
    value: &'a T,
}
impl<'a, T: 'a> Clone for Ref<'a, T> {
    // Cloning duplicates the borrow token, registering an additional reader.
    #[inline(always)]
    fn clone(&self) -> Self { Ref::new(Clone::clone(&self.borrow), self.value) }
}
impl<'a, T: 'a> Ref<'a, T> {
    /// Wraps `value` in a `Ref` holding the given borrow token.
    #[inline(always)]
    pub fn new(borrow: Shared<'a>, value: &'a T) -> Self { Self { borrow, value } }
    /// Consumes this `Ref`, producing an owned `RefMap` of `f`'s result while
    /// keeping the borrow alive.
    #[inline(always)]
    pub fn map_into<K: 'a, F: FnMut(&'a T) -> K>(self, mut f: F) -> RefMap<'a, K> {
        RefMap::new(self.borrow, f(&self.value))
    }
    /// Produces a new `Ref` to a value reachable from this one (e.g. a field),
    /// sharing the same borrow.
    #[inline(always)]
    pub fn map<K: 'a, F: FnMut(&T) -> &K>(&self, mut f: F) -> Ref<'a, K> {
        Ref::new(Clone::clone(&self.borrow), f(&self.value))
    }
    /// Deconstructs this mapped borrow to its underlying borrow state and value.
    ///
    /// # Safety
    ///
    /// Ensure that you still follow all safety guidelines of this mapped ref.
    #[inline(always)]
    pub unsafe fn deconstruct(self) -> (Shared<'a>, &'a T) { (self.borrow, self.value) }
}
impl<'a, T: 'a> Deref for Ref<'a, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &Self::Target { self.value }
}
impl<'a, T: 'a> AsRef<T> for Ref<'a, T> {
    #[inline(always)]
    fn as_ref(&self) -> &T { self.value }
}
impl<'a, T: 'a> std::borrow::Borrow<T> for Ref<'a, T> {
    #[inline(always)]
    fn borrow(&self) -> &T { self.value }
}
// The comparison and hash impls below simply delegate to the referenced value.
impl<'a, T> PartialEq for Ref<'a, T>
where
    T: 'a + PartialEq,
{
    fn eq(&self, other: &Self) -> bool { self.value == other.value }
}
impl<'a, T> Eq for Ref<'a, T> where T: 'a + Eq {}
impl<'a, T> PartialOrd for Ref<'a, T>
where
    T: 'a + PartialOrd,
{
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.value.partial_cmp(&other.value)
    }
}
impl<'a, T> Ord for Ref<'a, T>
where
    T: 'a + Ord,
{
    fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.value.cmp(&other.value) }
}
impl<'a, T> Hash for Ref<'a, T>
where
    T: 'a + Hash,
{
    fn hash<H: Hasher>(&self, state: &mut H) { self.value.hash(state); }
}
/// A mutable reference to a value, tied to an `Exclusive` borrow token which
/// releases the borrow when this wrapper is dropped.
#[derive(Debug)]
pub struct RefMut<'a, T: 'a> {
    #[allow(dead_code)]
    // held for drop impl
    borrow: Exclusive<'a>,
    value: &'a mut T,
}
impl<'a, T: 'a> RefMut<'a, T> {
    /// Wraps `value` in a `RefMut` holding the given exclusive borrow token.
    #[inline(always)]
    pub fn new(borrow: Exclusive<'a>, value: &'a mut T) -> Self { Self { borrow, value } }
    /// Consumes this `RefMut`, producing an owned `RefMapMut` of `f`'s result
    /// while keeping the exclusive borrow alive.
    #[inline(always)]
    pub fn map_into<K: 'a, F: FnMut(&mut T) -> K>(mut self, mut f: F) -> RefMapMut<'a, K> {
        RefMapMut::new(self.borrow, f(&mut self.value))
    }
    /// Deconstructs this mapped borrow to its underlying borrow state and value.
    ///
    /// # Safety
    ///
    /// Ensure that you still follow all safety guidelines of this mapped ref.
    #[inline(always)]
    pub unsafe fn deconstruct(self) -> (Exclusive<'a>, &'a mut T) { (self.borrow, self.value) }
    /// Splits this borrow into two disjoint mutable borrows (e.g. of two
    /// fields). Both halves share the same underlying borrow token, which is
    /// unsafely cloned; `f` must return non-overlapping references.
    #[inline(always)]
    pub fn split<First, Rest, F: Fn(&'a mut T) -> (&'a mut First, &'a mut Rest)>(
        self,
        f: F,
    ) -> (RefMut<'a, First>, RefMut<'a, Rest>) {
        let (first, rest) = f(self.value);
        (
            RefMut::new(unsafe { self.borrow.clone() }, first),
            RefMut::new(self.borrow, rest),
        )
    }
}
impl<'a, T: 'a> Deref for RefMut<'a, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &Self::Target { self.value }
}
impl<'a, T: 'a> DerefMut for RefMut<'a, T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target { self.value }
}
impl<'a, T: 'a> AsRef<T> for RefMut<'a, T> {
    #[inline(always)]
    fn as_ref(&self) -> &T { self.value }
}
impl<'a, T: 'a> std::borrow::Borrow<T> for RefMut<'a, T> {
    #[inline(always)]
    fn borrow(&self) -> &T { self.value }
}
// The comparison and hash impls below simply delegate to the referenced value.
impl<'a, T> PartialEq for RefMut<'a, T>
where
    T: 'a + PartialEq,
{
    fn eq(&self, other: &Self) -> bool { self.value == other.value }
}
impl<'a, T> Eq for RefMut<'a, T> where T: 'a + Eq {}
impl<'a, T> PartialOrd for RefMut<'a, T>
where
    T: 'a + PartialOrd,
{
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.value.partial_cmp(&other.value)
    }
}
impl<'a, T> Ord for RefMut<'a, T>
where
    T: 'a + Ord,
{
    fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.value.cmp(&other.value) }
}
impl<'a, T> Hash for RefMut<'a, T>
where
    T: 'a + Hash,
{
    fn hash<H: Hasher>(&self, state: &mut H) { self.value.hash(state); }
}
/// An owned value produced from a shared borrow (via `Ref::map_into`), which
/// keeps the originating borrow alive until it is dropped.
#[derive(Debug)]
pub struct RefMap<'a, T: 'a> {
    #[allow(dead_code)]
    // held for drop impl
    borrow: Shared<'a>,
    value: T,
}
impl<'a, T: 'a> RefMap<'a, T> {
    /// Wraps an owned `value` together with the borrow token it derives from.
    #[inline(always)]
    pub fn new(borrow: Shared<'a>, value: T) -> Self { Self { borrow, value } }
    /// Maps the owned value to a new owned value, retaining the same borrow.
    #[inline(always)]
    pub fn map_into<K: 'a, F: FnMut(&mut T) -> K>(mut self, mut f: F) -> RefMap<'a, K> {
        RefMap::new(self.borrow, f(&mut self.value))
    }
    /// Deconstructs this mapped borrow to its underlying borrow state and value.
    ///
    /// # Safety
    ///
    /// Ensure that you still follow all safety guidelines of this mapped ref.
    #[inline(always)]
    pub unsafe fn deconstruct(self) -> (Shared<'a>, T) { (self.borrow, self.value) }
}
impl<'a, T: 'a> Deref for RefMap<'a, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &Self::Target { &self.value }
}
impl<'a, T: 'a> AsRef<T> for RefMap<'a, T> {
    #[inline(always)]
    fn as_ref(&self) -> &T { &self.value }
}
impl<'a, T: 'a> std::borrow::Borrow<T> for RefMap<'a, T> {
    #[inline(always)]
    fn borrow(&self) -> &T { &self.value }
}
/// An owned value produced from an exclusive borrow (via `RefMut::map_into`),
/// which keeps the originating borrow alive until it is dropped.
#[derive(Debug)]
pub struct RefMapMut<'a, T: 'a> {
    #[allow(dead_code)]
    // held for drop impl
    borrow: Exclusive<'a>,
    value: T,
}
impl<'a, T: 'a> RefMapMut<'a, T> {
    /// Wraps an owned `value` together with the exclusive borrow token it derives from.
    #[inline(always)]
    pub fn new(borrow: Exclusive<'a>, value: T) -> Self { Self { borrow, value } }
    /// Maps the owned value to a new owned value, retaining the same borrow.
    #[inline(always)]
    pub fn map_into<K: 'a, F: FnMut(&mut T) -> K>(mut self, mut f: F) -> RefMapMut<'a, K> {
        RefMapMut {
            value: f(&mut self.value),
            borrow: self.borrow,
        }
    }
    /// Deconstructs this mapped borrow to its underlying borrow state and value.
    ///
    /// # Safety
    ///
    /// Ensure that you still follow all safety guidelines of this mutable mapped ref.
    #[inline(always)]
    pub unsafe fn deconstruct(self) -> (Exclusive<'a>, T) { (self.borrow, self.value) }
}
impl<'a, T: 'a> Deref for RefMapMut<'a, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &Self::Target { &self.value }
}
impl<'a, T: 'a> DerefMut for RefMapMut<'a, T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target { &mut self.value }
}
impl<'a, T: 'a> AsRef<T> for RefMapMut<'a, T> {
    #[inline(always)]
    fn as_ref(&self) -> &T { &self.value }
}
impl<'a, T: 'a> std::borrow::Borrow<T> for RefMapMut<'a, T> {
    #[inline(always)]
    fn borrow(&self) -> &T { &self.value }
}
/// An iterator adaptor which yields each item wrapped in a `Ref`, tying every
/// yielded reference to a shared borrow token.
#[derive(Debug)]
pub struct RefIter<'a, T: 'a, I: Iterator<Item = &'a T>> {
    #[allow(dead_code)]
    // held for drop impl
    borrow: Shared<'a>,
    iter: I,
}
impl<'a, T: 'a, I: Iterator<Item = &'a T>> RefIter<'a, T, I> {
    /// Wraps `iter` so that each yielded item carries a clone of `borrow`.
    #[inline(always)]
    pub fn new(borrow: Shared<'a>, iter: I) -> Self { Self { borrow, iter } }
}
impl<'a, T: 'a, I: Iterator<Item = &'a T>> Iterator for RefIter<'a, T, I> {
    type Item = Ref<'a, T>;

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        let token = &self.borrow;
        self.iter
            .next()
            .map(|item| Ref::new(Clone::clone(token), item))
    }

    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<'a, T: 'a, I: Iterator<Item = &'a T> + ExactSizeIterator> ExactSizeIterator
    for RefIter<'a, T, I>
{
}
/// Internal state shared by the `Try*` iterators: either a successfully
/// borrowed iterator together with its borrow token, or a count of entities
/// for which the data was missing (for which `None` items are yielded).
#[derive(Debug)]
enum TryIter<State, T> {
    Found { borrow: State, iter: T },
    Missing(usize),
}
/// An iterator yielding `Option<Ref<T>>`: `Some` for each item of a
/// successfully borrowed iterator, or `None` repeated `count` times when the
/// data was missing.
#[derive(Debug)]
pub struct TryRefIter<'a, T: 'a, I: Iterator<Item = &'a T>> {
    inner: TryIter<Shared<'a>, I>,
}
impl<'a, T: 'a, I: Iterator<Item = &'a T>> TryRefIter<'a, T, I> {
    // Constructs the "borrow succeeded" variant.
    #[inline(always)]
    pub(crate) fn found(borrow: Shared<'a>, iter: I) -> Self {
        Self {
            inner: TryIter::Found { borrow, iter },
        }
    }
    // Constructs the "data missing" variant, which yields `None` `count` times.
    #[inline(always)]
    pub(crate) fn missing(count: usize) -> Self {
        Self {
            inner: TryIter::Missing(count),
        }
    }
}
impl<'a, T: 'a, I: Iterator<Item = &'a T>> Iterator for TryRefIter<'a, T, I> {
    type Item = Option<Ref<'a, T>>;
    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        Some(match self.inner {
            TryIter::Found {
                ref borrow,
                ref mut iter,
                ..
            } => Some(Ref::new(Clone::clone(borrow), iter.next()?)),
            TryIter::Missing(ref mut n) => {
                // `checked_sub` terminates the iterator (returns outer `None`
                // via `?`) once the remaining count reaches zero.
                *n = n.checked_sub(1)?;
                None
            }
        })
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        match self.inner {
            TryIter::Found { ref iter, .. } => iter.size_hint(),
            TryIter::Missing(n) => (n, Some(n)),
        }
    }
}
impl<'a, T: 'a, I: Iterator<Item = &'a T> + ExactSizeIterator> ExactSizeIterator
    for TryRefIter<'a, T, I>
{
}
/// An iterator adaptor which yields each item wrapped in a `RefMut`, tying
/// every yielded mutable reference to an exclusive borrow token.
#[derive(Debug)]
pub struct RefIterMut<'a, T: 'a, I: Iterator<Item = &'a mut T>> {
    #[allow(dead_code)]
    // held for drop impl
    borrow: Exclusive<'a>,
    iter: I,
}
impl<'a, T: 'a, I: Iterator<Item = &'a mut T>> RefIterMut<'a, T, I> {
    /// Wraps `iter` so that each yielded item carries a clone of `borrow`.
    #[inline(always)]
    pub fn new(borrow: Exclusive<'a>, iter: I) -> Self { Self { borrow, iter } }
}
impl<'a, T: 'a, I: Iterator<Item = &'a mut T>> Iterator for RefIterMut<'a, T, I> {
    type Item = RefMut<'a, T>;

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        let token = &self.borrow;
        self.iter
            .next()
            .map(|item| RefMut::new(unsafe { token.clone() }, item))
    }

    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
impl<'a, T: 'a, I: Iterator<Item = &'a mut T> + ExactSizeIterator> ExactSizeIterator
    for RefIterMut<'a, T, I>
{
}
/// An iterator yielding `Option<RefMut<T>>`: `Some` for each item of a
/// successfully borrowed iterator, or `None` repeated `count` times when the
/// data was missing.
#[derive(Debug)]
pub struct TryRefIterMut<'a, T: 'a, I: Iterator<Item = &'a mut T>> {
    inner: TryIter<Exclusive<'a>, I>,
}
impl<'a, T: 'a, I: Iterator<Item = &'a mut T>> TryRefIterMut<'a, T, I> {
    // Constructs the "borrow succeeded" variant.
    #[inline(always)]
    pub(crate) fn found(borrow: Exclusive<'a>, iter: I) -> Self {
        Self {
            inner: TryIter::Found { borrow, iter },
        }
    }
    // Constructs the "data missing" variant, which yields `None` `count` times.
    #[inline(always)]
    pub(crate) fn missing(count: usize) -> Self {
        Self {
            inner: TryIter::Missing(count),
        }
    }
}
impl<'a, T: 'a, I: Iterator<Item = &'a mut T>> Iterator for TryRefIterMut<'a, T, I> {
    type Item = Option<RefMut<'a, T>>;
    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        Some(match self.inner {
            TryIter::Found {
                ref borrow,
                ref mut iter,
                ..
            } => Some(RefMut::new(unsafe { borrow.clone() }, iter.next()?)),
            TryIter::Missing(ref mut n) => {
                // `checked_sub` terminates the iterator (returns outer `None`
                // via `?`) once the remaining count reaches zero.
                *n = n.checked_sub(1)?;
                None
            }
        })
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        match self.inner {
            TryIter::Found { ref iter, .. } => iter.size_hint(),
            TryIter::Missing(n) => (n, Some(n)),
        }
    }
}
impl<'a, T: 'a, I: Iterator<Item = &'a mut T> + ExactSizeIterator> ExactSizeIterator
    for TryRefIterMut<'a, T, I>
{
}

699
bevy_legion/src/command.rs Normal file
View file

@ -0,0 +1,699 @@
use crate::{
borrow::AtomicRefCell,
cons::{ConsAppend, ConsFlatten},
entity::{Entity, EntityAllocator},
filter::{ChunksetFilterData, Filter},
storage::{Component, ComponentTypeId, Tag, TagTypeId},
world::{ComponentSource, ComponentTupleSet, IntoComponentSource, TagLayout, TagSet, World},
};
use derivative::Derivative;
use smallvec::SmallVec;
use std::{collections::VecDeque, iter::FromIterator, marker::PhantomData, sync::Arc};
/// This trait can be used to implement custom world writer types that can be directly
/// inserted into the command buffer, for more custom and complex world operations. This is
/// analogous to the `CommandBuffer::exec_mut` function type, but does not perform any
/// explicit archetype access.
pub trait WorldWritable {
    /// Destructs the writer and performs the write operations on the world.
    fn write(self: Arc<Self>, world: &mut World);
    /// Returns the list of `ComponentTypeId` which are written by this command buffer. This is
    /// leveraged to allow parallel command buffer flushing.
    fn write_components(&self) -> Vec<ComponentTypeId>;
    /// Returns the list of `TagTypeId` which are written by this command buffer. This is
    /// leveraged to allow parallel command buffer flushing.
    fn write_tags(&self) -> Vec<TagTypeId>;
}
/// Command which inserts components and tags for pre-allocated entities into
/// the world when the buffer is flushed.
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct InsertBufferedCommand<T, C> {
    write_components: Vec<ComponentTypeId>,
    write_tags: Vec<TagTypeId>,
    #[derivative(Debug = "ignore")]
    tags: T,
    #[derivative(Debug = "ignore")]
    components: C,
    // The entities (already allocated by the command buffer) to insert for.
    entities: Vec<Entity>,
}
impl<T, C> WorldWritable for InsertBufferedCommand<T, C>
where
    T: TagSet + TagLayout + for<'a> Filter<ChunksetFilterData<'a>>,
    C: ComponentSource,
{
    fn write(self: Arc<Self>, world: &mut World) {
        // Panics if another Arc to this command still exists at flush time.
        let consumed = Arc::try_unwrap(self).unwrap();
        world.insert_buffered(&consumed.entities, consumed.tags, consumed.components);
    }
    fn write_components(&self) -> Vec<ComponentTypeId> { self.write_components.clone() }
    fn write_tags(&self) -> Vec<TagTypeId> { self.write_tags.clone() }
}
/// Command which inserts new entities (with tags and components) into the
/// world when the buffer is flushed.
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct InsertCommand<T, C> {
    write_components: Vec<ComponentTypeId>,
    write_tags: Vec<TagTypeId>,
    #[derivative(Debug = "ignore")]
    tags: T,
    #[derivative(Debug = "ignore")]
    components: C,
}
impl<T, C> WorldWritable for InsertCommand<T, C>
where
    T: TagSet + TagLayout + for<'a> Filter<ChunksetFilterData<'a>>,
    C: IntoComponentSource,
{
    fn write(self: Arc<Self>, world: &mut World) {
        // Panics if another Arc to this command still exists at flush time.
        let consumed = Arc::try_unwrap(self).unwrap();
        world.insert(consumed.tags, consumed.components);
    }
    fn write_components(&self) -> Vec<ComponentTypeId> { self.write_components.clone() }
    fn write_tags(&self) -> Vec<TagTypeId> { self.write_tags.clone() }
}
/// Command which deletes a single entity from the world when the buffer is flushed.
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct DeleteEntityCommand(Entity);
impl WorldWritable for DeleteEntityCommand {
    fn write(self: Arc<Self>, world: &mut World) { world.delete(self.0); }
    // Deletion writes no specific component or tag types.
    fn write_components(&self) -> Vec<ComponentTypeId> { Vec::with_capacity(0) }
    fn write_tags(&self) -> Vec<TagTypeId> { Vec::with_capacity(0) }
}
/// Command which adds a tag value to an existing entity when the buffer is flushed.
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct AddTagCommand<T> {
    entity: Entity,
    #[derivative(Debug = "ignore")]
    tag: T,
}
impl<T> WorldWritable for AddTagCommand<T>
where
    T: Tag,
{
    fn write(self: Arc<Self>, world: &mut World) {
        // Panics if another Arc to this command still exists at flush time.
        let consumed = Arc::try_unwrap(self).unwrap();
        world.add_tag(consumed.entity, consumed.tag)
    }
    fn write_components(&self) -> Vec<ComponentTypeId> { Vec::with_capacity(0) }
    fn write_tags(&self) -> Vec<TagTypeId> { vec![TagTypeId::of::<T>()] }
}
/// Command which removes the tag of type `T` from an entity when the buffer is flushed.
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct RemoveTagCommand<T> {
    entity: Entity,
    // Only the type is needed for removal; no tag value is stored.
    _marker: PhantomData<T>,
}
impl<T> WorldWritable for RemoveTagCommand<T>
where
    T: Tag,
{
    fn write(self: Arc<Self>, world: &mut World) { world.remove_tag::<T>(self.entity) }
    fn write_components(&self) -> Vec<ComponentTypeId> { Vec::with_capacity(0) }
    fn write_tags(&self) -> Vec<TagTypeId> { vec![TagTypeId::of::<T>()] }
}
/// Command which adds a component to an existing entity when the buffer is flushed.
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct AddComponentCommand<C> {
    // NOTE(review): unlike the other commands, `entity` is excluded from the
    // Debug output here — looks unintentional; confirm whether it should be shown.
    #[derivative(Debug = "ignore")]
    entity: Entity,
    #[derivative(Debug = "ignore")]
    component: C,
}
impl<C> WorldWritable for AddComponentCommand<C>
where
    C: Component,
{
    fn write(self: Arc<Self>, world: &mut World) {
        // Panics if another Arc to this command still exists at flush time,
        // or if `add_component` fails.
        let consumed = Arc::try_unwrap(self).unwrap();
        world
            .add_component::<C>(consumed.entity, consumed.component)
            .unwrap();
    }
    fn write_components(&self) -> Vec<ComponentTypeId> { vec![ComponentTypeId::of::<C>()] }
    fn write_tags(&self) -> Vec<TagTypeId> { Vec::with_capacity(0) }
}
/// Command which removes the component of type `C` from an entity when the buffer is flushed.
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
struct RemoveComponentCommand<C> {
    entity: Entity,
    // Only the type is needed for removal; no component value is stored.
    _marker: PhantomData<C>,
}
impl<C> WorldWritable for RemoveComponentCommand<C>
where
    C: Component,
{
    fn write(self: Arc<Self>, world: &mut World) { world.remove_component::<C>(self.entity) }
    fn write_components(&self) -> Vec<ComponentTypeId> { vec![ComponentTypeId::of::<C>()] }
    fn write_tags(&self) -> Vec<TagTypeId> { Vec::with_capacity(0) }
}
/// The internal command representation queued by the command buffer.
#[allow(clippy::enum_variant_names)]
enum EntityCommand {
    // A structured write (insert/delete/add/remove) described by a `WorldWritable`.
    WriteWorld(Arc<dyn WorldWritable>),
    // An arbitrary closure run with shared access to the world.
    ExecWorld(Arc<dyn Fn(&World)>),
    // An arbitrary closure run with exclusive access to the world.
    ExecMutWorld(Arc<dyn Fn(&mut World)>),
}
/// A builder type which can be retrieved from the command buffer. This is the ideal use case for
/// inserted complex entities with multiple components and tags from a command buffer. Although
/// `add_component` will perform a new move operation on every addition, this allows the construction
/// of a single `insert` command for an entity, but without using the actual `insert` command
/// provided by the `CommandBuffer`
///
/// # Examples
///
/// Inserting an entity using the `EntityBuilder`:
///
/// ```
/// # use legion::prelude::*;
/// # #[derive(Copy, Clone, Debug, PartialEq)]
/// # struct Position(f32);
/// # #[derive(Copy, Clone, Debug, PartialEq)]
/// # struct Rotation(f32);
/// # let universe = Universe::new();
/// # let mut world = universe.create_world();
/// let mut command_buffer = CommandBuffer::from_world(&mut world);
/// command_buffer.build_entity().unwrap()
///     .with_component(Position(123.0))
///     .with_component(Rotation(456.0)).build(&mut command_buffer);
/// command_buffer.write(&mut world);
/// ```
pub struct EntityBuilder<TS = (), CS = ()> {
    // The pre-allocated entity this builder will insert data for.
    entity: Entity,
    // Cons-list of tags accumulated so far (flattened at build time).
    tags: TS,
    // Cons-list of components accumulated so far (flattened at build time).
    components: CS,
}
impl<TS, CS> EntityBuilder<TS, CS>
where
    TS: 'static + Send + ConsFlatten,
    CS: 'static + Send + ConsFlatten,
{
    /// Adds a component to this builder, returning a new builder type containing that component type
    /// and its data.
    pub fn with_component<C: Component>(
        self,
        component: C,
    ) -> EntityBuilder<TS, <CS as ConsAppend<C>>::Output>
    where
        CS: ConsAppend<C>,
        <CS as ConsAppend<C>>::Output: ConsFlatten,
    {
        EntityBuilder {
            components: ConsAppend::append(self.components, component),
            entity: self.entity,
            tags: self.tags,
        }
    }
    /// Adds a tag to this builder, returning a new builder type containing that component type
    /// and its data.
    pub fn with_tag<T: Tag>(self, tag: T) -> EntityBuilder<<TS as ConsAppend<T>>::Output, CS>
    where
        TS: ConsAppend<T>,
        <TS as ConsAppend<T>>::Output: ConsFlatten,
    {
        EntityBuilder {
            tags: ConsAppend::append(self.tags, tag),
            entity: self.entity,
            components: self.components,
        }
    }
    /// Finalizes this builder type and submits it to the `CommandBuffer` as a `WorldWritable` trait
    /// object.
    pub fn build(self, buffer: &mut CommandBuffer)
    where
        <TS as ConsFlatten>::Output: TagSet + TagLayout + for<'a> Filter<ChunksetFilterData<'a>>,
        ComponentTupleSet<
            <CS as ConsFlatten>::Output,
            std::iter::Once<<CS as ConsFlatten>::Output>,
        >: ComponentSource,
    {
        // Flatten the accumulated cons-lists into a single buffered insert
        // command targeting the builder's pre-allocated entity.
        buffer
            .commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(InsertBufferedCommand {
                write_components: Vec::default(),
                write_tags: Vec::default(),
                tags: self.tags.flatten(),
                components: IntoComponentSource::into(std::iter::once(self.components.flatten())),
                entities: vec![self.entity],
            })));
    }
}
/// Errors returned by the `CommandBuffer`
#[derive(Debug)]
pub enum CommandError {
    /// The command buffers entity cache has been exhausted. This is defaulted to 64 at `World::DEFAULT_COMMAND_BUFFER_SIZE`.
    /// This upper limit can be changed via `SystemBuilder::with_command_buffer_size` for specific systems,
    /// or globally via `World::set_command_buffer_size`.
    EntityBlockFull,
}
impl std::fmt::Display for CommandError {
    /// Writes a human-readable description of the error.
    ///
    /// Previously this printed only the bare string "CommandError" for every
    /// variant; it now matches on the variant so the message says what went wrong.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            CommandError::EntityBlockFull => {
                write!(f, "CommandError: entity block full")
            }
        }
    }
}
// `source`/`cause` both default to `None`, which is what the hand-written `cause`
// returned, so an empty impl is behaviorally identical (and avoids the deprecated method).
impl std::error::Error for CommandError {}
/// A command buffer used to queue mutable changes to the world from a system. This buffer is automatically
/// flushed and refreshed at the beginning of every frame by `Schedule`. If `Schedule` is not used,
/// then the user needs to manually flush it by performing `CommandBuffer::write`.
///
/// This buffer operates as follows:
/// - All commands are queued as trait object of type `WorldWritable`, to be executed when `CommandBuffer:write` is called.
/// - Entities are allocated at the time of `CommandBuffer:write` occurring, being directly allocated from the world
/// and cached internally in the system. This upper cache size can be changed via `SystemBuilder::with_command_buffer_size`
/// for specific systems, or globally via `World::set_command_buffer_size`. In the event the cached entity count is exceeded,
/// the cache will be refilled on demand from the world `EntityAllocator`.
///
/// This behavior exists because `EntityAllocator` is a shared lock within the world, so in order to reduce lock contention with many
/// systems running and adding entities, the `CommandBuffer` will cache the configured number of entities - reducing contention.
///
/// # Examples
///
/// Inserting an entity using the `CommandBuffer`:
///
/// ```
/// # use legion::prelude::*;
/// # #[derive(Copy, Clone, Debug, PartialEq)]
/// # struct Position(f32);
/// # #[derive(Copy, Clone, Debug, PartialEq)]
/// # struct Rotation(f32);
/// # let universe = Universe::new();
/// # let mut world = universe.create_world();
/// let mut command_buffer = CommandBuffer::from_world(&mut world);
/// let entity = command_buffer.create_entity().unwrap();
///
/// command_buffer.add_component(entity, Position(123.0));
/// command_buffer.delete(entity);
///
/// command_buffer.write(&mut world);
/// ```
#[derive(Default)]
pub struct CommandBuffer {
    // FIFO queue of pending commands; drained back-to-front by `write`.
    commands: AtomicRefCell<VecDeque<EntityCommand>>,
    // Allocator cloned from the world on first `write`/construction; `None` until then.
    entity_allocator: Option<Arc<EntityAllocator>>,
    // Per-buffer capacity override; takes precedence over `World::set_command_buffer_size`.
    pub(crate) custom_capacity: Option<usize>,
    // Entities pre-allocated from the world, ready to be handed out by `create_entity`.
    pub(crate) free_list: SmallVec<[Entity; 64]>,
    // Entities handed out since the last `write`; inserted into the world on flush.
    pub(crate) used_list: SmallVec<[Entity; 64]>,
}
// This is safe because only 1 system in 1 execution is only ever accessing a command buffer
// and we guarantee the write operations of a command buffer occur in a safe manner
unsafe impl Send for CommandBuffer {}
unsafe impl Sync for CommandBuffer {}
impl CommandBuffer {
    /// Creates a `CommandBuffer` with a custom capacity of cached Entity's to be collected every frame.
    /// Allocating a command buffer in this manner will overwrite `World::set_command_buffer_size` and
    /// this system will always allocate the custom provided capacity of entities every frame.
    ///
    /// # Notes
    /// This function does not perform any actual entity preallocation. `CommandBuffer::resize` or `CommandBuffer::write`
    /// must be called before using the command buffer for the first time to make entities available.
    pub fn with_capacity(capacity: usize) -> Self {
        // No allocator yet; entities are pulled from the world on the first `write`/`resize`.
        Self {
            custom_capacity: Some(capacity),
            free_list: SmallVec::with_capacity(capacity),
            commands: Default::default(),
            used_list: SmallVec::with_capacity(capacity),
            entity_allocator: None,
        }
    }
    /// Creates a `CommandBuffer` with a custom capacity of cached Entity's to be collected every frame.
    /// Allocating a command buffer in this manner will overwrite `World::set_command_buffer_size` and
    /// this system will always allocate the custom provided capacity of entities every frame.
    ///
    /// This constructor will preallocate the first round of entities needed from the world.
    pub fn from_world_with_capacity(world: &mut World, capacity: usize) -> Self {
        // Pull free entities from the world.
        let free_list =
            SmallVec::from_iter((0..capacity).map(|_| world.entity_allocator.create_entity()));
        Self {
            free_list,
            custom_capacity: Some(capacity),
            commands: Default::default(),
            used_list: SmallVec::with_capacity(capacity),
            entity_allocator: Some(world.entity_allocator.clone()),
        }
    }
    /// Creates a `CommandBuffer` sized by the world's configured `World::set_command_buffer_size`
    /// value.
    ///
    /// This constructor will preallocate the first round of entities needed from the world.
    pub fn from_world(world: &mut World) -> Self {
        // Pull free entities from the world.
        let free_list = SmallVec::from_iter(
            (0..world.command_buffer_size()).map(|_| world.entity_allocator.create_entity()),
        );
        Self {
            free_list,
            custom_capacity: None,
            commands: Default::default(),
            used_list: SmallVec::with_capacity(world.command_buffer_size()),
            entity_allocator: Some(world.entity_allocator.clone()),
        }
    }
    /// Changes the cached capacity of this `CommandBuffer` to the specified capacity. This includes shrinking
    /// and growing the allocated entities, and possibly returning them to the entity allocator in the
    /// case of a shrink.
    ///
    /// This function does *NOT* set the `CommandBuffer::custom_capacity` override.
    ///
    /// # Panics
    /// Panics if no entity allocator has been assigned to this buffer yet (i.e. it was created
    /// via `with_capacity` and has never been written to a world).
    pub fn resize(&mut self, capacity: usize) {
        let allocator = &self.entity_allocator;
        let free_list = &mut self.free_list;
        if let Some(allocator) = allocator.as_ref() {
            if free_list.len() < capacity {
                // Grow: request additional entities from the world's allocator.
                (free_list.len()..capacity).for_each(|_| free_list.push(allocator.create_entity()));
            } else {
                // Shrink: return the excess entities to the allocator.
                // BUG FIX: the previous range `(free_list.len() - capacity..capacity)` iterated
                // the wrong number of times (and was empty whenever len > 2 * capacity),
                // leaking cached entities. Pop until exactly `capacity` remain instead.
                while free_list.len() > capacity {
                    allocator.delete_entity(free_list.pop().unwrap());
                }
            }
        } else {
            panic!("Entity allocator not assigned to command buffer")
        }
    }
    /// Flushes this command buffer, draining all stored commands and writing them to the world.
    ///
    /// Command flushes are performed in a FIFO manner, allowing for reliable, linear commands being
    /// executed in the order they were provided.
    ///
    /// This function also calls `CommandBuffer::resize`, performing any appropriate entity preallocation,
    /// refilling the entity cache of any consumed entities.
    pub fn write(&mut self, world: &mut World) {
        tracing::trace!("Draining command buffer");
        // Lazily bind to the world's allocator if this buffer was built with `with_capacity`.
        if self.entity_allocator.is_none() {
            self.entity_allocator = Some(world.entity_allocator.clone());
        }
        // Materialize every entity handed out since the last flush as an empty entity;
        // queued `add_component`/`add_tag` commands then attach data to them below.
        let empty = vec![(); self.used_list.len()];
        world.insert_buffered(
            self.used_list.as_slice(),
            (),
            IntoComponentSource::into(empty),
        );
        self.used_list.clear();
        // Commands are pushed to the front, so popping from the back preserves FIFO order.
        while let Some(command) = self.commands.get_mut().pop_back() {
            match command {
                EntityCommand::WriteWorld(ptr) => ptr.write(world),
                EntityCommand::ExecMutWorld(closure) => closure(world),
                EntityCommand::ExecWorld(closure) => closure(world),
            }
        }
        // Refill our entity buffer from the world
        if let Some(custom_capacity) = self.custom_capacity {
            self.resize(custom_capacity);
        } else {
            self.resize(world.command_buffer_size());
        }
    }
    /// Consumes an internally cached entity, returning an `EntityBuilder` using that entity.
    pub fn build_entity(&mut self) -> Result<EntityBuilder<(), ()>, CommandError> {
        let entity = self.create_entity()?;
        Ok(EntityBuilder {
            entity,
            tags: (),
            components: (),
        })
    }
    /// Consumes an internally cached entity, or returns `CommandError` if the cache is exhausted.
    pub fn create_entity(&mut self) -> Result<Entity, CommandError> {
        // Refill once on demand; if the allocator is bound this makes entities available.
        if self.free_list.is_empty() {
            self.resize(
                self.custom_capacity
                    .unwrap_or(World::DEFAULT_COMMAND_BUFFER_SIZE),
            );
        }
        let entity = self.free_list.pop().ok_or(CommandError::EntityBlockFull)?;
        self.used_list.push(entity);
        Ok(entity)
    }
    /// Executes an arbitrary closure against the mutable world, allowing for queued exclusive
    /// access to the world.
    pub fn exec_mut<F>(&self, f: F)
    where
        F: 'static + Fn(&mut World),
    {
        self.commands
            .get_mut()
            .push_front(EntityCommand::ExecMutWorld(Arc::new(f)));
    }
    /// Inserts an arbitrary implementor of the `WorldWritable` trait into the command queue.
    /// This can be leveraged for creating custom `WorldWritable` trait implementors, and is used
    /// internally for the default writers.
    pub fn insert_writer<W>(&self, writer: W)
    where
        W: 'static + WorldWritable,
    {
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(writer)));
    }
    /// Queues an *unbuffered* insertion into the world. This command follows the same syntax as
    /// the normal `World::insert`, except for one caveat - entities are NOT returned by this
    /// function, meaning that the internal entity cache and limits of this `CommandBuffer` are not
    /// applicable to this function.
    ///
    /// This function can be considered a "fire and forget" entity creation method which is not bound
    /// by the standard command buffer size limits of the other entity insertion functions. This allows
    /// for mass insertion of entities, exceeding the command buffer sizes, to occur in scenarios that
    /// the entities do not need to be retrieved.
    pub fn insert_unbuffered<T, C>(&mut self, tags: T, components: C)
    where
        T: 'static + TagSet + TagLayout + for<'a> Filter<ChunksetFilterData<'a>>,
        C: 'static + IntoComponentSource,
    {
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(InsertCommand {
                write_components: Vec::default(),
                write_tags: Vec::default(),
                tags,
                components,
            })));
    }
    /// Queues an insertion into the world. This command follows the same syntax as
    /// the normal `World::insert`, returning the entities created for this command.
    ///
    /// Returns `CommandError::EntityBlockFull` if the component source needs more entities
    /// than are currently cached in the free list.
    pub fn insert<T, C>(&mut self, tags: T, components: C) -> Result<Vec<Entity>, CommandError>
    where
        T: 'static + TagSet + TagLayout + for<'a> Filter<ChunksetFilterData<'a>>,
        C: 'static + IntoComponentSource,
    {
        let components = components.into();
        if components.len() > self.free_list.len() {
            return Err(CommandError::EntityBlockFull);
        }
        let mut entities = Vec::with_capacity(components.len());
        for _ in 0..components.len() {
            entities.push(self.free_list.pop().ok_or(CommandError::EntityBlockFull)?);
        }
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(InsertBufferedCommand {
                write_components: Vec::default(),
                write_tags: Vec::default(),
                tags,
                components,
                entities: entities.clone(),
            })));
        Ok(entities)
    }
    /// Queues the deletion of an entity in the command buffer. This writer calls `World::delete`
    pub fn delete(&self, entity: Entity) {
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(DeleteEntityCommand(
                entity,
            ))));
    }
    /// Queues the addition of a component to an entity in the command buffer.
    /// This writer calls `World::add_component`
    pub fn add_component<C: Component>(&self, entity: Entity, component: C) {
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(AddComponentCommand {
                entity,
                component,
            })));
    }
    /// Queues the removal of a component from an entity in the command buffer.
    /// This writer calls `World::remove_component`
    pub fn remove_component<C: Component>(&self, entity: Entity) {
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(
                RemoveComponentCommand {
                    entity,
                    _marker: PhantomData::<C>::default(),
                },
            )));
    }
    /// Queues the addition of a tag to an entity in the command buffer.
    /// This writer calls `World::add_tag`
    pub fn add_tag<T: Tag>(&self, entity: Entity, tag: T) {
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(AddTagCommand {
                entity,
                tag,
            })));
    }
    /// Queues the removal of a tag from an entity in the command buffer.
    /// This writer calls `World::remove_tag`
    pub fn remove_tag<T: Tag>(&self, entity: Entity) {
        self.commands
            .get_mut()
            .push_front(EntityCommand::WriteWorld(Arc::new(RemoveTagCommand {
                entity,
                _marker: PhantomData::<T>::default(),
            })));
    }
    /// Returns the current number of commands already queued in this `CommandBuffer` instance.
    #[inline]
    pub fn len(&self) -> usize { self.commands.get().len() }
    /// Returns true if this `CommandBuffer` is currently empty and contains no writers.
    #[inline]
    pub fn is_empty(&self) -> bool { self.commands.get().is_empty() }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::prelude::*;
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Pos(f32, f32, f32);
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Vel(f32, f32, f32);
    #[derive(Default)]
    struct TestResource(pub i32);
    /// Entities created through the buffer become queryable after `write`.
    #[test]
    fn create_entity_test() -> Result<(), CommandError> {
        let _ = tracing_subscriber::fmt::try_init();
        let universe = Universe::new();
        let mut world = universe.create_world();
        let expected = vec![
            (Pos(1., 2., 3.), Vel(0.1, 0.2, 0.3)),
            (Pos(4., 5., 6.), Vel(0.4, 0.5, 0.6)),
        ];
        let expected_count = expected.len();
        //world.entity_allocator.get_block()
        let mut buffer = CommandBuffer::from_world(&mut world);
        let first = buffer.create_entity()?;
        let second = buffer.create_entity()?;
        buffer.add_component(first, Pos(1., 2., 3.));
        buffer.add_component(second, Pos(4., 5., 6.));
        buffer.write(&mut world);
        let query = Read::<Pos>::query();
        let mut found = 0;
        for _ in query.iter_entities(&mut world) {
            found += 1;
        }
        assert_eq!(expected_count, found);
        Ok(())
    }
    /// A buffered `insert` writes all queued components into the world on flush.
    #[test]
    fn simple_write_test() -> Result<(), CommandError> {
        let _ = tracing_subscriber::fmt::try_init();
        let universe = Universe::new();
        let mut world = universe.create_world();
        let inserted = vec![
            (Pos(1., 2., 3.), Vel(0.1, 0.2, 0.3)),
            (Pos(4., 5., 6.), Vel(0.4, 0.5, 0.6)),
        ];
        let inserted_count = inserted.len();
        //world.entity_allocator.get_block()
        let mut buffer = CommandBuffer::from_world(&mut world);
        let _ = buffer.insert((), inserted)?;
        // Assert writing checks
        // TODO:
        //assert_eq!(
        //    vec![ComponentTypeId::of::<Pos>(), ComponentTypeId::of::<Vel>()],
        //    command.write_components()
        //);
        buffer.write(&mut world);
        let query = Read::<Pos>::query();
        let mut found = 0;
        for _ in query.iter_entities_mut(&mut world) {
            found += 1;
        }
        assert_eq!(inserted_count, found);
        Ok(())
    }
}

153
bevy_legion/src/cons.rs Normal file
View file

@ -0,0 +1,153 @@
// Things happen here, and they work.
// ,---.
// / |
// / |
// / |
// / |
// ___,' |
// < -' :
// `-.__..--'``-,_\_
// |o/ ` :,.)_`>
// :/ ` ||/)
// (_.).__,-` |\
// /( `.`` `| :
// \'`-.) ` ; ;
// | ` /-<
// | ` / `.
// ,-_-..____ /| ` :__..-'\
// ,'-.__\\ ``-./ :` ; \
//`\ `\ `\\ \ : ( ` / , `. \
// \` \ \\ | | ` : : .\ \
// \ `\_ )) : ; | | ): :
// (`-.-'\ || |\ \ ` ; ; | |
// \-_ `;;._ ( ` / /_ | |
// `-.-.// ,'`-._\__/_,' ; |
// \:: : / ` , / |
// || | ( ,' / / |
// || ,' / |
/// Prepends a new type onto the front of a cons list.
pub trait ConsPrepend<T> {
    /// The cons list type produced by prepending `T`.
    type Output;
    /// Prepends `value` to this runtime cons value, returning the extended list.
    fn prepend(self, value: T) -> Self::Output;
}
// Prepending to the empty list yields a single-element list.
impl<T> ConsPrepend<T> for () {
    type Output = (T, Self);
    fn prepend(self, value: T) -> Self::Output { (value, self) }
}
// Prepending to a non-empty list simply wraps the existing head/tail pair.
impl<T, A, B> ConsPrepend<T> for (A, B) {
    type Output = (T, Self);
    fn prepend(self, value: T) -> Self::Output { (value, self) }
}
/// Appends a new type onto the end of a cons list.
/// (The previous doc comment said "Prepend" — a copy-paste error; this trait appends.)
pub trait ConsAppend<T> {
    /// The cons list type produced by appending `T`.
    type Output;
    /// Appends `value` to this runtime cons value, returning the extended list.
    fn append(self, value: T) -> Self::Output;
}
// Appending to the empty list yields a single-element list.
impl<T> ConsAppend<T> for () {
    type Output = (T, Self);
    fn append(self, value: T) -> Self::Output { (value, ()) }
}
// Appending to a non-empty list recurses down the tail until it reaches `()`.
impl<T, A, B: ConsAppend<T>> ConsAppend<T> for (A, B) {
    type Output = (A, <B as ConsAppend<T>>::Output);
    fn append(self, value: T) -> Self::Output {
        let (head, tail) = self;
        (head, tail.append(value))
    }
}
/// Transforms a cons list into a flat tuple.
pub trait ConsFlatten {
    /// The flattened tuple type.
    type Output;
    /// Flattens this runtime cons value into a tuple.
    fn flatten(self) -> Self::Output;
}
// The empty cons list flattens to the unit tuple.
impl ConsFlatten for () {
    type Output = ();
    fn flatten(self) -> Self::Output { () }
}
// Builds a right-nested cons list from a flat list of expressions or types:
// `cons!(1, 2)` expands to `(1, (2, ()))`.
macro_rules! cons {
    () => (
        ()
    );
    ($head:tt) => (
        ($head, ())
    );
    ($head:tt, $($tail:tt),*) => (
        ($head, cons!($($tail),*))
    );
}
// Implements `ConsFlatten` for cons lists of every length up to the number of
// supplied type parameters: each invocation emits one impl, then recurses via
// the `@` rules with the first parameter dropped.
macro_rules! impl_flatten {
    ($($items:ident),*) => {
        #[allow(unused_parens)] // This is added because the nightly compiler complains
        impl<$($items),*> ConsFlatten for cons!($($items),*)
        {
            type Output = ($($items),*);
            fn flatten(self) -> Self::Output {
                // Destructure the nested cons value and re-emit it as a flat tuple.
                #[allow(non_snake_case)]
                let cons!($($items),*) = self;
                ($($items),*)
            }
        }
        impl_flatten!(@ $($items),*);
    };
    (@ $head:ident, $($tail:ident),*) => {
        impl_flatten!($($tail),*);
    };
    (@ $head:ident) => {};
}
// Covers cons lists of up to 26 elements.
impl_flatten!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z);
fn test_api() {}
#[cfg(test)]
mod tests {
    use super::*;
    /// `cons!` builds right-nested pairs terminated by the unit type.
    #[test]
    fn cons_macro() {
        assert_eq!((), cons!());
        assert_eq!((1, ()), cons!(1));
        assert_eq!((1, (2, (3, (4, ())))), cons!(1, 2, 3, 4));
    }
    /// `prepend` pushes new values onto the front of the list.
    #[test]
    fn cons_prepend() {
        assert_eq!((123, ()), ().prepend(123));
        let extended = cons!(1, 2, 3, 4, 5).prepend(123).prepend(15);
        assert_eq!(cons!(15, 123, 1, 2, 3, 4, 5), extended);
    }
    /// `append` pushes new values onto the back of the list.
    #[test]
    fn cons_append() {
        assert_eq!((123, ()), ().append(123));
        let extended = cons!(1, 2, 3, 4, 5).append(123).append(15);
        assert_eq!(cons!(1, 2, 3, 4, 5, 123, 15), extended);
    }
    /// Flattening converts a cons list into the equivalent flat tuple.
    #[test]
    fn cons_flatten() {
        assert_eq!((), ().flatten());
        assert_eq!(1, (1, ()).flatten());
        assert_eq!((1, 2, 3, 4, 5), cons!(1, 2, 3, 4, 5).flatten());
    }
}

750
bevy_legion/src/de.rs Normal file
View file

@ -0,0 +1,750 @@
use crate::{
entity::{Entity, EntityAllocator},
storage::{
ArchetypeData, ArchetypeDescription, Chunkset, ComponentMeta, ComponentTypeId, TagMeta,
TagStorage, TagTypeId,
},
world::World,
};
use serde::{
self,
de::{self, DeserializeSeed, Visitor},
Deserialize, Deserializer,
};
use std::{cell::RefCell, collections::HashMap, ptr::NonNull};
/// Returns a type that implements `serde::DeserializeSeed`.
/// Pass the returned value to your `serde::Deserializer`.
/// The caller must provide an implementation for `WorldDeserializer`.
pub fn deserializable<'a, 'b, WD: WorldDeserializer>(
    world: &'a mut World,
    deserialize_impl: &'b WD,
) -> WorldDeserialize<'a, 'b, WD> {
    WorldDeserialize {
        user: deserialize_impl,
        world,
    }
}
/// Deserializes data into the provided World using the provided `serde::Deserializer`.
/// The caller must provide an implementation for `WorldDeserializer`.
pub fn deserialize<'dd, 'a, 'b, WD: WorldDeserializer, D: Deserializer<'dd>>(
    world: &'a mut World,
    deserialize_impl: &'b WD,
    deserializer: D,
) -> Result<(), <D as Deserializer<'dd>>::Error> {
    // Build the seed and drive it with the supplied deserializer.
    let seed = deserializable(world, deserialize_impl);
    seed.deserialize(deserializer)
}
/// User must implement this trait to deserialize a World.
/// The implementation must match that of the `WorldSerializer` provided
/// when serializing the data that is to be deserialized by this impl.
pub trait WorldDeserializer {
    /// Deserializes an ArchetypeDescription
    fn deserialize_archetype_description<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
    ) -> Result<ArchetypeDescription, <D as Deserializer<'de>>::Error>;
    /// Deserializes component data.
    /// `get_next_storage_fn` will return Some(component_data_ptr, num_elements) until all
    /// reserved memory has been exhausted, whereupon it will return None.
    /// `component_data_ptr` are pointers to reserved memory in chunks
    /// that have been reserved to accommodate the number of entities that were previously deserialized
    /// by `deserialize_entities`.
    ///
    /// # Safety
    ///
    /// The implementation must ensure `get_next_storage_fn` is called until it returns
    /// None, and that all memory returned by `get_next_storage_fn` is properly initialized
    /// before this function returns.
    fn deserialize_components<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
        component_type: &ComponentTypeId,
        component_meta: &ComponentMeta,
        get_next_storage_fn: &mut dyn FnMut() -> Option<(NonNull<u8>, usize)>,
    ) -> Result<(), <D as Deserializer<'de>>::Error>;
    /// Deserializes tag data into a TagStorage.
    fn deserialize_tags<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
        tag_type: &TagTypeId,
        tag_meta: &TagMeta,
        tags: &mut TagStorage,
    ) -> Result<(), <D as Deserializer<'de>>::Error>;
    /// Deserializes entity identifiers into the provided buffer.
    fn deserialize_entities<'de, D: Deserializer<'de>>(
        &self,
        deserializer: D,
        entity_allocator: &EntityAllocator,
        entities: &mut Vec<Entity>,
    ) -> Result<(), <D as Deserializer<'de>>::Error>;
}
/// Implements `DeserializeSeed` and can be passed to a `serde::Deserializer`.
pub struct WorldDeserialize<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a mut World,
}
impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de> for WorldDeserialize<'a, 'b, WD> {
    type Value = ();
    /// Deserializes the top-level sequence of archetypes into the wrapped world.
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        // The world is shared (via RefCell) between the per-archetype seeds cloned
        // by SeqDeserializer.
        let shared_world = RefCell::new(self.world);
        let seed = SeqDeserializer(ArchetypeDeserializer {
            user: self.user,
            world: &shared_world,
        });
        deserializer.deserialize_seq(seed)
    }
}
// Field identifiers for the serialized "Archetype" struct; names must stay in sync
// with the `FIELDS` list used when deserializing the struct.
#[derive(Deserialize, Debug)]
#[serde(field_identifier, rename_all = "snake_case")]
enum ArchetypeField {
    Description,
    Tags,
    ChunkSets,
}
/// Seed that deserializes a single serialized archetype into the shared world.
struct ArchetypeDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a RefCell<&'a mut World>,
}
// Manual impl: `#[derive(Clone)]` would add an unnecessary `WD: Clone` bound,
// and both fields are plain references which are trivially copyable.
impl<'a, 'b, WD: WorldDeserializer> Clone for ArchetypeDeserializer<'a, 'b, WD> {
    fn clone(&self) -> Self {
        ArchetypeDeserializer {
            world: self.world,
            user: self.user,
        }
    }
}
impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de>
    for ArchetypeDeserializer<'a, 'b, WD>
{
    type Value = ();
    // Deserializes one "Archetype" struct: its description, its tags, and its chunk sets.
    // The seed itself doubles as the serde Visitor (impl nested below).
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        impl<'a, 'b, 'de, WD: WorldDeserializer> Visitor<'de> for ArchetypeDeserializer<'a, 'b, WD> {
            type Value = ();
            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str("struct Archetype")
            }
            // Sequence form: fields arrive in declaration order
            // (description, tags, chunk_sets).
            fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
            where
                V: de::SeqAccess<'de>,
            {
                // Resolves (or allocates) the matching archetype in the world and
                // returns its index.
                let archetype_idx = seq
                    .next_element_seed(ArchetypeDescriptionDeserialize {
                        user: self.user,
                        world: self.world,
                    })?
                    .expect("expected description");
                let mut world = self.world.borrow_mut();
                let archetype_data = &mut world.storage_mut().archetypes_mut()[archetype_idx];
                // Deserializing tags yields a mapping from serialized chunkset index
                // to world chunkset index, consumed by the chunk set pass below.
                let chunkset_map = seq
                    .next_element_seed(TagsDeserializer {
                        user: self.user,
                        archetype: archetype_data,
                    })?
                    .expect("expected tags");
                seq.next_element_seed(ChunkSetDeserializer {
                    user: self.user,
                    world: &mut *world,
                    archetype_idx,
                    chunkset_map: &chunkset_map,
                })?
                .expect("expected chunk_sets");
                Ok(())
            }
            // Map form: fields may arrive keyed by name, but tags require the
            // description first, and chunk sets require both earlier fields.
            fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
            where
                V: de::MapAccess<'de>,
            {
                let mut archetype_idx = None;
                let mut chunkset_map = None;
                while let Some(key) = map.next_key()? {
                    match key {
                        ArchetypeField::Description => {
                            archetype_idx =
                                Some(map.next_value_seed(ArchetypeDescriptionDeserialize {
                                    user: self.user,
                                    world: self.world,
                                })?);
                        }
                        ArchetypeField::Tags => {
                            let archetype_idx =
                                archetype_idx.expect("expected archetype description before tags");
                            let mut world = self.world.borrow_mut();
                            let archetype_data =
                                &mut world.storage_mut().archetypes_mut()[archetype_idx];
                            chunkset_map = Some(map.next_value_seed(TagsDeserializer {
                                user: self.user,
                                archetype: archetype_data,
                            })?);
                        }
                        ArchetypeField::ChunkSets => {
                            let archetype_idx = archetype_idx
                                .expect("expected archetype description before chunksets");
                            let mut world = self.world.borrow_mut();
                            map.next_value_seed(ChunkSetDeserializer {
                                user: self.user,
                                world: &mut *world,
                                archetype_idx,
                                chunkset_map: chunkset_map
                                    .as_ref()
                                    .expect("expected tags before chunksets"),
                            })?;
                            // Chunk sets are the final field; stop once consumed.
                            return Ok(());
                        }
                    }
                }
                Err(de::Error::missing_field("data"))
            }
        }
        const FIELDS: &[&str] = &["description", "tags", "chunk_sets"];
        deserializer.deserialize_struct("Archetype", FIELDS, self)
    }
}
/// Deserializes a homogeneous sequence by repeatedly applying the cloneable seed `T`
/// to each element, discarding element values.
pub struct SeqDeserializer<T>(T);
impl<'de, T: DeserializeSeed<'de> + Clone> DeserializeSeed<'de> for SeqDeserializer<T> {
    type Value = ();
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_seq(self)
    }
}
impl<'de, T: DeserializeSeed<'de> + Clone> Visitor<'de> for SeqDeserializer<T> {
    type Value = ();
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("sequence of objects")
    }
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: de::SeqAccess<'de>,
    {
        // Drive a clone of the inner seed over every element until the sequence ends.
        while seq.next_element_seed::<T>(self.0.clone())?.is_some() {}
        Ok(())
    }
}
/// Seed that deserializes an `ArchetypeDescription` and resolves it to the index of a
/// matching archetype in the world, allocating a new archetype when none matches.
struct ArchetypeDescriptionDeserialize<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a RefCell<&'a mut World>,
}
impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de>
    for ArchetypeDescriptionDeserialize<'a, 'b, WD>
{
    type Value = usize;
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        let archetype_desc = self
            .user
            .deserialize_archetype_description::<D>(deserializer)?;
        let mut world = self.world.borrow_mut();
        let storage = world.storage_mut();
        // Reuse an existing archetype with an identical description when available.
        let existing = storage
            .archetypes()
            .iter()
            .position(|a| a.description() == &archetype_desc);
        match existing {
            Some(idx) => Ok(idx),
            None => {
                let (idx, _) = storage.alloc_archetype(archetype_desc);
                Ok(idx)
            }
        }
    }
}
type ChunkSetMapping = HashMap<usize, usize>;
/// Seed that deserializes an archetype's tag values and merges them into the archetype,
/// producing a `ChunkSetMapping` from serialized chunkset indices to world chunkset indices.
struct TagsDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    archetype: &'a mut ArchetypeData,
}
impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de> for TagsDeserializer<'a, 'b, WD> {
    type Value = ChunkSetMapping;
    // Deserializes all tag storages for the archetype (via the Visitor impl below), then
    // matches each deserialized tag-value set against the world's existing chunksets,
    // allocating new chunksets for unmatched values. Returns the mapping from serialized
    // chunkset index to world chunkset index.
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        // The Visitor returns both the temporary per-type tag storages and `self` back.
        let (mut deserialized_tags, this) = deserializer.deserialize_seq(self)?;
        let tag_types = this.archetype.description().tags().to_vec();
        let mut chunkset_map = ChunkSetMapping::new();
        let tags = this.archetype.tags_mut();
        assert_eq!(tags.0.len(), tag_types.len());
        // To simplify later code, shuffle the &mut tag_storage indices to match tag_types
        let world_tag_storages = {
            let mut world_tag_storages: Vec<&mut TagStorage> = Vec::with_capacity(tag_types.len());
            for (tag_type, tag_storage) in tags.0.iter_mut() {
                let type_idx = tag_types
                    .iter()
                    .position(|(ty, _)| ty == tag_type)
                    .expect("tag type mismatch with Tags");
                // Write each &mut into its slot by index; every slot is filled exactly
                // once because tag_types and tags.0 contain the same set of types.
                unsafe {
                    std::ptr::write(world_tag_storages.as_mut_ptr().add(type_idx), tag_storage);
                }
            }
            // All `tag_types.len()` slots were initialized by the loop above.
            unsafe {
                world_tag_storages.set_len(tag_types.len());
            }
            world_tag_storages
        };
        // Each storage holds one value per chunkset; the first storage's length is the count.
        let num_world_values = world_tag_storages.iter().map(|ts| ts.len()).nth(0);
        let num_tag_values = deserialized_tags
            .iter()
            .map(|ts| ts.len())
            .nth(0)
            .unwrap_or(0);
        let mut chunksets_to_add = Vec::new();
        // For each deserialized tag-value set, look for a world chunkset whose tag values
        // are all byte-equal (per TagMeta::equals).
        for i in 0..num_tag_values {
            let mut matching_idx = None;
            if let Some(num_world_values) = num_world_values {
                for j in 0..num_world_values {
                    let mut is_matching = true;
                    for tag_idx in 0..tag_types.len() {
                        unsafe {
                            let (de_ptr, stride, _) = deserialized_tags[tag_idx].data_raw();
                            let (world_ptr, _, _) = world_tag_storages[tag_idx].data_raw();
                            let (_, tag_meta) = tag_types[tag_idx];
                            let de_offset = (i * stride) as isize;
                            let world_offset = (j * stride) as isize;
                            if !tag_meta.equals(
                                de_ptr.as_ptr().offset(de_offset),
                                world_ptr.as_ptr().offset(world_offset),
                            ) {
                                is_matching = false;
                                break;
                            }
                        }
                    }
                    if is_matching {
                        matching_idx = Some(j);
                        break;
                    }
                }
            }
            // If we have a matching tag set, we will drop our temporary values manually.
            // All temporary TagStorages in `deserialized_tags` will be forgotten later
            // because we move data into World when allocating a new chunkset
            if let Some(world_idx) = matching_idx {
                chunkset_map.insert(i, world_idx);
                for tag_idx in 0..tag_types.len() {
                    unsafe {
                        let (_, tag_meta) = tag_types[tag_idx];
                        let (de_ptr, stride, _) = deserialized_tags[tag_idx].data_raw();
                        let de_offset = (i * stride) as isize;
                        tag_meta.drop(de_ptr.as_ptr().offset(de_offset) as *mut u8);
                    }
                }
            } else {
                chunksets_to_add.push(i);
            }
        }
        // Allocate a new world chunkset for every unmatched tag-value set, moving the
        // raw tag bytes out of the temporary storages.
        for tag_value_idx in chunksets_to_add {
            let chunkset_idx = this.archetype.alloc_chunk_set(|tags| {
                for (tag_idx, (tag_type, _)) in tag_types.iter().enumerate() {
                    unsafe {
                        let (de_ptr, stride, _) = deserialized_tags[tag_idx].data_raw();
                        let de_offset = (tag_value_idx * stride) as isize;
                        let world_storage = tags
                            .get_mut(*tag_type)
                            .expect("tag_storage should be present after allocating chunk_set");
                        world_storage.push_raw(de_ptr.as_ptr().offset(de_offset));
                    }
                }
            });
            chunkset_map.insert(tag_value_idx, chunkset_idx);
        }
        // Values were either dropped in place or moved into the world above; forget
        // the temporary storages so they are not dropped twice.
        for tag in deserialized_tags.drain(0..) {
            tag.forget_data();
        }
        // An archetype with no tags still needs one (implicit) chunkset.
        if num_tag_values == 0 {
            let chunkset_idx = this.archetype.alloc_chunk_set(|_| {});
            chunkset_map.insert(0, chunkset_idx);
        }
        Ok(chunkset_map)
    }
}
impl<'de, 'a, 'b, WD: WorldDeserializer> Visitor<'de> for TagsDeserializer<'a, 'b, WD> {
    // Returns the temporary per-type tag storages together with `self`, so the
    // DeserializeSeed impl above can continue working with the archetype afterwards.
    type Value = (Vec<TagStorage>, Self);
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("sequence of objects")
    }
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: de::SeqAccess<'de>,
    {
        let tag_types = self.archetype.description().tags();
        let mut deserialized_tags = Vec::new();
        // One serialized element per tag type, in description order; stop early if the
        // sequence is shorter than the description.
        for (tag_type, tag_meta) in tag_types {
            let mut tag_storage = TagStorage::new(*tag_meta);
            if seq
                .next_element_seed(TagStorageDeserializer {
                    user: self.user,
                    tag_storage: &mut tag_storage,
                    tag_type: &tag_type,
                    tag_meta: &tag_meta,
                })?
                .is_none()
            {
                break;
            }
            deserialized_tags.push(tag_storage);
        }
        Ok((deserialized_tags, self))
    }
}
/// Seed that delegates deserialization of one tag type's values into the provided
/// `TagStorage` to the user's `WorldDeserializer` implementation.
struct TagStorageDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    tag_storage: &'a mut TagStorage,
    tag_type: &'a TagTypeId,
    tag_meta: &'a TagMeta,
}
impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de>
    for TagStorageDeserializer<'a, 'b, WD>
{
    type Value = ();
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        // Pure delegation: the user impl pushes deserialized values into `tag_storage`.
        self.user
            .deserialize_tags(deserializer, self.tag_type, self.tag_meta, self.tag_storage)
    }
}
/// Seed that deserializes the sequence of chunk sets for one archetype, translating
/// serialized chunkset indices to world chunkset indices via `chunkset_map`.
struct ChunkSetDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a mut World,
    archetype_idx: usize,
    chunkset_map: &'a ChunkSetMapping,
}
impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de> for ChunkSetDeserializer<'a, 'b, WD> {
    type Value = ();
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_seq(self)
    }
}
impl<'de, 'a, 'b, WD: WorldDeserializer> Visitor<'de> for ChunkSetDeserializer<'a, 'b, WD> {
    type Value = ();
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("sequence of objects")
    }
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: de::SeqAccess<'de>,
    {
        // `idx` is the chunkset's position in the serialized data; the mapping yields
        // the corresponding world chunkset (None if the map has no entry for it).
        for idx in 0.. {
            let chunkset_idx = self.chunkset_map.get(&idx).cloned();
            if seq
                .next_element_seed(ChunkListDeserializer {
                    user: self.user,
                    world: self.world,
                    archetype_idx: self.archetype_idx,
                    chunkset_idx,
                })?
                .is_none()
            {
                // End of the serialized sequence.
                break;
            }
        }
        Ok(())
    }
}
/// Seed that deserializes the list of chunks belonging to one chunkset of one archetype.
struct ChunkListDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a mut World,
    archetype_idx: usize,
    // World chunkset index resolved from the serialized index; expected to be `Some`
    // by the time a chunk is actually deserialized (see `expect` below).
    chunkset_idx: Option<usize>,
}
impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de>
    for ChunkListDeserializer<'a, 'b, WD>
{
    type Value = ();
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_seq(self)
    }
}
impl<'de, 'a, 'b, WD: WorldDeserializer> Visitor<'de> for ChunkListDeserializer<'a, 'b, WD> {
    type Value = ();
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("sequence of struct Chunk")
    }
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: de::SeqAccess<'de>,
    {
        // Deserialize chunks until the serialized sequence is exhausted.
        loop {
            if seq
                .next_element_seed(ChunkDeserializer {
                    user: self.user,
                    world: self.world,
                    archetype_idx: self.archetype_idx,
                    chunkset_idx: self.chunkset_idx.expect("expected chunkset_idx"),
                })?
                .is_none()
            {
                break;
            }
        }
        Ok(())
    }
}
/// Field identifiers for the serialized struct form of a `Chunk`.
#[derive(Deserialize, Debug)]
#[serde(field_identifier, rename_all = "lowercase")]
enum ChunkField {
    Entities,
    Components,
}
/// Seed/visitor that decodes a single serialized `Chunk` — its entity list
/// followed by its component data.
///
/// The `entities` field must appear before `components`: entity placement
/// produces the chunk ranges that tell the component pass where to write.
struct ChunkDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a mut World,
    archetype_idx: usize,
    chunkset_idx: usize,
}

impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de> for ChunkDeserializer<'a, 'b, WD> {
    type Value = ();

    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_struct("Chunk", &["entities", "components"], self)
    }
}

impl<'de, 'a, 'b, WD: WorldDeserializer> Visitor<'de> for ChunkDeserializer<'a, 'b, WD> {
    type Value = ();

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("struct Chunk")
    }

    // Tuple/sequence encoding: fields arrive in declaration order.
    fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
    where
        V: de::SeqAccess<'de>,
    {
        // First element: entities. Yields the (chunk index, count) ranges
        // where the entities were placed.
        let chunk_ranges = seq.next_element_seed(EntitiesDeserializer {
            user: self.user,
            world: self.world,
            archetype_idx: self.archetype_idx,
            chunkset_idx: self.chunkset_idx,
        })?;
        // Second element: component data, written into the ranges above.
        seq.next_element_seed(ComponentsDeserializer {
            user: self.user,
            world: self.world,
            archetype_idx: self.archetype_idx,
            chunkset_idx: self.chunkset_idx,
            chunk_ranges: chunk_ranges
                .as_ref()
                .expect("expected entities before components"),
        })?;
        Ok(())
    }

    // Map encoding: fields may be keyed, but entities must still precede
    // components (enforced by the `expect` below).
    fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
    where
        V: de::MapAccess<'de>,
    {
        let mut chunk_ranges = None;
        while let Some(key) = map.next_key()? {
            match key {
                ChunkField::Entities => {
                    chunk_ranges = Some(map.next_value_seed(EntitiesDeserializer {
                        user: self.user,
                        world: self.world,
                        archetype_idx: self.archetype_idx,
                        chunkset_idx: self.chunkset_idx,
                    })?);
                }
                ChunkField::Components => {
                    map.next_value_seed(ComponentsDeserializer {
                        user: self.user,
                        world: self.world,
                        archetype_idx: self.archetype_idx,
                        chunkset_idx: self.chunkset_idx,
                        chunk_ranges: chunk_ranges
                            .as_ref()
                            .expect("expected entities before components"),
                    })?;
                }
            }
        }
        Ok(())
    }
}
/// Seed that decodes a chunk's entity list and inserts the entities into free
/// chunks of the target chunkset.
///
/// Produces a list of `(chunk_index, entity_count)` ranges describing where
/// the entities were placed; the component pass uses these ranges as its
/// write destinations.
struct EntitiesDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a mut World,
    archetype_idx: usize,
    chunkset_idx: usize,
}

impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de> for EntitiesDeserializer<'a, 'b, WD> {
    type Value = Vec<(usize, usize)>;

    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        // The user deserializer decodes the entity IDs (registering them with
        // the world's allocator) into `entities`.
        let mut entities = Vec::new();
        self.user.deserialize_entities(
            deserializer,
            &self.world.entity_allocator,
            &mut entities,
        )?;
        let archetype = &mut self.world.storage_mut().archetypes_mut()[self.archetype_idx];
        let mut chunk_ranges = Vec::new();
        // Start with a chunk that has room for the whole batch if possible.
        let mut chunk_idx = archetype.get_free_chunk(self.chunkset_idx, entities.len());
        let mut entities_in_chunk = 0;
        for entity in entities {
            let chunk = {
                let chunkset = &mut archetype.chunksets_mut()[self.chunkset_idx];
                let chunk = &mut chunkset[chunk_idx];
                if chunk.is_full() {
                    // Current chunk filled up: close out its range and roll
                    // over to a fresh free chunk.
                    chunk_ranges.push((chunk_idx, entities_in_chunk));
                    chunk_idx = archetype.get_free_chunk(self.chunkset_idx, 1);
                    let chunkset = &mut archetype.chunksets_mut()[self.chunkset_idx];
                    &mut chunkset[chunk_idx]
                } else {
                    chunk
                }
            };
            chunk.writer().get().0.push(entity);
            entities_in_chunk += 1;
        }
        // NOTE(review): when a rollover happens, `entities_in_chunk` is not
        // reset, so later ranges appear to carry the running total rather than
        // the per-chunk count — confirm against the component writer's
        // expectations upstream.
        if entities_in_chunk > 0 {
            chunk_ranges.push((chunk_idx, entities_in_chunk));
        }
        Ok(chunk_ranges)
    }
}
/// Seed/visitor that decodes a chunk's component data, one sequence element
/// per component type, in the archetype description's declared order.
struct ComponentsDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    world: &'a mut World,
    archetype_idx: usize,
    chunkset_idx: usize,
    /// `(chunk_index, entity_count)` ranges produced by entity placement;
    /// component values are written into these slots.
    chunk_ranges: &'a Vec<(usize, usize)>,
}

impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de>
    for ComponentsDeserializer<'a, 'b, WD>
{
    type Value = ();

    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_seq(self)
    }
}

impl<'de, 'a, 'b, WD: WorldDeserializer> Visitor<'de> for ComponentsDeserializer<'a, 'b, WD> {
    type Value = ();

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("sequence of objects")
    }

    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: de::SeqAccess<'de>,
    {
        let archetype = &mut self.world.storage_mut().archetypes_mut()[self.archetype_idx];
        // One serialized element per component type, matching the order of
        // the archetype's component description.
        for idx in 0..archetype.description().components().len() {
            let desc = archetype.description();
            let (comp_type, comp_meta) = desc.components()[idx];
            let mut chunkset = &mut archetype.chunksets_mut()[self.chunkset_idx];
            if seq
                .next_element_seed(ComponentDataDeserializer {
                    user: self.user,
                    comp_type: &comp_type,
                    comp_meta: &comp_meta,
                    chunkset: &mut chunkset,
                    chunk_ranges: self.chunk_ranges,
                })?
                .is_none()
            {
                // Serialized stream ended before all component types were seen.
                break;
            }
        }
        Ok(())
    }
}
/// Seed that decodes the values of a single component type for one chunk,
/// handing the user deserializer raw destination buffers chunk by chunk.
struct ComponentDataDeserializer<'a, 'b, WD: WorldDeserializer> {
    user: &'b WD,
    comp_type: &'a ComponentTypeId,
    comp_meta: &'a ComponentMeta,
    chunkset: &'a mut Chunkset,
    /// `(chunk_index, entity_count)` destinations, in write order.
    chunk_ranges: &'a Vec<(usize, usize)>,
}

impl<'de, 'a, 'b, WD: WorldDeserializer> DeserializeSeed<'de>
    for ComponentDataDeserializer<'a, 'b, WD>
{
    type Value = ();

    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        // Each call of the closure yields the next chunk's destination buffer
        // as (pointer, element count), or `None` once all ranges are consumed.
        let mut range_idx = 0;
        self.user.deserialize_components(
            deserializer,
            self.comp_type,
            self.comp_meta,
            &mut || -> Option<(NonNull<u8>, usize)> {
                self.chunk_ranges.get(range_idx).map(|chunk_range| {
                    range_idx += 1;
                    let chunk = &mut self.chunkset[chunk_range.0];
                    // SAFETY-relevant: the chunk writer exposes the component
                    // storage behind an UnsafeCell; `reserve_raw` hands back
                    // uninitialized space for `chunk_range.1` elements that
                    // the user deserializer is expected to fill.
                    unsafe {
                        let comp_storage = (&mut *chunk.writer().get().1.get())
                            .get_mut(*self.comp_type)
                            .expect(
                                "expected ComponentResourceSet when deserializing component data",
                            );
                        (
                            comp_storage.writer().reserve_raw(chunk_range.1),
                            chunk_range.1,
                        )
                    }
                })
            },
        )?;
        Ok(())
    }
}

360
bevy_legion/src/entity.rs Normal file
View file

@ -0,0 +1,360 @@
use parking_lot::{Mutex, RwLock};
use std::fmt::Display;
use std::num::Wrapping;
use std::sync::Arc;
// Index into the global entity index space (allocated in blocks).
pub(crate) type EntityIndex = u32;
// Per-slot generation counter; wraps on overflow so reuse stays well-defined.
pub(crate) type EntityVersion = Wrapping<u32>;

/// A handle to an entity.
///
/// The version distinguishes successive entities that reuse the same index
/// slot, allowing stale handles to be detected after deletion.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct Entity {
    index: EntityIndex,
    version: EntityVersion,
}

impl Entity {
    pub(crate) fn new(index: EntityIndex, version: EntityVersion) -> Entity {
        Entity { index, version }
    }

    pub(crate) fn index(self) -> EntityIndex { self.index }
}

impl Display for Entity {
    // Formats as `index#version`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "{}#{}", self.index, self.version)
    }
}
/// The storage address of an entity's data:
/// archetype → chunkset → chunk → component slot within the chunk.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) struct EntityLocation {
    archetype_index: usize,
    set_index: usize,
    chunk_index: usize,
    component_index: usize,
}

impl EntityLocation {
    pub(crate) fn new(
        archetype_index: usize,
        set_index: usize,
        chunk_index: usize,
        component_index: usize,
    ) -> Self {
        EntityLocation {
            archetype_index,
            set_index,
            chunk_index,
            component_index,
        }
    }

    pub(crate) fn archetype(&self) -> usize { self.archetype_index }

    pub(crate) fn set(&self) -> usize { self.set_index }

    pub(crate) fn chunk(&self) -> usize { self.chunk_index }

    pub(crate) fn component(&self) -> usize { self.component_index }
}
/// Hands out fixed-size blocks of entity index space and recycles blocks that
/// have been returned, so indices stay unique across all users of the
/// allocator.
#[derive(Debug)]
pub(crate) struct BlockAllocator {
    // Total index space carved out so far; the start of the next fresh block.
    allocated: usize,
    // Blocks returned via `free`, ready for reuse.
    free: Vec<EntityBlock>,
}

impl BlockAllocator {
    const BLOCK_SIZE: usize = 1024;

    pub(crate) fn new() -> Self {
        BlockAllocator {
            allocated: 0,
            free: Vec::new(),
        }
    }

    /// Returns a recycled block when one is available, otherwise carves a new
    /// block out of the as-yet-unused index space.
    pub fn allocate(&mut self) -> EntityBlock {
        match self.free.pop() {
            Some(block) => block,
            None => {
                let start = self.allocated as EntityIndex;
                self.allocated += Self::BLOCK_SIZE;
                EntityBlock::new(start, Self::BLOCK_SIZE)
            }
        }
    }

    /// Returns a block to the free list for later reuse.
    pub fn free(&mut self, block: EntityBlock) { self.free.push(block); }
}
/// A contiguous range of entity indices together with per-slot version
/// counters, a free list, and the storage location of each live entity.
#[derive(Debug)]
pub(crate) struct EntityBlock {
    /// First index covered by this block.
    start: EntityIndex,
    /// Number of indices covered.
    len: usize,
    /// Version per slot; a slot has been allocated at least once iff it has
    /// an entry here.
    versions: Vec<EntityVersion>,
    /// Indices that were freed and may be reused (with a bumped version).
    free: Vec<EntityIndex>,
    /// Storage location per slot; defaults to the zero location.
    locations: Vec<EntityLocation>,
}

impl EntityBlock {
    pub fn new(start: EntityIndex, len: usize) -> EntityBlock {
        EntityBlock {
            start,
            len,
            versions: Vec::with_capacity(len),
            free: Vec::new(),
            locations: std::iter::repeat(EntityLocation::new(0, 0, 0, 0))
                .take(len)
                .collect(),
        }
    }

    /// Converts a global entity index into a slot index within this block.
    fn index(&self, index: EntityIndex) -> usize { (index - self.start) as usize }

    pub fn in_range(&self, index: EntityIndex) -> bool {
        index >= self.start && index < (self.start + self.len as u32)
    }

    /// Returns `Some(alive?)` if this block covers the entity's index,
    /// `None` otherwise. Only the lower bound is tested explicitly: indices
    /// past the allocated slots fall out via `versions.get` returning `None`.
    pub fn is_alive(&self, entity: Entity) -> Option<bool> {
        if entity.index >= self.start {
            let i = self.index(entity.index);
            self.versions.get(i).map(|v| *v == entity.version)
        } else {
            None
        }
    }

    /// Allocates a slot: reuses a freed index (keeping its bumped version) or
    /// extends into the next untouched slot with version 1. `None` when full.
    pub fn allocate(&mut self) -> Option<Entity> {
        if let Some(index) = self.free.pop() {
            let i = self.index(index);
            Some(Entity::new(index, self.versions[i]))
        } else if self.versions.len() < self.len {
            let index = self.start + self.versions.len() as EntityIndex;
            self.versions.push(Wrapping(1));
            Some(Entity::new(index, Wrapping(1)))
        } else {
            None
        }
    }

    /// Frees a live entity: bumps the slot version (invalidating existing
    /// handles), recycles the index, and returns the entity's last location.
    pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
        if let Some(true) = self.is_alive(entity) {
            let i = self.index(entity.index);
            self.versions[i] += Wrapping(1);
            self.free.push(entity.index);
            self.get_location(entity.index)
        } else {
            None
        }
    }

    /// Records the storage location for an index in this block.
    /// Panics if the index precedes the block's range.
    pub fn set_location(&mut self, entity: EntityIndex, location: EntityLocation) {
        assert!(entity >= self.start);
        let index = (entity - self.start) as usize;
        *self.locations.get_mut(index).unwrap() = location;
    }

    pub fn get_location(&self, entity: EntityIndex) -> Option<EntityLocation> {
        if entity < self.start {
            return None;
        }
        let index = (entity - self.start) as usize;
        self.locations.get(index).copied()
    }
}
/// Manages the allocation and deletion of `Entity` IDs within a world.
///
/// Blocks of index space are leased from a shared `BlockAllocator` (common to
/// all worlds in a universe), so entity IDs remain unique across worlds.
/// Cloning shares the underlying block list.
#[derive(Debug, Clone)]
pub struct EntityAllocator {
    allocator: Arc<Mutex<BlockAllocator>>,
    blocks: Arc<RwLock<Vec<EntityBlock>>>,
}

impl EntityAllocator {
    pub(crate) fn new(allocator: Arc<Mutex<BlockAllocator>>) -> Self {
        EntityAllocator {
            allocator,
            blocks: Arc::new(RwLock::new(Vec::new())),
        }
    }

    /// Determines if the given `Entity` is considered alive.
    pub fn is_alive(&self, entity: Entity) -> bool {
        // Blocks that do not cover the entity's index report `None`, so the
        // first `Some` answer is authoritative.
        self.blocks
            .read()
            .iter()
            .find_map(|b| b.is_alive(entity))
            .unwrap_or(false)
    }

    /// Allocates a new unused `Entity` ID.
    pub fn create_entity(&self) -> Entity {
        let mut blocks = self.blocks.write();
        // Recently acquired blocks (at the back) are the most likely to have
        // free slots, so search in reverse.
        if let Some(entity) = blocks.iter_mut().rev().find_map(|b| b.allocate()) {
            entity
        } else {
            // All owned blocks are full; lease a fresh block from the shared
            // allocator.
            let mut block = self.allocator.lock().allocate();
            let entity = block.allocate().unwrap();
            blocks.push(block);
            entity
        }
    }

    /// Frees the entity, returning its last known storage location if it was
    /// alive.
    pub(crate) fn delete_entity(&self, entity: Entity) -> Option<EntityLocation> {
        self.blocks.write().iter_mut().find_map(|b| b.free(entity))
    }

    /// Records where the entity's data lives in chunk storage.
    /// Panics if no owned block covers `entity`.
    pub(crate) fn set_location(&self, entity: EntityIndex, location: EntityLocation) {
        self.blocks
            .write()
            .iter_mut()
            .rev()
            .find(|b| b.in_range(entity))
            .unwrap()
            .set_location(entity, location);
    }

    pub(crate) fn get_location(&self, entity: EntityIndex) -> Option<EntityLocation> {
        self.blocks
            .read()
            .iter()
            .find(|b| b.in_range(entity))
            .and_then(|b| b.get_location(entity))
    }

    /// Takes ownership of all blocks held by `other`. Both allocators must
    /// share the same underlying `BlockAllocator`, otherwise index uniqueness
    /// cannot be guaranteed.
    pub(crate) fn merge(&self, other: EntityAllocator) {
        assert!(Arc::ptr_eq(&self.allocator, &other.allocator));
        self.blocks.write().append(&mut other.blocks.write());
    }
}

impl Drop for EntityAllocator {
    fn drop(&mut self) {
        // Return all leased blocks so their index ranges can be reused.
        // Lock the shared allocator once, not once per block.
        let mut allocator = self.allocator.lock();
        for block in self.blocks.write().drain(..) {
            allocator.free(block);
        }
    }
}
// Unit tests covering entity allocation, liveness, deletion, and ID
// uniqueness across allocators sharing one block allocator.
#[cfg(test)]
mod tests {
    use crate::entity::*;
    use std::collections::HashSet;

    #[test]
    fn create_entity() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        allocator.create_entity();
    }

    // Fill a portion of a single block.
    #[test]
    fn create_entity_many() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        for _ in 0..512 {
            allocator.create_entity();
        }
    }

    // 3000 entities spans multiple 1024-slot blocks.
    #[test]
    fn create_entity_many_blocks() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        for _ in 0..3000 {
            allocator.create_entity();
        }
    }

    // Repeated allocate/free cycles should reuse freed slots.
    #[test]
    fn create_entity_recreate() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        for _ in 0..3 {
            let entities: Vec<Entity> = (0..512).map(|_| allocator.create_entity()).collect();
            for e in entities {
                allocator.delete_entity(e);
            }
        }
    }

    #[test]
    fn is_alive_allocated() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        let entity = allocator.create_entity();
        assert_eq!(true, allocator.is_alive(entity));
    }

    // A handle that was never allocated must not read as alive.
    #[test]
    fn is_alive_unallocated() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        let entity = Entity::new(10 as EntityIndex, Wrapping(10));
        assert_eq!(false, allocator.is_alive(entity));
    }

    // Deletion bumps the slot version, invalidating the old handle.
    #[test]
    fn is_alive_killed() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        let entity = allocator.create_entity();
        allocator.delete_entity(entity);
        assert_eq!(false, allocator.is_alive(entity));
    }

    #[test]
    fn delete_entity_was_alive() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        let entity = allocator.create_entity();
        assert_eq!(true, allocator.delete_entity(entity).is_some());
    }

    // Double deletion must be a no-op.
    #[test]
    fn delete_entity_was_dead() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        let entity = allocator.create_entity();
        allocator.delete_entity(entity);
        assert_eq!(None, allocator.delete_entity(entity));
    }

    #[test]
    fn delete_entity_was_unallocated() {
        let allocator = EntityAllocator::new(Arc::from(Mutex::new(BlockAllocator::new())));
        let entity = Entity::new(10 as EntityIndex, Wrapping(10));
        assert_eq!(None, allocator.delete_entity(entity));
    }

    // Two allocators sharing one BlockAllocator must never hand out the same
    // ID, and each only reports its own entities as alive.
    #[test]
    fn multiple_allocators_unique_ids() {
        let blocks = Arc::from(Mutex::new(BlockAllocator::new()));
        let allocator_a = EntityAllocator::new(blocks.clone());
        let allocator_b = EntityAllocator::new(blocks.clone());

        let mut entities_a = HashSet::<Entity>::default();
        let mut entities_b = HashSet::<Entity>::default();

        for _ in 0..5 {
            entities_a.extend((0..1500).map(|_| allocator_a.create_entity()));
            entities_b.extend((0..1500).map(|_| allocator_b.create_entity()));
        }

        assert_eq!(true, entities_a.is_disjoint(&entities_b));

        for e in entities_a {
            assert_eq!(true, allocator_a.is_alive(e));
            assert_eq!(false, allocator_b.is_alive(e));
        }

        for e in entities_b {
            assert_eq!(false, allocator_a.is_alive(e));
            assert_eq!(true, allocator_b.is_alive(e));
        }
    }
}

129
bevy_legion/src/event.rs Normal file
View file

@ -0,0 +1,129 @@
use crate::entity::Entity;
use crate::filter::{
ArchetypeFilterData, ChunkFilterData, ChunksetFilterData, EntityFilter, Filter, FilterResult,
};
use crate::storage::ArchetypeId;
use crate::storage::ChunkId;
use crossbeam_channel::{Sender, TrySendError};
use std::sync::Arc;
/// Events emitted by a world to subscribers. See `World.subscribe(Sender, EntityFilter)`.
#[derive(Debug, Clone)]
pub enum Event {
    /// A new archetype has been created.
    ArchetypeCreated(ArchetypeId),
    /// A new chunk has been created.
    ChunkCreated(ChunkId),
    /// An entity has been inserted into a chunk.
    EntityInserted(Entity, ChunkId),
    /// An entity has been removed from a chunk.
    EntityRemoved(Entity, ChunkId),
}

/// Object-safe filter used to decide which events a subscriber receives,
/// tested separately at archetype, chunkset, and chunk granularity.
pub(crate) trait EventFilter: Send + Sync + 'static {
    fn matches_archetype(&self, data: ArchetypeFilterData, index: usize) -> bool;
    fn matches_chunkset(&self, data: ChunksetFilterData, index: usize) -> bool;
    fn matches_chunk(&self, data: ChunkFilterData, index: usize) -> bool;
}
/// Adapts a statically-typed `EntityFilter` to the dynamic `EventFilter`
/// trait so it can be stored as a trait object.
pub(crate) struct EventFilterWrapper<T: EntityFilter + Sync + 'static>(pub T);

impl<T: EntityFilter + Sync + 'static> EventFilter for EventFilterWrapper<T> {
    fn matches_archetype(&self, data: ArchetypeFilterData, index: usize) -> bool {
        // Run the archetype stage of the wrapped filter against the element
        // at `index`; absent elements never match.
        let (filter, _, _) = self.0.filters();
        if let Some(element) = filter.collect(data).nth(index) {
            return filter.is_match(&element).is_pass();
        }

        false
    }

    fn matches_chunkset(&self, data: ChunksetFilterData, index: usize) -> bool {
        // Same as above for the chunkset stage.
        let (_, filter, _) = self.0.filters();
        if let Some(element) = filter.collect(data).nth(index) {
            return filter.is_match(&element).is_pass();
        }

        false
    }

    fn matches_chunk(&self, data: ChunkFilterData, index: usize) -> bool {
        // Same as above for the chunk stage.
        let (_, _, filter) = self.0.filters();
        if let Some(element) = filter.collect(data).nth(index) {
            return filter.is_match(&element).is_pass();
        }

        false
    }
}
/// A single event subscription: a filter selecting relevant events and the
/// channel end they are delivered on.
#[derive(Clone)]
pub(crate) struct Subscriber {
    pub filter: Arc<dyn EventFilter>,
    pub sender: Sender<Event>,
}

impl Subscriber {
    pub fn new(filter: Arc<dyn EventFilter>, sender: Sender<Event>) -> Self {
        Self { filter, sender }
    }
}
/// A collection of event subscriptions that can be narrowed to the subset
/// matching a particular archetype, chunkset, or chunk.
#[derive(Clone)]
pub(crate) struct Subscribers {
    subscribers: Vec<Subscriber>,
}

impl Subscribers {
    pub fn new() -> Self {
        Self {
            subscribers: Vec::new(),
        }
    }

    /// Registers an additional subscriber.
    pub fn push(&mut self, subscriber: Subscriber) { self.subscribers.push(subscriber); }

    /// Delivers `message` to every subscriber, pruning any whose receiving
    /// end has disconnected. Full channels are skipped silently.
    pub fn send(&mut self, message: Event) {
        // Walk backwards so `swap_remove` never disturbs indices that are
        // still to be visited.
        for i in (0..self.subscribers.len()).rev() {
            match self.subscribers[i].sender.try_send(message.clone()) {
                Err(TrySendError::Disconnected(_)) => {
                    self.subscribers.swap_remove(i);
                }
                _ => {}
            }
        }
    }

    /// Clones the subset of subscribers accepted by `accept`.
    fn filtered(&self, accept: impl Fn(&Subscriber) -> bool) -> Self {
        Self {
            subscribers: self
                .subscribers
                .iter()
                .filter(|sub| accept(sub))
                .cloned()
                .collect(),
        }
    }

    pub fn matches_archetype(&self, data: ArchetypeFilterData, index: usize) -> Self {
        self.filtered(|sub| sub.filter.matches_archetype(data, index))
    }

    pub fn matches_chunkset(&self, data: ChunksetFilterData, index: usize) -> Self {
        self.filtered(|sub| sub.filter.matches_chunkset(data, index))
    }

    pub fn matches_chunk(&self, data: ChunkFilterData, index: usize) -> Self {
        self.filtered(|sub| sub.filter.matches_chunk(data, index))
    }
}

impl Default for Subscribers {
    fn default() -> Self { Subscribers::new() }
}

1237
bevy_legion/src/filter.rs Normal file

File diff suppressed because it is too large Load diff

220
bevy_legion/src/iterator.rs Normal file
View file

@ -0,0 +1,220 @@
use std::iter::repeat;
use std::iter::ExactSizeIterator;
use std::iter::FusedIterator;
use std::iter::Repeat;
use std::iter::Take;
use std::slice::Iter;
/// An iterator over slices in a `SliceVec`.
///
/// `counts[i]` is the length of the `i`-th slice; the slices live
/// back-to-back in `data`.
#[derive(Clone)]
pub struct SliceVecIter<'a, T> {
    pub(crate) data: &'a [T],
    pub(crate) counts: &'a [usize],
}

impl<'a, T> Iterator for SliceVecIter<'a, T> {
    type Item = &'a [T];

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Pop the next count; `None` means all slices have been yielded.
        let (&len, remaining_counts) = self.counts.split_first()?;
        let (slice, remaining_data) = self.data.split_at(len);
        self.counts = remaining_counts;
        self.data = remaining_data;
        Some(slice)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // One slice remains per remaining count, so the bound is exact.
        let remaining = self.counts.len();
        (remaining, Some(remaining))
    }

    #[inline]
    fn count(self) -> usize { self.len() }
}

impl<'a, T> ExactSizeIterator for SliceVecIter<'a, T> {}
impl<'a, T> FusedIterator for SliceVecIter<'a, T> {}
/// A trait for iterators that are able to be split in roughly half.
/// Used for splitting work among threads in parallel iterator.
pub trait FissileIterator: Iterator + Sized {
    /// Divides one iterator into two, roughly in half.
    ///
    /// The implementation doesn't have to be precise,
    /// but the closer to the midpoint it is, the better
    /// the parallel iterator will behave.
    ///
    /// Returns two split iterators and a number of elements left in first split.
    /// That returned size must be exact.
    fn split(self) -> (Self, Self, usize);
}

impl<'a, T> FissileIterator for Iter<'a, T> {
    fn split(self) -> (Self, Self, usize) {
        // A slice iterator splits by slicing the remaining elements in two.
        let remaining = self.as_slice();
        let mid = remaining.len() / 2;
        let (front, back) = (&remaining[..mid], &remaining[mid..]);
        (front.iter(), back.iter(), mid)
    }
}
impl<'a, T> FissileIterator for SliceVecIter<'a, T> {
    fn split(self) -> (Self, Self, usize) {
        // Halve the counts list, then cut `data` at the total length consumed
        // by the left half so both iterators stay internally consistent.
        let mid = self.counts.len() / 2;
        let (left_counts, right_counts) = self.counts.split_at(mid);
        let left_len: usize = left_counts.iter().sum();
        let (left_data, right_data) = self.data.split_at(left_len);
        (
            Self {
                data: left_data,
                counts: left_counts,
            },
            Self {
                data: right_data,
                counts: right_counts,
            },
            mid,
        )
    }
}
/// Like `Iterator::enumerate`, but preserves splittability: after a split,
/// the right half continues counting from where the left half ends so that
/// indices remain globally consistent.
pub(crate) struct FissileEnumerate<I: FissileIterator> {
    iter: I,
    /// Index that will be paired with the next yielded element.
    count: usize,
}

impl<I: FissileIterator> FissileEnumerate<I> {
    pub(crate) fn new(iter: I) -> Self { Self { iter, count: 0 } }
}

impl<I: FissileIterator> Iterator for FissileEnumerate<I>
where
    I: Iterator,
{
    type Item = (usize, <I as Iterator>::Item);

    #[inline]
    fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
        self.iter.next().map(|a| {
            let ret = (self.count, a);
            self.count += 1;
            ret
        })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> {
        // Skipping n elements advances the index by n as well.
        self.iter.nth(n).map(|a| {
            let i = self.count + n;
            self.count = i + 1;
            (i, a)
        })
    }

    #[inline]
    fn count(self) -> usize { self.iter.count() }

    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // Thread the running index through the underlying fold.
        let mut count = self.count;
        self.iter.fold(init, move |acc, item| {
            let acc = fold(acc, (count, item));
            count += 1;
            acc
        })
    }
}

impl<I: FissileIterator> FissileIterator for FissileEnumerate<I> {
    fn split(self) -> (Self, Self, usize) {
        let (left, right, left_size) = self.iter.split();
        (
            Self {
                iter: left,
                count: self.count,
            },
            // The right half starts counting after the left half's elements,
            // keeping enumeration indices globally consistent.
            Self {
                iter: right,
                count: self.count + left_size,
            },
            left_size,
        )
    }
}

impl<I: ExactSizeIterator + FissileIterator> ExactSizeIterator for FissileEnumerate<I> {
    fn len(&self) -> usize { self.iter.len() }
}

impl<I: FusedIterator + FissileIterator> FusedIterator for FissileEnumerate<I> {}
/// A repeated value with a known remaining count splits by dividing the
/// count between the two halves.
impl<T: Clone> FissileIterator for Take<Repeat<T>> {
    fn split(mut self) -> (Self, Self, usize) {
        // Capture the remaining length *before* sampling a value with
        // `next()`; reading `size_hint` afterwards under-counts by one,
        // dropping an element and reporting a left size inconsistent with
        // co-zipped iterators (`FissileZip` asserts split sizes match).
        let (len, len_max) = self.size_hint();
        assert_eq!(Some(len), len_max);
        if let Some(value) = self.next() {
            let first_part = len / 2;
            let second_part = len - first_part;
            (
                repeat(value.clone()).take(first_part),
                repeat(value).take(second_part),
                first_part,
            )
        } else {
            // Empty iterator: both halves are empty.
            (self.clone(), self, 0)
        }
    }
}
/// Custom fissile zip iterator. Assumes that it's child iterators will always
/// split in the same location. Panics when this is violated.
pub struct FissileZip<A, B> {
    a: A,
    b: B,
}

impl<A, B> FissileZip<A, B> {
    pub(crate) fn new(a: A, b: B) -> Self { Self { a, b } }
}

impl<A: Iterator, B: Iterator> Iterator for FissileZip<A, B> {
    type Item = (A::Item, B::Item);

    fn next(&mut self) -> Option<(A::Item, B::Item)> {
        // Stops as soon as either side is exhausted; note `a` is advanced
        // even when `b` turns out to be empty (same as std's zip semantics
        // for the side polled first).
        self.a.next().and_then(|x| self.b.next().map(|y| (x, y)))
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // The zip ends with the shorter side, so take the minimum of both
        // bounds (a known upper bound wins over an unknown one).
        let (a_lower, a_upper) = self.a.size_hint();
        let (b_lower, b_upper) = self.b.size_hint();

        let lower = std::cmp::min(a_lower, b_lower);

        let upper = match (a_upper, b_upper) {
            (Some(x), Some(y)) => Some(std::cmp::min(x, y)),
            (Some(x), None) => Some(x),
            (None, Some(y)) => Some(y),
            (None, None) => None,
        };

        (lower, upper)
    }
}

impl<A: FissileIterator, B: FissileIterator> FissileIterator for FissileZip<A, B> {
    fn split(self) -> (Self, Self, usize) {
        let (a_left, a_right, a_left_size) = self.a.split();
        let (b_left, b_right, b_left_size) = self.b.split();
        // Both children must split at the same element, otherwise the zipped
        // pairs in the right halves would be misaligned.
        assert_eq!(a_left_size, b_left_size);
        (
            Self::new(a_left, b_left),
            Self::new(a_right, b_right),
            a_left_size,
        )
    }
}

265
bevy_legion/src/lib.rs Normal file
View file

@ -0,0 +1,265 @@
//! Legion aims to be a feature rich high performance ECS library for Rust game projects with minimal boilerplate.
//!
//! # Getting Started
//!
//! ```rust
//! use legion::prelude::*;
//!
//! // Define our entity data types
//! #[derive(Clone, Copy, Debug, PartialEq)]
//! struct Position {
//! x: f32,
//! y: f32,
//! }
//!
//! #[derive(Clone, Copy, Debug, PartialEq)]
//! struct Velocity {
//! dx: f32,
//! dy: f32,
//! }
//!
//! #[derive(Clone, Copy, Debug, PartialEq)]
//! struct Model(usize);
//!
//! #[derive(Clone, Copy, Debug, PartialEq)]
//! struct Static;
//!
//! // Create a world to store our entities
//! let universe = Universe::new();
//! let mut world = universe.create_world();
//!
//! // Create entities with `Position` and `Velocity` data
//! world.insert(
//! (),
//! (0..999).map(|_| (Position { x: 0.0, y: 0.0 }, Velocity { dx: 0.0, dy: 0.0 }))
//! );
//!
//! // Create entities with `Position` data and a tagged with `Model` data and as `Static`
//! // Tags are shared across many entities, and enable further batch processing and filtering use cases
//! world.insert(
//! (Model(5), Static),
//! (0..999).map(|_| (Position { x: 0.0, y: 0.0 },))
//! );
//!
//! // Create a query which finds all `Position` and `Velocity` components
//! let mut query = <(Write<Position>, Read<Velocity>)>::query();
//!
//! // Iterate through all entities that match the query in the world
//! for (mut pos, vel) in query.iter_mut(&mut world) {
//! pos.x += vel.dx;
//! pos.y += vel.dy;
//! }
//! ```
//!
//! ### Advanced Query Filters
//!
//! The query API can do much more than pull entity data out of the world.
//!
//! Additional data type filters:
//!
//! ```rust
//! # use legion::prelude::*;
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Position {
//! # x: f32,
//! # y: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Velocity {
//! # dx: f32,
//! # dy: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Model(usize);
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Static;
//! # let universe = Universe::new();
//! # let mut world = universe.create_world();
//! // It is possible to specify that entities must contain data beyond that being fetched
//! let mut query = Read::<Position>::query()
//! .filter(component::<Velocity>());
//! for position in query.iter(&mut world) {
//! // these entities also have `Velocity`
//! }
//! ```
//!
//! Filter boolean operations:
//!
//! ```rust
//! # use legion::prelude::*;
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Position {
//! # x: f32,
//! # y: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Velocity {
//! # dx: f32,
//! # dy: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Model(usize);
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Static;
//! # let universe = Universe::new();
//! # let mut world = universe.create_world();
//! // Filters can be combined with boolean operators
//! let mut query = Read::<Position>::query()
//! .filter(tag::<Static>() | !component::<Velocity>());
//! for position in query.iter(&mut world) {
//! // these entities are also either marked as `Static`, or do *not* have a `Velocity`
//! }
//! ```
//!
//! Filter by tag data value:
//!
//! ```rust
//! # use legion::prelude::*;
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Position {
//! # x: f32,
//! # y: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Velocity {
//! # dx: f32,
//! # dy: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Model(usize);
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Static;
//! # let universe = Universe::new();
//! # let mut world = universe.create_world();
//! // Filters can filter by specific tag values
//! let mut query = Read::<Position>::query()
//! .filter(tag_value(&Model(3)));
//! for position in query.iter(&mut world) {
//! // these entities all have tag value `Model(3)`
//! }
//! ```
//!
//! Change detection:
//!
//! ```rust
//! # use legion::prelude::*;
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Position {
//! # x: f32,
//! # y: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Velocity {
//! # dx: f32,
//! # dy: f32,
//! # }
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Model(usize);
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Static;
//! # let universe = Universe::new();
//! # let mut world = universe.create_world();
//! // Queries can perform coarse-grained change detection, rejecting entities who's data
//! // has not changed since the last time the query was iterated.
//! let mut query = <(Read<Position>, Tagged<Model>)>::query()
//! .filter(changed::<Position>());
//! for (pos, model) in query.iter(&mut world) {
//! // entities who have changed position
//! }
//! ```
//!
//! ### Content Streaming
//!
//! Entities can be loaded and initialized in a background `World` on separate threads and then
//! when ready, merged into the main `World` near instantaneously.
//!
//! ```rust
//! # use legion::prelude::*;
//! let universe = Universe::new();
//! let mut world_a = universe.create_world();
//! let mut world_b = universe.create_world();
//!
//! // Merge all entities from `world_b` into `world_a`
//! // Entity IDs are guarenteed to be unique across worlds and will
//! // remain unchanged across the merge.
//! world_a.merge(world_b);
//! ```
//!
//! ### Chunk Iteration
//!
//! Entity data is allocated in blocks called "chunks", each approximately containing 64KiB of data.
//! The query API exposes each chunk via 'iter_chunk'. As all entities in a chunk are guarenteed to contain the same set of entity
//! data and shared data values, it is possible to do batch processing via the chunk API.
//!
//! ```rust
//! # use legion::prelude::*;
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Transform;
//! # #[derive(Clone, Copy, Debug, PartialEq)]
//! # struct Model(usize);
//! # let universe = Universe::new();
//! # let mut world = universe.create_world();
//! fn render_instanced(model: &Model, transforms: &[Transform]) {
//! // pass `transforms` pointer to graphics API to load into constant buffer
//! // issue instanced draw call with model data and transforms
//! }
//!
//! let mut query = Read::<Transform>::query()
//! .filter(tag::<Model>());
//!
//! for chunk in query.iter_chunks_mut(&mut world) {
//! // get the chunk's model
//! let model: &Model = chunk.tag().unwrap();
//!
//! // get a (runtime borrow checked) slice of transforms
//! let transforms = chunk.components::<Transform>().unwrap();
//!
//! // give the model and transform slice to our renderer
//! render_instanced(model, &transforms);
//! }
//! ```
//!
//! # Feature Flags
//!
//! * `par-iter`: Enables parallel APIs on queries (enabled by default).
//! * `par-schedule`: Configures system schedulers to try and run systems in parallel where possible (enabled by default).
//! * `log`: Configures `tracing` to redirect events to the `log` crate. This is a convenience feature for applications
//! that use `log` and do not wish to interact with `tracing`.
//! * `events`: Enables eventing APIs on worlds (enabled by default).
#![allow(dead_code)]
pub mod borrow;
pub mod command;
#[cfg(feature = "serde-1")]
pub mod de;
pub mod entity;
pub mod event;
pub mod filter;
pub mod iterator;
pub mod query;
pub mod resource;
pub mod schedule;
#[cfg(feature = "serde-1")]
pub mod ser;
pub mod storage;
pub mod system;
pub mod world;
mod cons;
mod tuple;
mod zip;
pub use bit_set;
/// Commonly used types re-exported for convenient glob import:
/// `use legion::prelude::*;`
pub mod prelude {
    pub use crate::command::CommandBuffer;
    pub use crate::entity::Entity;
    pub use crate::event::Event;
    pub use crate::filter::filter_fns::*;
    pub use crate::query::{IntoQuery, Query, Read, Tagged, TryRead, TryWrite, Write};
    pub use crate::resource::{ResourceSet, Resources};
    pub use crate::schedule::{Executor, Runnable, Schedulable, Schedule};
    pub use crate::system::{System, SystemBuilder};
    pub use crate::world::{Universe, World};
    pub use bit_set::BitSet;
}

1585
bevy_legion/src/query.rs Normal file

File diff suppressed because it is too large Load diff

435
bevy_legion/src/resource.rs Normal file
View file

@ -0,0 +1,435 @@
use crate::borrow::{AtomicRefCell, Ref, RefMut, DowncastTypename};
use crate::query::{Read, ReadOnly, Write};
use downcast_rs::{impl_downcast, Downcast};
use fxhash::FxHashMap;
use std::{
any::type_name,
marker::PhantomData,
ops::{Deref, DerefMut},
};
#[cfg(not(feature = "ffi"))]
/// A type ID identifying a resource type.
///
/// Keyed on `std::any::type_name` rather than `std::any::TypeId`
/// (NOTE(review): presumably for stability across dynamically loaded plugins — confirm).
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ResourceTypeId(&'static str);

#[cfg(not(feature = "ffi"))]
impl ResourceTypeId {
    /// Gets the resource type ID that represents type `T`.
    pub fn of<T: Resource>() -> Self { Self(type_name::<T>()) }
}

#[cfg(feature = "ffi")]
/// A type ID identifying a resource type.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ResourceTypeId(&'static str, u32);

#[cfg(feature = "ffi")]
impl ResourceTypeId {
    /// Gets the resource type ID that represents type `T`.
    pub fn of<T: Resource>() -> Self { Self(type_name::<T>(), 0) }
}
/// Trait which is implemented for tuples of resources and singular resources. This abstracts
/// fetching resources to allow for ergonomic fetching.
///
/// # Example:
/// ```
///
/// struct TypeA(usize);
/// struct TypeB(usize);
///
/// use legion::prelude::*;
/// let mut resources = Resources::default();
/// resources.insert(TypeA(55));
/// resources.insert(TypeB(12));
///
/// {
///     let (a, mut b) = <(Read<TypeA>, Write<TypeB>)>::fetch_mut(&mut resources);
///     assert_ne!(a.0, b.0);
///     b.0 = a.0;
/// }
///
/// {
///     let (a, b) = <(Read<TypeA>, Read<TypeB>)>::fetch(&resources);
///     assert_eq!(a.0, b.0);
/// }
///
/// ```
pub trait ResourceSet: Send + Sync {
    /// The pointer-wrapper types handed to the system closure for this set.
    type PreparedResources;

    /// Fetches all defined resources, without checking mutability.
    ///
    /// # Safety
    /// It is up to the end user to validate proper mutability rules across the resources being accessed.
    ///
    unsafe fn fetch_unchecked(resources: &Resources) -> Self::PreparedResources;

    /// Fetches all defined resources through an exclusive borrow of the store.
    fn fetch_mut(resources: &mut Resources) -> Self::PreparedResources {
        // safe because mutable borrow ensures exclusivity
        unsafe { Self::fetch_unchecked(resources) }
    }

    /// Fetches all defined resources through a shared borrow of the store.
    fn fetch(resources: &Resources) -> Self::PreparedResources
    where
        Self: ReadOnly,
    {
        // safe because the `ReadOnly` bound guarantees no mutable access is requested
        unsafe { Self::fetch_unchecked(resources) }
    }
}
/// Blanket trait for resource types.
///
/// Automatically implemented for any `'static + Send + Sync` type; `Downcast`
/// allows type-erased storage in `Resources` to be recovered as the concrete type.
pub trait Resource: 'static + Downcast + Send + Sync {}
impl<T> Resource for T where T: 'static + Send + Sync {}
impl_downcast!(Resource);
/// Wrapper type for safe, lifetime-guaranteed immutable access to a resource of type `T`. This
/// is the wrapper type which is provided to the closure in a `System`, meaning it is only scoped
/// to that system execution.
///
/// # Safety
///
/// This type contains an immutable pointer to `T`, and must not outlive its lifetime
pub struct PreparedRead<T: Resource> {
    resource: *const T,
}
impl<T: Resource> PreparedRead<T> {
    /// Wraps a raw pointer to a resource.
    ///
    /// # Safety
    /// The pointee must outlive this wrapper and must not be mutated while it exists.
    pub(crate) unsafe fn new(resource: *const T) -> Self { Self { resource } }
}
impl<T: Resource> Deref for PreparedRead<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target { unsafe { &*self.resource } }
}
// SAFETY: `T: Resource` implies `Send + Sync`, and this wrapper only exposes shared access.
unsafe impl<T: Resource> Send for PreparedRead<T> {}
unsafe impl<T: Resource> Sync for PreparedRead<T> {}
/// Wrapper type for safe, lifetime-guaranteed mutable access to a resource of type `T`. This
/// is the wrapper type which is provided to the closure in a `System`, meaning it is only scoped
/// to that system execution.
///
/// # Safety
///
/// This type contains a mutable pointer to `T`, and must not outlive its lifetime
pub struct PreparedWrite<T: Resource> {
    resource: *mut T,
}
impl<T: Resource> Deref for PreparedWrite<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target { unsafe { &*self.resource } }
}
impl<T: Resource> DerefMut for PreparedWrite<T> {
    fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.resource } }
}
impl<T: Resource> PreparedWrite<T> {
    /// Wraps a raw pointer to a resource.
    ///
    /// # Safety
    /// The pointee must outlive this wrapper, and no other access to it may occur
    /// while this wrapper exists.
    pub(crate) unsafe fn new(resource: *mut T) -> Self { Self { resource } }
}
// SAFETY: `T: Resource` implies `Send + Sync`; exclusive access is the caller's
// responsibility per `new`'s contract.
unsafe impl<T: Resource> Send for PreparedWrite<T> {}
unsafe impl<T: Resource> Sync for PreparedWrite<T> {}
/// Ergonomic wrapper type which contains a `Ref` type.
pub struct Fetch<'a, T: 'a + Resource> {
    // type-erased runtime borrow of the resource cell; downcast on deref
    inner: Ref<'a, Box<dyn Resource>>,
    _marker: PhantomData<T>,
}
impl<'a, T: Resource> Deref for Fetch<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // `Resources` stores entries keyed by `ResourceTypeId::of::<T>()`, so this
        // downcast should only fail if the type-name keying is violated.
        self.inner.downcast_typename_ref::<T>().unwrap_or_else(|| {
            panic!(
                "Unable to downcast the resource!: {}",
                std::any::type_name::<T>()
            )
        })
    }
}
impl<'a, T: 'a + Resource + std::fmt::Debug> std::fmt::Debug for Fetch<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self.deref())
    }
}
/// Ergonomic wrapper type which contains a `RefMut` type.
pub struct FetchMut<'a, T: Resource> {
    // type-erased runtime mutable borrow of the resource cell; downcast on deref
    inner: RefMut<'a, Box<dyn Resource>>,
    _marker: PhantomData<T>,
}
impl<'a, T: 'a + Resource> Deref for FetchMut<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // see `Fetch::deref`: failure indicates a violated type-name keying invariant
        self.inner.downcast_typename_ref::<T>().unwrap_or_else(|| {
            panic!(
                "Unable to downcast the resource!: {}",
                std::any::type_name::<T>()
            )
        })
    }
}
impl<'a, T: 'a + Resource> DerefMut for FetchMut<'a, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        self.inner.downcast_typename_mut::<T>().unwrap_or_else(|| {
            panic!(
                "Unable to downcast the resource!: {}",
                std::any::type_name::<T>()
            )
        })
    }
}
impl<'a, T: 'a + Resource + std::fmt::Debug> std::fmt::Debug for FetchMut<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self.deref())
    }
}
/// Resources container. This container stores its underlying resources in a `FxHashMap` keyed on
/// `ResourceTypeId`. This means that the IDs used in this storage will not persist between recompiles.
#[derive(Default)]
pub struct Resources {
    // one type-erased cell per resource type; `AtomicRefCell` provides runtime borrow checking
    storage: FxHashMap<ResourceTypeId, AtomicRefCell<Box<dyn Resource>>>,
}
impl Resources {
    /// Returns `true` if type `T` exists in the store. Otherwise, returns `false`.
    pub fn contains<T: Resource>(&self) -> bool {
        self.storage.contains_key(&ResourceTypeId::of::<T>())
    }

    /// Inserts the instance of `T` into the store. If the type already exists, it will be silently
    /// overwritten. If you would like to retain the instance of the resource that already exists,
    /// call `remove` first to retrieve it.
    pub fn insert<T: Resource>(&mut self, value: T) {
        self.storage.insert(
            ResourceTypeId::of::<T>(),
            AtomicRefCell::new(Box::new(value)),
        );
    }

    /// Removes the type `T` from this store if it exists.
    ///
    /// # Returns
    /// If the type `T` was stored, the inner instance of `T` is returned. Otherwise, `None`.
    pub fn remove<T: Resource>(&mut self) -> Option<T> {
        Some(
            *self
                .storage
                .remove(&ResourceTypeId::of::<T>())?
                .into_inner()
                .downcast::<T>()
                .ok()?,
        )
    }

    /// Retrieve an immutable reference to `T` from the store if it exists. Otherwise, return `None`.
    pub fn get<T: Resource>(&self) -> Option<Fetch<'_, T>> {
        Some(Fetch {
            inner: self.storage.get(&ResourceTypeId::of::<T>())?.get(),
            _marker: Default::default(),
        })
    }

    /// Retrieve a mutable reference to `T` from the store if it exists. Otherwise, return `None`.
    pub fn get_mut<T: Resource>(&self) -> Option<FetchMut<'_, T>> {
        Some(FetchMut {
            inner: self.storage.get(&ResourceTypeId::of::<T>())?.get_mut(),
            _marker: Default::default(),
        })
    }

    /// Attempts to retrieve an immutable reference to `T` from the store. If it does not exist,
    /// the closure `f` is called to construct the object and it is then inserted into the store.
    ///
    /// `f` is only evaluated when the resource is actually missing.
    pub fn get_or_insert_with<T: Resource, F: FnOnce() -> T>(
        &mut self,
        f: F,
    ) -> Option<Fetch<'_, T>> {
        Some(Fetch {
            inner: self
                .storage
                .entry(ResourceTypeId::of::<T>())
                // the entry API keeps construction lazy; previously `f` was invoked
                // unconditionally, constructing a resource that was then discarded
                // whenever the entry already existed
                .or_insert_with(|| AtomicRefCell::new(Box::new(f())))
                .get(),
            _marker: Default::default(),
        })
    }

    /// Attempts to retrieve a mutable reference to `T` from the store. If it does not exist,
    /// the closure `f` is called to construct the object and it is then inserted into the store.
    ///
    /// `f` is only evaluated when the resource is actually missing.
    pub fn get_mut_or_insert_with<T: Resource, F: FnOnce() -> T>(
        &mut self,
        f: F,
    ) -> Option<FetchMut<'_, T>> {
        Some(FetchMut {
            inner: self
                .storage
                .entry(ResourceTypeId::of::<T>())
                .or_insert_with(|| AtomicRefCell::new(Box::new(f())))
                .get_mut(),
            _marker: Default::default(),
        })
    }

    /// Attempts to retrieve an immutable reference to `T` from the store. If it does not exist,
    /// the provided value is inserted and then a reference to it is returned.
    pub fn get_or_insert<T: Resource>(&mut self, value: T) -> Option<Fetch<'_, T>> {
        self.get_or_insert_with(move || value)
    }

    /// Attempts to retrieve a mutable reference to `T` from the store. If it does not exist,
    /// the provided value is inserted and then a reference to it is returned.
    pub fn get_mut_or_insert<T: Resource>(&mut self, value: T) -> Option<FetchMut<'_, T>> {
        self.get_mut_or_insert_with(move || value)
    }

    /// Attempts to retrieve an immutable reference to `T` from the store. If it does not exist,
    /// the default constructor for `T` is called.
    ///
    /// `T` must implement `Default` for this method.
    pub fn get_or_default<T: Resource + Default>(&mut self) -> Option<Fetch<'_, T>> {
        self.get_or_insert_with(T::default)
    }

    /// Attempts to retrieve a mutable reference to `T` from the store. If it does not exist,
    /// the default constructor for `T` is called.
    ///
    /// `T` must implement `Default` for this method.
    pub fn get_mut_or_default<T: Resource + Default>(&mut self) -> Option<FetchMut<'_, T>> {
        self.get_mut_or_insert_with(T::default)
    }

    /// Performs merging of two resource storages, which occurs during a world merge.
    /// This merge will retain any already-existing resources in the local world, while moving any
    /// new resources from the source world into this one, consuming the resources.
    pub fn merge(&mut self, mut other: Resources) {
        // Merge resources, retaining our local ones but moving in any non-existing ones
        for (type_id, cell) in other.storage.drain() {
            self.storage.entry(type_id).or_insert(cell);
        }
    }
}
// The empty set: fetching nothing always succeeds.
impl ResourceSet for () {
    type PreparedResources = ();

    unsafe fn fetch_unchecked(_: &Resources) {}
}
// A single immutable resource: panics if the resource has not been inserted.
impl<T: Resource> ResourceSet for Read<T> {
    type PreparedResources = PreparedRead<T>;

    unsafe fn fetch_unchecked(resources: &Resources) -> Self::PreparedResources {
        let resource = resources
            .get::<T>()
            .unwrap_or_else(|| panic!("Failed to fetch resource!: {}", std::any::type_name::<T>()));
        // the raw pointer escapes the `Fetch` borrow; caller upholds aliasing rules
        PreparedRead::new(resource.deref() as *const T)
    }
}
// A single mutable resource: panics if the resource has not been inserted.
impl<T: Resource> ResourceSet for Write<T> {
    type PreparedResources = PreparedWrite<T>;

    unsafe fn fetch_unchecked(resources: &Resources) -> Self::PreparedResources {
        let mut resource = resources
            .get_mut::<T>()
            .unwrap_or_else(|| panic!("Failed to fetch resource!: {}", std::any::type_name::<T>()));
        // the raw pointer escapes the `FetchMut` borrow; caller upholds exclusivity
        PreparedWrite::new(resource.deref_mut() as *mut T)
    }
}
// Implements `ResourceSet` for a tuple of `ResourceSet`s by fetching each element in turn.
macro_rules! impl_resource_tuple {
    ( $( $ty: ident ),* ) => {
        #[allow(unused_parens, non_snake_case)]
        impl<$( $ty: ResourceSet ),*> ResourceSet for ($( $ty, )*)
        {
            type PreparedResources = ($( $ty::PreparedResources, )*);

            unsafe fn fetch_unchecked(resources: &Resources) -> Self::PreparedResources {
                ($( $ty::fetch_unchecked(resources), )*)
            }
        }
    };
}
//($( $ty, )*)
impl_resource_tuple!(A);
impl_resource_tuple!(A, B);
impl_resource_tuple!(A, B, C);
impl_resource_tuple!(A, B, C, D);
impl_resource_tuple!(A, B, C, D, E);
impl_resource_tuple!(A, B, C, D, E, F);
impl_resource_tuple!(A, B, C, D, E, F, G);
impl_resource_tuple!(A, B, C, D, E, F, G, H);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y);
impl_resource_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z);
#[cfg(test)]
mod tests {
    use super::*;

    // Verifies insert/get round-trips and that `remove` returns ownership of the resource.
    #[test]
    fn simple_read_write_test() {
        let _ = tracing_subscriber::fmt::try_init();

        struct TestOne {
            value: String,
        }

        struct TestTwo {
            value: String,
        }

        let mut resources = Resources::default();
        resources.insert(TestOne {
            value: "poop".to_string(),
        });

        resources.insert(TestTwo {
            value: "balls".to_string(),
        });

        assert_eq!(resources.get::<TestOne>().unwrap().value, "poop");
        assert_eq!(resources.get::<TestTwo>().unwrap().value, "balls");

        // test re-ownership
        let owned = resources.remove::<TestTwo>();
        assert_eq!(owned.unwrap().value, "balls")
    }
}

550
bevy_legion/src/schedule.rs Normal file
View file

@ -0,0 +1,550 @@
use crate::system::SystemId;
use crate::{
borrow::RefMut, command::CommandBuffer, resource::ResourceTypeId, storage::ComponentTypeId,
world::World,
};
use bit_set::BitSet;
#[cfg(feature = "par-schedule")]
use tracing::{span, trace, Level};
#[cfg(feature = "par-schedule")]
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(feature = "par-schedule")]
use fxhash::{FxHashMap, FxHashSet};
#[cfg(feature = "par-schedule")]
use rayon::prelude::*;
#[cfg(feature = "par-schedule")]
use itertools::izip;
#[cfg(feature = "par-schedule")]
use std::iter::repeat;
/// Empty trait which defines a `System` as schedulable by the dispatcher - this requires that the
/// type is both `Send` and `Sync`.
///
/// This is automatically implemented for all types that implement `Runnable` which meet the requirements.
pub trait Schedulable: Runnable + Send + Sync {}
impl<T> Schedulable for T where T: Runnable + Send + Sync {}
/// Describes which archetypes a system declares access to.
pub enum ArchetypeAccess {
    /// All archetypes.
    All,
    /// Some archetypes.
    Some(BitSet),
}

impl ArchetypeAccess {
    /// Returns `true` when the archetype sets accessed by `self` and `other`
    /// cannot overlap. An access of `All` is never disjoint from anything.
    pub fn is_disjoint(&self, other: &ArchetypeAccess) -> bool {
        match (self, other) {
            (Self::Some(lhs), Self::Some(rhs)) => lhs.is_disjoint(rhs),
            _ => false,
        }
    }
}
/// Trait describing a schedulable type. This is implemented by `System`
pub trait Runnable {
    /// The unique name of the system.
    fn name(&self) -> &SystemId;
    /// Resource and component types this system reads.
    fn reads(&self) -> (&[ResourceTypeId], &[ComponentTypeId]);
    /// Resource and component types this system writes.
    fn writes(&self) -> (&[ResourceTypeId], &[ComponentTypeId]);
    /// Prepares the system for execution against the given world
    /// (the executor uses this to pre-compute archetype access).
    fn prepare(&mut self, world: &World);
    /// The set of archetypes this system accesses.
    fn accesses_archetypes(&self) -> &ArchetypeAccess;
    /// Runs the system against the given world.
    fn run(&self, world: &World);
    /// The system's command buffer, which queues world mutations for later flushing.
    fn command_buffer_mut(&self) -> RefMut<CommandBuffer>;
}
/// Executes a sequence of systems, potentially in parallel, and then commits their command buffers.
///
/// Systems are provided in execution order. When the `par-schedule` feature is enabled, the `Executor`
/// may run some systems in parallel. The order in which side-effects (e.g. writes to resources
/// or entities) are observed is maintained.
pub struct Executor {
    // systems in declared (side-effect) order
    systems: Vec<Box<dyn Schedulable>>,
    // for each system i: indices of systems that must wait on i due to resource access
    #[cfg(feature = "par-schedule")]
    static_dependants: Vec<Vec<usize>>,
    // for each system i: indices of systems that conflict with i only if their
    // accessed archetype sets overlap; resolved each run in `run_systems`
    #[cfg(feature = "par-schedule")]
    dynamic_dependants: Vec<Vec<usize>>,
    // number of static dependencies of each system
    #[cfg(feature = "par-schedule")]
    static_dependency_counts: Vec<AtomicUsize>,
    // per-run countdown of unmet dependencies for each system
    #[cfg(feature = "par-schedule")]
    awaiting: Vec<AtomicUsize>,
}
impl Executor {
    /// Constructs a new executor for all systems to be run in a single stage.
    ///
    /// Systems are provided in the order in which side-effects (e.g. writes to resources or entities)
    /// are to be observed.
    #[cfg(not(feature = "par-schedule"))]
    pub fn new(systems: Vec<Box<dyn Schedulable>>) -> Self { Self { systems } }

    /// Constructs a new executor for all systems to be run in a single stage.
    ///
    /// Systems are provided in the order in which side-effects (e.g. writes to resources or entities)
    /// are to be observed.
    ///
    /// Builds the static dependency graph between systems up-front: a system depends on
    /// earlier systems whose resource accesses conflict with its own. Component conflicts
    /// are recorded as *dynamic* dependants, resolved per-run once archetype access is known.
    #[cfg(feature = "par-schedule")]
    #[allow(clippy::cognitive_complexity)]
    // TODO: we should break this up
    pub fn new(systems: Vec<Box<dyn Schedulable>>) -> Self {
        if systems.len() > 1 {
            let mut static_dependency_counts = Vec::with_capacity(systems.len());

            let mut static_dependants: Vec<Vec<_>> =
                repeat(Vec::with_capacity(64)).take(systems.len()).collect();
            let mut dynamic_dependants: Vec<Vec<_>> =
                repeat(Vec::with_capacity(64)).take(systems.len()).collect();

            // index of the last system to write each resource
            let mut resource_last_mutated =
                FxHashMap::<ResourceTypeId, usize>::with_capacity_and_hasher(
                    64,
                    Default::default(),
                );
            // index of the last system to read each resource
            let mut resource_last_read =
                FxHashMap::<ResourceTypeId, usize>::with_capacity_and_hasher(
                    64,
                    Default::default(),
                );
            // indices of all systems that write each component type
            let mut component_mutated =
                FxHashMap::<ComponentTypeId, Vec<usize>>::with_capacity_and_hasher(
                    64,
                    Default::default(),
                );

            for (i, system) in systems.iter().enumerate() {
                let span = span!(
                    Level::TRACE,
                    "Building system dependencies",
                    system = %system.name(),
                    index = i,
                );
                let _guard = span.enter();

                let (read_res, read_comp) = system.reads();
                let (write_res, write_comp) = system.writes();

                // find resource access dependencies
                let mut dependencies = FxHashSet::with_capacity_and_hasher(64, Default::default());
                for res in read_res {
                    trace!(resource = ?res, "Read resource");
                    // a read depends on the most recent writer of the same resource
                    if let Some(n) = resource_last_mutated.get(res) {
                        trace!(system_index = n, "Added write dependency");
                        dependencies.insert(*n);
                    }
                    resource_last_read.insert(*res, i);
                }
                for res in write_res {
                    trace!(resource = ?res, "Write resource");
                    // Writes have to be exclusive, so we are dependent on reads too
                    if let Some(n) = resource_last_read.get(res) {
                        trace!(system_index = n, "Added read dependency");
                        dependencies.insert(*n);
                    }
                    if let Some(n) = resource_last_mutated.get(res) {
                        trace!(system_index = n, "Added write dependency");
                        dependencies.insert(*n);
                    }
                    resource_last_mutated.insert(*res, i);
                }

                static_dependency_counts.push(AtomicUsize::from(dependencies.len()));
                trace!(dependants = ?dependencies, "Computed static dependants");
                for dep in dependencies {
                    static_dependants[dep].push(i);
                }

                // find component access dependencies
                let mut comp_dependencies = FxHashSet::default();
                for comp in read_comp {
                    if let Some(ns) = component_mutated.get(comp) {
                        for n in ns {
                            comp_dependencies.insert(*n);
                        }
                    }
                }
                for comp in write_comp {
                    if let Some(ns) = component_mutated.get(comp) {
                        for n in ns {
                            comp_dependencies.insert(*n);
                        }
                    }
                    component_mutated
                        .entry(*comp)
                        .or_insert_with(Vec::new)
                        .push(i);
                }
                trace!(depentants = ?comp_dependencies, "Computed dynamic dependants");
                for dep in comp_dependencies {
                    if dep != i {
                        // dont be dependent on ourselves
                        dynamic_dependants[dep].push(i);
                    }
                }
            }

            trace!(
                ?static_dependants,
                ?dynamic_dependants,
                "Computed system dependencies"
            );

            // `awaiting` is re-initialized from the dependency counts on every run
            let mut awaiting = Vec::with_capacity(systems.len());
            systems
                .iter()
                .for_each(|_| awaiting.push(AtomicUsize::new(0)));

            Executor {
                awaiting,
                static_dependants,
                dynamic_dependants,
                static_dependency_counts,
                systems,
            }
        } else {
            // 0 or 1 systems: no dependency tracking needed
            Executor {
                awaiting: Vec::with_capacity(0),
                static_dependants: Vec::with_capacity(0),
                dynamic_dependants: Vec::with_capacity(0),
                static_dependency_counts: Vec::with_capacity(0),
                systems,
            }
        }
    }

    /// Converts this executor into a vector of its component systems.
    pub fn into_vec(self) -> Vec<Box<dyn Schedulable>> { self.systems }

    /// Executes all systems and then flushes their command buffers.
    pub fn execute(&mut self, world: &mut World) {
        self.run_systems(world);
        self.flush_command_buffers(world);
    }

    /// Executes all systems sequentially.
    ///
    /// Only enabled when par-schedule is disabled
    #[cfg(not(feature = "par-schedule"))]
    pub fn run_systems(&mut self, world: &mut World) {
        // preflush command buffers
        // This also handles the first case of allocating them.
        self.systems
            .iter()
            .for_each(|system| system.command_buffer_mut().write(world));

        self.systems.iter_mut().for_each(|system| {
            system.run(world);
        });
    }

    /// Executes all systems, potentially in parallel.
    ///
    /// Ordering is retained in so far as the order of observed resource and component
    /// accesses is maintained.
    ///
    /// Call from within `rayon::ThreadPool::install()` to execute within a specific thread pool.
    #[cfg(feature = "par-schedule")]
    pub fn run_systems(&mut self, world: &mut World) {
        // preflush command buffers
        // This also handles the first case of allocating them.
        self.systems
            .iter()
            .for_each(|system| system.command_buffer_mut().write(world));

        // NOTE(review): joining against an empty closure appears intended to move execution
        // onto the current rayon thread pool — confirm.
        rayon::join(
            || {},
            || {
                match self.systems.len() {
                    1 => {
                        // single system: run it directly, no scheduling required
                        self.systems[0].run(world);
                    }
                    _ => {
                        let systems = &mut self.systems;
                        let static_dependency_counts = &self.static_dependency_counts;
                        let awaiting = &mut self.awaiting;

                        // prepare all systems - archetype filters are pre-executed here
                        systems.par_iter_mut().for_each(|sys| sys.prepare(world));

                        // determine dynamic dependencies
                        izip!(
                            systems.iter(),
                            self.static_dependants.iter_mut(),
                            self.dynamic_dependants.iter_mut()
                        )
                        .par_bridge()
                        .for_each(|(sys, static_dep, dyn_dep)| {
                            let archetypes = sys.accesses_archetypes();
                            // iterate in reverse so swap_remove does not skip elements
                            for i in (0..dyn_dep.len()).rev() {
                                let dep = dyn_dep[i];
                                let other = &systems[dep];

                                // if the archetype sets intersect,
                                // then we can move the dynamic dependant into the static dependants set
                                if !other.accesses_archetypes().is_disjoint(archetypes) {
                                    static_dep.push(dep);
                                    dyn_dep.swap_remove(i);
                                    static_dependency_counts[dep].fetch_add(1, Ordering::Relaxed);
                                }
                            }
                        });

                        // initialize dependency tracking
                        for (i, count) in static_dependency_counts.iter().enumerate() {
                            awaiting[i].store(count.load(Ordering::Relaxed), Ordering::Relaxed);
                        }

                        let awaiting = &self.awaiting;

                        // execute all systems with no outstanding dependencies
                        (0..systems.len())
                            .filter(|i| awaiting[*i].load(Ordering::SeqCst) == 0)
                            .for_each(|i| {
                                self.run_recursive(i, world);
                            });
                    }
                }
            },
        );
    }

    /// Flushes the recorded command buffers for all systems.
    pub fn flush_command_buffers(&mut self, world: &mut World) {
        self.systems.iter().for_each(|system| {
            system.command_buffer_mut().write(world);
        });
    }

    /// Recursively execute through the generated dependency cascade and exhaust it.
    #[cfg(feature = "par-schedule")]
    fn run_recursive(&self, i: usize, world: &World) {
        self.systems[i].run(world);

        self.static_dependants[i].par_iter().for_each(|dep| {
            // If this dependant was waiting on exactly one dependency (us), claim it by
            // swapping in MAX (so no other thread can also claim it) and run it.
            // Otherwise, just decrement its outstanding-dependency count.
            match self.awaiting[*dep].compare_exchange(
                1,
                std::usize::MAX,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    self.run_recursive(*dep, world);
                }
                Err(_) => {
                    self.awaiting[*dep].fetch_sub(1, Ordering::Relaxed);
                }
            }
        });
    }
}
/// A factory for `Schedule`.
pub struct Builder {
    // completed steps, in execution order
    steps: Vec<Step>,
    // systems accumulated since the last flush; drained into an `Executor` step
    accumulator: Vec<Box<dyn Schedulable>>,
}
impl Builder {
    /// Adds a system to the schedule.
    pub fn add_system<T: Into<Box<dyn Schedulable>>>(mut self, system: T) -> Self {
        self.accumulator.push(system.into());
        self
    }

    /// Waits for executing systems to complete, and then flushes all outstanding system
    /// command buffers.
    pub fn flush(mut self) -> Self {
        self.finalize_executor();
        self.steps.push(Step::FlushCmdBuffers);
        self
    }

    /// Drains any accumulated systems into a new `Executor` step.
    fn finalize_executor(&mut self) {
        if self.accumulator.is_empty() {
            return;
        }
        let pending = std::mem::replace(&mut self.accumulator, Vec::new());
        self.steps.push(Step::Systems(Executor::new(pending)));
    }

    /// Adds a thread local function to the schedule. This function will be executed on the main thread.
    pub fn add_thread_local_fn<F: FnMut(&mut World) + 'static>(mut self, f: F) -> Self {
        self.finalize_executor();
        let boxed: Box<dyn FnMut(&mut World)> = Box::new(f);
        self.steps.push(Step::ThreadLocalFn(boxed));
        self
    }

    /// Adds a thread local system to the schedule. This system will be executed on the main thread.
    pub fn add_thread_local<S: Into<Box<dyn Runnable>>>(self, system: S) -> Self {
        let runnable = system.into();
        self.add_thread_local_fn(move |world| runnable.run(world))
    }

    /// Finalizes the builder into a `Schedule`.
    pub fn build(self) -> Schedule { self.into() }
}
impl Default for Builder {
fn default() -> Self {
Self {
steps: Vec::new(),
accumulator: Vec::new(),
}
}
}
/// A step in a schedule.
pub enum Step {
    /// A batch of systems executed by a single `Executor`.
    Systems(Executor),
    /// Flush system command buffers.
    FlushCmdBuffers,
    /// A thread local function.
    ThreadLocalFn(Box<dyn FnMut(&mut World)>),
}
/// A schedule of systems for execution.
///
/// # Examples
///
/// ```rust
/// # use legion::prelude::*;
/// # let find_collisions = SystemBuilder::new("find_collisions").build(|_,_,_,_| {});
/// # let calculate_acceleration = SystemBuilder::new("calculate_acceleration").build(|_,_,_,_| {});
/// # let update_positions = SystemBuilder::new("update_positions").build(|_,_,_,_| {});
/// # let mut world = World::new();
/// let mut schedule = Schedule::builder()
///     .add_system(find_collisions)
///     .flush()
///     .add_system(calculate_acceleration)
///     .add_system(update_positions)
///     .build();
///
/// schedule.execute(&mut world);
/// ```
pub struct Schedule {
    // ordered steps; executed in sequence by `execute`
    steps: Vec<Step>,
}
impl Schedule {
    /// Creates a new schedule builder.
    pub fn builder() -> Builder { Builder::default() }

    /// Executes all of the steps in the schedule.
    ///
    /// Executors whose systems have run are held back until the next `FlushCmdBuffers`
    /// step, at which point their command buffers are written to the world.
    pub fn execute(&mut self, world: &mut World) {
        let mut pending_flush: Vec<&mut Executor> = Vec::new();
        for step in self.steps.iter_mut() {
            match step {
                Step::Systems(executor) => {
                    executor.run_systems(world);
                    pending_flush.push(executor);
                }
                Step::FlushCmdBuffers => {
                    for executor in pending_flush.drain(..) {
                        executor.flush_command_buffers(world);
                    }
                }
                Step::ThreadLocalFn(function) => function(world),
            }
        }
    }

    /// Converts the schedule into a vector of steps.
    pub fn into_vec(self) -> Vec<Step> { self.steps }
}
impl From<Builder> for Schedule {
fn from(builder: Builder) -> Self {
Self {
steps: builder.flush().steps,
}
}
}
impl From<Vec<Step>> for Schedule {
    /// Creates a schedule that executes the given steps in order.
    fn from(steps: Vec<Step>) -> Self { Self { steps } }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::prelude::*;
    use itertools::sorted;
    use std::sync::{Arc, Mutex};

    // Systems all writing the same resource must execute in declared order.
    #[test]
    fn execute_in_order() {
        let universe = Universe::new();
        let mut world = universe.create_world();

        #[derive(Default)]
        struct Resource;

        world.resources.insert(Resource);

        let order = Arc::new(Mutex::new(Vec::new()));

        let order_clone = order.clone();
        let system_one = SystemBuilder::new("one")
            .write_resource::<Resource>()
            .build(move |_, _, _, _| order_clone.lock().unwrap().push(1usize));
        let order_clone = order.clone();
        let system_two = SystemBuilder::new("two")
            .write_resource::<Resource>()
            .build(move |_, _, _, _| order_clone.lock().unwrap().push(2usize));
        let order_clone = order.clone();
        let system_three = SystemBuilder::new("three")
            .write_resource::<Resource>()
            .build(move |_, _, _, _| order_clone.lock().unwrap().push(3usize));

        let mut schedule = Schedule::builder()
            .add_system(system_one)
            .add_system(system_two)
            .add_system(system_three)
            .build();

        schedule.execute(&mut world);

        let order = order.lock().unwrap();
        let sorted: Vec<usize> = sorted(order.clone()).collect();
        assert_eq!(*order, sorted);
    }

    // Command-buffer inserts become visible to later systems only after a flush.
    #[test]
    fn flush() {
        let universe = Universe::new();
        let mut world = universe.create_world();

        #[derive(Clone, Copy, Debug, PartialEq)]
        struct TestComp(f32, f32, f32);

        let system_one = SystemBuilder::new("one").build(move |cmd, _, _, _| {
            cmd.insert((), vec![(TestComp(0., 0., 0.),)]).unwrap();
        });
        // runs before the flush: the inserted entity must not be visible yet
        let system_two = SystemBuilder::new("two")
            .with_query(Write::<TestComp>::query())
            .build(move |_, world, _, query| assert_eq!(0, query.iter_mut(world).count()));
        // runs after the flush: the inserted entity must now be visible
        let system_three = SystemBuilder::new("three")
            .with_query(Write::<TestComp>::query())
            .build(move |_, world, _, query| assert_eq!(1, query.iter_mut(world).count()));

        let mut schedule = Schedule::builder()
            .add_system(system_one)
            .add_system(system_two)
            .flush()
            .add_system(system_three)
            .build();

        schedule.execute(&mut world);
    }
}

309
bevy_legion/src/ser.rs Normal file
View file

@ -0,0 +1,309 @@
use crate::{
entity::Entity,
storage::{
ArchetypeData, ArchetypeDescription, ComponentMeta, ComponentResourceSet, ComponentStorage,
ComponentTypeId, TagMeta, TagStorage, TagTypeId,
},
world::World,
};
use serde::{ser::SerializeStruct, Serialize, Serializer};
/// Implements `Serialize` and can be passed to a `serde::Serializer`.
pub struct WorldSerializable<'a, 'b, CS: WorldSerializer> {
    // user-provided serialization strategy
    world_serializer: &'b CS,
    // the world whose storage will be serialized
    world: &'a World,
}

/// Returns a value that implements `Serialize` and can be passed to a `serde::Serializer`.
pub fn serializable_world<'a, 'b, CS: WorldSerializer>(
    world: &'a World,
    serialize_impl: &'b CS,
) -> WorldSerializable<'a, 'b, CS> {
    WorldSerializable {
        world,
        world_serializer: serialize_impl,
    }
}
/*
// Structure optimized for saving and loading:
[
(
// Description of archetype
archetype: {},
// Tag data arrays. One inner array per chunk set. Indices match chunk set indices
tags: [
// Tag values. One element per chunk set. Indices match chunk set indices
[TAG_DATA]
],
chunksets: [
// CHUNK SET. One array element per array of chunks in the chunkset
[
// CHUNK
(
// ENTITIES in the chunk
entities: [Entity],
// COMPONENT STORAGE: One array per component type, as per the archetype.
// Component type indices in archetype correspond to indices here
components: [
// COMPONENT RESOURCE SET: The actual component data. One element per entity
[COMPONENT_DATA],
...
],
),
...
],
...
],
),
...
]
*/
/// User must implement this trait to serialize a World.
///
/// The `can_serialize_*` predicates act as filters: types they reject are omitted
/// from the output entirely and are never passed to the corresponding serialize method.
pub trait WorldSerializer {
    /// Returns whether this serializer can serialize a tag type or not.
    /// If not, the tag type will not be passed to `serialize_tags`.
    fn can_serialize_tag(&self, ty: &TagTypeId, meta: &TagMeta) -> bool;
    /// Returns whether this serializer can serialize a component type or not.
    /// If not, the component type will not be passed to `serialize_components`.
    fn can_serialize_component(&self, ty: &ComponentTypeId, meta: &ComponentMeta) -> bool;
    /// Serialize an `ArchetypeDescription`
    fn serialize_archetype_description<S: Serializer>(
        &self,
        serializer: S,
        archetype_desc: &ArchetypeDescription,
    ) -> Result<S::Ok, S::Error>;
    /// Serialize all data in a `ComponentResourceSet`.
    fn serialize_components<S: Serializer>(
        &self,
        serializer: S,
        component_type: &ComponentTypeId,
        component_meta: &ComponentMeta,
        components: &ComponentResourceSet,
    ) -> Result<S::Ok, S::Error>;
    /// Serialize all tags in a `TagStorage`.
    fn serialize_tags<S: Serializer>(
        &self,
        serializer: S,
        tag_type: &TagTypeId,
        tag_meta: &TagMeta,
        tags: &TagStorage,
    ) -> Result<S::Ok, S::Error>;
    /// Serialize entities in the provided slice.
    fn serialize_entities<S: Serializer>(
        &self,
        serializer: S,
        entities: &[Entity],
    ) -> Result<S::Ok, S::Error>;
}
impl<'a, 'b, CS: WorldSerializer> Serialize for WorldSerializable<'a, 'b, CS> {
    // Serializes the world as a sequence of archetypes, skipping any archetype for
    // which the user serializer accepts none of its tag or component types.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let storage = self.world.storage();
        serializer.collect_seq(
            storage
                .archetypes()
                .iter()
                .filter_map(|archetype| {
                    // (index-in-description, type, meta) for each serializable tag type
                    let valid_tags = archetype
                        .description()
                        .tags()
                        .iter()
                        .enumerate()
                        .filter(|(_, (ty, meta))| self.world_serializer.can_serialize_tag(ty, meta))
                        .map(|(idx, (ty, meta))| (idx, ty, meta))
                        .collect::<Vec<_>>();
                    // (index-in-description, type, meta) for each serializable component type
                    let valid_components = archetype
                        .description()
                        .components()
                        .iter()
                        .enumerate()
                        .filter(|(_, (ty, meta))| {
                            self.world_serializer.can_serialize_component(ty, meta)
                        })
                        .map(|(idx, (ty, meta))| (idx, ty, meta))
                        .collect::<Vec<_>>();
                    if !valid_tags.is_empty() || !valid_components.is_empty() {
                        Some(ArchetypeSerializer {
                            world_serializer: self.world_serializer,
                            archetype,
                            valid_tags,
                            valid_components,
                        })
                    } else {
                        // nothing serializable in this archetype; omit it entirely
                        None
                    }
                })
                .collect::<Vec<_>>(),
        )
    }
}
// Serializes a single archetype as a struct of (description, tags, chunk_sets).
struct ArchetypeSerializer<'a, 'b, CS: WorldSerializer> {
    world_serializer: &'b CS,
    archetype: &'a ArchetypeData,
    // (index-in-description, type, meta) for each tag type accepted by the serializer
    valid_tags: Vec<(usize, &'a TagTypeId, &'a TagMeta)>,
    // (index-in-description, type, meta) for each component type accepted by the serializer
    valid_components: Vec<(usize, &'a ComponentTypeId, &'a ComponentMeta)>,
}
impl<'a, 'b, CS: WorldSerializer> Serialize for ArchetypeSerializer<'a, 'b, CS> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // emitted as a 3-field struct: description, tags, chunk_sets
        let mut archetype = serializer.serialize_struct("Archetype", 3)?;
        let desc = self.archetype.description();
        archetype.serialize_field(
            "description",
            &ArchetypeDescriptionSerializer {
                world_serializer: self.world_serializer,
                desc,
            },
        )?;
        // one entry per serializable tag type; panics if the archetype description
        // lists a tag type that is absent from tag storage (a storage invariant)
        let tags: Vec<_> = self
            .valid_tags
            .iter()
            .map(|(_idx, ty, meta)| {
                let tag_storage = self
                    .archetype
                    .tags()
                    .get(**ty)
                    .expect("tag type in archetype but not in storage");
                TagSerializer {
                    world_serializer: self.world_serializer,
                    ty,
                    meta,
                    tag_storage,
                }
            })
            .collect();
        archetype.serialize_field("tags", &tags)?;
        // one inner Vec per chunk set, one ChunkSerializer per occupied chunk
        let chunksets: Vec<_> = self
            .archetype
            .chunksets()
            .iter()
            .map(|chunkset| {
                chunkset
                    .occupied()
                    .iter()
                    .map(|comp_storage| ChunkSerializer {
                        world_serializer: self.world_serializer,
                        desc,
                        comp_storage,
                        valid_components: &self.valid_components,
                    })
                    .collect::<Vec<_>>()
            })
            .collect();
        archetype.serialize_field("chunk_sets", &chunksets)?;
        archetype.end()
    }
}
/// Adapter that serializes an `ArchetypeDescription` by delegating to the
/// user-provided `WorldSerializer`.
struct ArchetypeDescriptionSerializer<'a, 'b, CS: WorldSerializer> {
    world_serializer: &'b CS,
    desc: &'a ArchetypeDescription,
}
impl<'a, 'b, CS: WorldSerializer> Serialize for ArchetypeDescriptionSerializer<'a, 'b, CS> {
    /// Pure delegation to `WorldSerializer::serialize_archetype_description`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let Self { world_serializer, desc } = self;
        world_serializer.serialize_archetype_description(serializer, desc)
    }
}
/// Adapter that serializes the storage of a single tag type via the
/// user-provided `WorldSerializer`.
struct TagSerializer<'a, 'b, CS: WorldSerializer> {
    world_serializer: &'b CS,
    ty: &'a TagTypeId,
    meta: &'a TagMeta,
    tag_storage: &'a TagStorage,
}
impl<'a, 'b, CS: WorldSerializer> Serialize for TagSerializer<'a, 'b, CS> {
    /// Pure delegation to `WorldSerializer::serialize_tags`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let Self { world_serializer, ty, meta, tag_storage } = self;
        world_serializer.serialize_tags(serializer, ty, meta, tag_storage)
    }
}
/// Adapter that serializes a single chunk: its entity ids followed by the
/// component data of every serializable component type.
struct ChunkSerializer<'a, 'b, CS: WorldSerializer> {
    world_serializer: &'b CS,
    // NOTE(review): `desc` is set at the construction site but not read by
    // this type's Serialize impl; retained for interface stability.
    desc: &'a ArchetypeDescription,
    comp_storage: &'a ComponentStorage,
    // A shared slice suffices here; `&'a Vec<_>` added a pointless extra
    // level of indirection (clippy `ptr_arg`-style issue). The construction
    // site's `&self.valid_components` coerces from `&Vec<T>` to `&[T]`
    // automatically, so no caller changes are required.
    valid_components: &'a [(usize, &'a ComponentTypeId, &'a ComponentMeta)],
}
impl<'a, 'b, CS: WorldSerializer> Serialize for ChunkSerializer<'a, 'b, CS> {
    /// Encodes the chunk as a struct with fields `entities` then
    /// `components`, delegating value encoding to the `WorldSerializer`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut state = serializer.serialize_struct("Chunk", 2)?;
        state.serialize_field(
            "entities",
            &EntitySerializer {
                world_serializer: self.world_serializer,
                entities: self.comp_storage.entities(),
            },
        )?;
        // One adapter per serializable component type in this chunk.
        let mut components = Vec::with_capacity(self.valid_components.len());
        for (_idx, ty, meta) in self.valid_components.iter() {
            let comp_resources = self
                .comp_storage
                .components(**ty)
                .expect("component type in archetype but not in storage");
            components.push(ComponentResourceSetSerializer {
                world_serializer: self.world_serializer,
                ty,
                meta,
                comp_resources,
            });
        }
        state.serialize_field("components", &components)?;
        state.end()
    }
}
/// Adapter that serializes one component type's data within a chunk via the
/// user-provided `WorldSerializer`.
struct ComponentResourceSetSerializer<'a, 'b, CS: WorldSerializer> {
    world_serializer: &'b CS,
    ty: &'a ComponentTypeId,
    meta: &'a ComponentMeta,
    comp_resources: &'a ComponentResourceSet,
}
impl<'a, 'b, CS: WorldSerializer> Serialize for ComponentResourceSetSerializer<'a, 'b, CS> {
    /// Pure delegation to `WorldSerializer::serialize_components`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let Self { world_serializer, ty, meta, comp_resources } = self;
        world_serializer.serialize_components(serializer, ty, meta, comp_resources)
    }
}
/// Adapter that serializes a chunk's entity id slice via the user-provided
/// `WorldSerializer`.
struct EntitySerializer<'a, 'b, CS: WorldSerializer> {
    world_serializer: &'b CS,
    entities: &'a [Entity],
}
impl<'a, 'b, CS: WorldSerializer> Serialize for EntitySerializer<'a, 'b, CS> {
    /// Pure delegation to `WorldSerializer::serialize_entities`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let Self { world_serializer, entities } = self;
        world_serializer.serialize_entities(serializer, entities)
    }
}

2100
bevy_legion/src/storage.rs Normal file

File diff suppressed because it is too large Load diff

1729
bevy_legion/src/system.rs Normal file

File diff suppressed because it is too large Load diff

442
bevy_legion/src/tuple.rs Normal file
View file

@ -0,0 +1,442 @@
// This is required to be copied in because PartialEq is only implemented up to 14 elements on a tuple.
// This implements our own Eq for up to 26 parameters (A-Z)
use std::cmp::*;
/// Element-wise equality for tuples of arbitrary arity.
///
/// As noted in the file header, std only implements `PartialEq` for tuples
/// up to a limited arity; `tuple_impls!` below provides these methods for
/// tuples of up to 26 elements (A-Z).
pub trait TupleEq<T: ?Sized = Self> {
    /// True if every field compares equal to the corresponding field of `other`.
    fn legion_eq(&self, other: &T) -> bool;
    /// True if any field differs from the corresponding field of `other`.
    fn legion_ne(&self, other: &T) -> bool;
}
// macro for implementing n-ary tuple functions and operations
//
// Each `$Tuple { ($idx) -> $T ... }` entry expands to a `TupleEq` impl for
// the tuple type `($T,)+`, comparing fields positionally via `$idx`.
macro_rules! tuple_impls {
    ($(
        $Tuple:ident {
            $(($idx:tt) -> $T:ident)+
        }
    )+) => {
        $(
            // The `last_type!` bound lets the final element be unsized,
            // mirroring std's tuple trait impls.
            impl<$($T:PartialEq),+> TupleEq for ($($T,)+) where last_type!($($T,)+): ?Sized {
                #[inline]
                fn legion_eq(&self, other: &($($T,)+)) -> bool {
                    // Field comparisons chained with `&&`.
                    $(self.$idx == other.$idx)&&+
                }
                #[inline]
                fn legion_ne(&self, other: &($($T,)+)) -> bool {
                    // Field comparisons chained with `||`.
                    $(self.$idx != other.$idx)||+
                }
            }
        )+
    }
}
// Recursively selects the final identifier in a comma-terminated list;
// used above to apply the `?Sized` relaxation to the last tuple element only.
macro_rules! last_type {
    ($a:ident,) => { $a };
    ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
}
// Instantiate `TupleEq` for every tuple arity from 1 through 26
// (element types A through Z).
tuple_impls! {
    Tuple1 {
        (0) -> A
    }
    Tuple2 {
        (0) -> A
        (1) -> B
    }
    Tuple3 {
        (0) -> A
        (1) -> B
        (2) -> C
    }
    Tuple4 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
    }
    Tuple5 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
    }
    Tuple6 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
    }
    Tuple7 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
    }
    Tuple8 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
    }
    Tuple9 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
    }
    Tuple10 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
    }
    Tuple11 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
    }
    Tuple12 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
    }
    Tuple13 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
    }
    Tuple14 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
    }
    Tuple15 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
    }
    Tuple16 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
    }
    Tuple17 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
    }
    Tuple18 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
    }
    Tuple19 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
    }
    Tuple20 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
        (19) -> T
    }
    Tuple21 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
        (19) -> T
        (20) -> U
    }
    Tuple22 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
        (19) -> T
        (20) -> U
        (21) -> V
    }
    Tuple23 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
        (19) -> T
        (20) -> U
        (21) -> V
        (22) -> W
    }
    Tuple24 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
        (19) -> T
        (20) -> U
        (21) -> V
        (22) -> W
        (23) -> X
    }
    Tuple25 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
        (19) -> T
        (20) -> U
        (21) -> V
        (22) -> W
        (23) -> X
        (24) -> Y
    }
    Tuple26 {
        (0) -> A
        (1) -> B
        (2) -> C
        (3) -> D
        (4) -> E
        (5) -> F
        (6) -> G
        (7) -> H
        (8) -> I
        (9) -> J
        (10) -> K
        (11) -> L
        (12) -> M
        (13) -> N
        (14) -> O
        (15) -> P
        (16) -> Q
        (17) -> R
        (18) -> S
        (19) -> T
        (20) -> U
        (21) -> V
        (22) -> W
        (23) -> X
        (24) -> Y
        (25) -> Z
    }
}

1882
bevy_legion/src/world.rs Normal file

File diff suppressed because it is too large Load diff

139
bevy_legion/src/zip.rs Normal file
View file

@ -0,0 +1,139 @@
// This is copied in from itertools because they only implement zip for up to 8 elements.
// This implements our own zip up to 26 elements.
// Copyright (c) 2015
// https://github.com/rust-itertools/itertools
//
//Permission is hereby granted, free of charge, to any
//person obtaining a copy of this software and associated
//documentation files (the "Software"), to deal in the
//Software without restriction, including without
//limitation the rights to use, copy, modify, merge,
//publish, distribute, sublicense, and/or sell copies of
//the Software, and to permit persons to whom the Software
//is furnished to do so, subject to the following
//conditions:
//
//The above copyright notice and this permission notice
//shall be included in all copies or substantial portions
//of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
//ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
//TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
//PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
//SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
//CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
//OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
//IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
//DEALINGS IN THE SOFTWARE.
/// Combines two iterator `size_hint`s into the hint of their zip:
/// the minimum of the lower bounds, and the smaller of the two upper bounds
/// (a known upper bound always wins over an unknown one).
#[inline]
pub fn min(a: (usize, Option<usize>), b: (usize, Option<usize>)) -> (usize, Option<usize>) {
    let lower = a.0.min(b.0);
    let upper = match (a.1, b.1) {
        (Some(x), Some(y)) => Some(x.min(y)),
        (Some(x), None) | (None, Some(x)) => Some(x),
        (None, None) => None,
    };
    (lower, upper)
}
/// An iterator that advances a tuple of wrapped iterators in lockstep,
/// yielding tuples of their items; it ends as soon as any inner iterator
/// is exhausted. `t` holds the tuple of iterators; the `Iterator` impls
/// are generated by `impl_zip_iter!` below for arities 1 through 26.
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Zip<T> {
    t: T,
}
pub fn multizip<T, U>(t: U) -> Zip<T>
where
Zip<T>: From<U>,
Zip<T>: Iterator,
{
Zip::from(t)
}
// For a list of type parameters, generates: a `From<tuple of IntoIterator>`
// constructor for `Zip`, the lockstep `Iterator` impl, and an
// `ExactSizeIterator` impl. The `$B` idents double as value bindings inside
// the expansion (hence the `non_snake_case` allows).
macro_rules! impl_zip_iter {
    ($($B:ident),*) => (
        #[allow(non_snake_case)]
        impl<$($B: IntoIterator),*> From<($($B,)*)> for Zip<($($B::IntoIter,)*)> {
            fn from(t: ($($B,)*)) -> Self {
                // Convert each tuple element into its iterator.
                let ($($B,)*) = t;
                Zip { t: ($($B.into_iter(),)*) }
            }
        }
        #[allow(non_snake_case)]
        #[allow(unused_assignments)]
        impl<$($B),*> Iterator for Zip<($($B,)*)>
        where
            $(
                $B: Iterator,
            )*
        {
            type Item = ($($B::Item,)*);
            fn next(&mut self) -> Option<Self::Item>
            {
                let ($(ref mut $B,)*) = self.t;
                // NOTE: Just like iter::Zip, we check the iterators
                // for None in order. We may finish unevenly (some
                // iterators gave n + 1 elements, some only n).
                $(
                    let $B = match $B.next() {
                        None => return None,
                        Some(elt) => elt
                    };
                )*
                Some(($($B,)*))
            }
            #[allow(clippy::let_and_return)]
            fn size_hint(&self) -> (usize, Option<usize>)
            {
                // Fold every inner hint together with `min` (defined above),
                // starting from the identity hint (MAX, None).
                let sh = (::std::usize::MAX, None);
                let ($(ref $B,)*) = self.t;
                $(
                    let sh = min($B.size_hint(), sh);
                )*
                sh
            }
        }
        #[allow(non_snake_case)]
        impl<$($B),*> ExactSizeIterator for Zip<($($B,)*)> where
            $(
                $B: ExactSizeIterator,
            )*
        { }
    );
}
// Instantiate the zip machinery for every arity from 1 through 26.
impl_zip_iter!(A);
impl_zip_iter!(A, B);
impl_zip_iter!(A, B, C);
impl_zip_iter!(A, B, C, D);
impl_zip_iter!(A, B, C, D, E);
impl_zip_iter!(A, B, C, D, E, F);
impl_zip_iter!(A, B, C, D, E, F, G);
impl_zip_iter!(A, B, C, D, E, F, G, H);
impl_zip_iter!(A, B, C, D, E, F, G, H, I);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y);
impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z);

View file

@ -0,0 +1,32 @@
use legion::iterator::*;
use legion::storage::SliceVec;
// Pushing slices and iterating should yield them back in insertion order,
// followed by `None`.
#[test]
fn slice_vec_iterate() {
    let mut vec = SliceVec::default();
    vec.push(vec![1, 2, 3, 4]);
    vec.push(vec![5, 6, 7, 8]);
    vec.push(vec![9, 10]);
    let mut iter = vec.iter();
    assert_eq!(Some(&[1, 2, 3, 4][..]), iter.next());
    assert_eq!(Some(&[5, 6, 7, 8][..]), iter.next());
    assert_eq!(Some(&[9, 10][..]), iter.next());
    assert_eq!(None, iter.next());
}
// `split()` should divide the iterator into a left half (whose length is
// reported) and a right half covering the remaining slices.
#[test]
fn slice_vec_iterator_split() {
    let mut vec = SliceVec::default();
    vec.push(vec![1, 2, 3, 4]);
    vec.push(vec![5, 6, 7, 8]);
    vec.push(vec![9, 10]);
    let (mut left, mut right, left_len) = vec.iter().split();
    assert_eq!(left_len, 1);
    assert_eq!(Some(&[1, 2, 3, 4][..]), left.next());
    assert_eq!(None, left.next());
    assert_eq!(Some(&[5, 6, 7, 8][..]), right.next());
    assert_eq!(Some(&[9, 10][..]), right.next());
    assert_eq!(None, right.next());
}

View file

@ -0,0 +1,589 @@
#![allow(clippy::map_clone)]
use legion::prelude::*;
use std::collections::HashMap;
#[cfg(feature = "par-iter")]
use std::sync::atomic::{AtomicUsize, Ordering};
// Component types used by the query tests below.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Pos(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Rot(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Scale(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Vel(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Accel(f32, f32, f32);
// Tag types; tags additionally require Eq + Hash.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
struct Model(u32);
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
struct Static;
// `Read<Pos>` should visit every inserted entity exactly once with the data
// it was inserted with.
#[test]
fn query_read_entity_data() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = Read::<Pos>::query();
    let mut count = 0;
    for (entity, pos) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        count += 1;
    }
    assert_eq!(components.len(), count);
}
// `TryRead` yields `None` for entities lacking the component rather than
// filtering them out of the iteration.
#[test]
fn query_try_read_entity_data() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    world.insert((), Some((Pos(1., 2., 3.),)));
    world.insert((), Some((Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6))));
    let query = TryRead::<Rot>::query();
    let rots = query
        .iter(&mut world)
        .map(|x| x.map(|x| *x))
        .collect::<Vec<_>>();
    assert_eq!(rots.iter().filter(|x| x.is_none()).count(), 1);
    assert_eq!(
        rots.iter().cloned().filter_map(|x| x).collect::<Vec<_>>(),
        &[Rot(0.4, 0.5, 0.6)]
    );
}
// `TryWrite` mutates the component only where it exists.
#[test]
fn query_try_write_entity_data() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    world.insert((), Some((Pos(1., 2., 3.),)));
    let entity = world.insert((), Some((Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6))))[0];
    let query = TryWrite::<Rot>::query();
    for mut x in query.iter_mut(&mut world).filter_map(|x| x) {
        *x = Rot(9.0, 9.0, 9.0);
    }
    assert_eq!(
        world.get_component::<Rot>(entity).map(|x| *x),
        Some(Rot(9.0, 9.0, 9.0))
    );
}
// Same as `query_read_entity_data`; kept around for the (currently disabled)
// `.cached()` query variant.
#[test]
fn query_cached_read_entity_data() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = Read::<Pos>::query(); //.cached();
    let mut count = 0;
    for (entity, pos) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        count += 1;
    }
    assert_eq!(components.len(), count);
}
// Parallel chunk iteration must still visit every entity exactly once;
// the counter is atomic because chunks may be processed on worker threads.
#[test]
#[cfg(feature = "par-iter")]
fn query_read_entity_data_par() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let count = AtomicUsize::new(0);
    let query = Read::<Pos>::query();
    query.par_for_each_chunk_mut(&mut world, |mut chunk| {
        for (entity, pos) in chunk.iter_entities_mut() {
            assert_eq!(expected.get(&entity).unwrap().0, *pos);
            count.fetch_add(1, Ordering::SeqCst);
        }
    });
    assert_eq!(components.len(), count.load(Ordering::SeqCst));
}
// Per-entity parallel for-each should invoke the closure once per entity.
#[test]
#[cfg(feature = "par-iter")]
fn query_read_entity_data_par_foreach() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let count = AtomicUsize::new(0);
    let query = Read::<Pos>::query();
    query.par_for_each_mut(&mut world, |_pos| {
        count.fetch_add(1, Ordering::SeqCst);
    });
    assert_eq!(components.len(), count.load(Ordering::SeqCst));
}
// Tuple queries should yield both components for each matching entity.
#[test]
fn query_read_entity_data_tuple() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = <(Read<Pos>, Read<Rot>)>::query();
    let mut count = 0;
    for (entity, (pos, rot)) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        assert_eq!(expected.get(&entity).unwrap().1, *rot);
        count += 1;
    }
    assert_eq!(components.len(), count);
}
// `Write` queries should both read and allow mutating the component.
#[test]
fn query_write_entity_data() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = Write::<Pos>::query();
    let mut count = 0;
    for (entity, mut pos) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        count += 1;
        pos.0 = 0.0;
    }
    assert_eq!(components.len(), count);
}
// Tuple of `Write` accessors: both components mutable per entity.
#[test]
fn query_write_entity_data_tuple() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = <(Write<Pos>, Write<Rot>)>::query();
    let mut count = 0;
    for (entity, (mut pos, mut rot)) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        assert_eq!(expected.get(&entity).unwrap().1, *rot);
        count += 1;
        pos.0 = 0.0;
        rot.0 = 0.0;
    }
    assert_eq!(components.len(), count);
}
// Mixed read/write tuple query.
#[test]
fn query_mixed_entity_data_tuple() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = <(Read<Pos>, Write<Rot>)>::query();
    let mut count = 0;
    for (entity, (pos, mut rot)) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        assert_eq!(expected.get(&entity).unwrap().1, *rot);
        count += 1;
        rot.0 = 0.0;
    }
    assert_eq!(components.len(), count);
}
// NOTE(review): currently identical to `query_mixed_entity_data_tuple`;
// presumably intended to cover entities matching only part of the query.
#[test]
fn query_partial_match() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = <(Read<Pos>, Write<Rot>)>::query();
    let mut count = 0;
    for (entity, (pos, mut rot)) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        assert_eq!(expected.get(&entity).unwrap().1, *rot);
        count += 1;
        rot.0 = 0.0;
    }
    assert_eq!(components.len(), count);
}
// `Tagged` queries yield the shared tag value once per entity.
#[test]
fn query_read_shared_data() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    world.insert(shared, components.clone());
    let query = Tagged::<Static>::query();
    let mut count = 0;
    for marker in query.iter(&mut world) {
        assert_eq!(Static, *marker);
        count += 1;
    }
    assert_eq!(components.len(), count);
}
// Freshly inserted entities count as "changed" on the first filtered pass.
#[test]
fn query_on_changed_first() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = Read::<Pos>::query().filter(changed::<Pos>() | changed::<Rot>());
    let mut count = 0;
    for (entity, pos) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        count += 1;
    }
    assert_eq!(components.len(), count);
}
// After an iteration with no writes, a changed-filtered query yields nothing.
#[test]
fn query_on_changed_no_changes() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = Read::<Pos>::query().filter(changed::<Pos>());
    let mut count = 0;
    for (entity, pos) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        count += 1;
    }
    assert_eq!(components.len(), count);
    count = 0;
    for (entity, pos) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        count += 1;
    }
    assert_eq!(0, count);
}
// Writing through the query marks the component changed, so the next
// filtered pass re-visits every entity.
#[test]
fn query_on_changed_self_changes() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut expected = HashMap::<Entity, (Pos, Rot)>::new();
    for (i, e) in world.insert(shared, components.clone()).iter().enumerate() {
        if let Some((pos, rot)) = components.get(i) {
            expected.insert(*e, (*pos, *rot));
        }
    }
    let query = Write::<Pos>::query().filter(changed::<Pos>());
    let mut count = 0;
    for (entity, mut pos) in query.iter_entities_mut(&mut world) {
        assert_eq!(expected.get(&entity).unwrap().0, *pos);
        *pos = Pos(1., 1., 1.);
        count += 1;
    }
    assert_eq!(components.len(), count);
    count = 0;
    for pos in query.iter_mut(&mut world) {
        assert_eq!(Pos(1., 1., 1.), *pos);
        count += 1;
    }
    assert_eq!(components.len(), count);
}
// Combines `TryRead` with changed-filters: only entities whose A or B
// component changed are revisited, and the Sum component reflects that.
#[test]
fn query_try_with_changed_filter() {
    let _ = tracing_subscriber::fmt::try_init();
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Sum(f32);
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct A(f32);
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct B(f32);
    let universe = Universe::new();
    let mut world = universe.create_world();
    let sum_entity = world.insert((), Some((Sum(0.),)))[0];
    let a_entity = world.insert((), Some((Sum(0.), A(1.))))[0];
    let b_entity = world.insert((), Some((Sum(0.), B(2.))))[0];
    let a_b_entity = world.insert((), Some((Sum(0.), A(1.), B(2.))))[0];
    let query =
        <(Write<Sum>, TryRead<A>, TryRead<B>)>::query().filter(changed::<A>() | changed::<B>());
    // First pass: the three entities owning an A or B are "changed".
    let mut count = 0;
    for (mut sum, a, b) in query.iter_mut(&mut world) {
        sum.0 = a.map_or(0., |x| x.0) + b.map_or(0., |x| x.0);
        count += 1;
    }
    assert_eq!(3, count);
    assert_eq!(
        world.get_component::<Sum>(sum_entity).map(|x| *x),
        Some(Sum(0.))
    );
    assert_eq!(
        world.get_component::<Sum>(a_entity).map(|x| *x),
        Some(Sum(1.))
    );
    assert_eq!(
        world.get_component::<Sum>(b_entity).map(|x| *x),
        Some(Sum(2.))
    );
    assert_eq!(
        world.get_component::<Sum>(a_b_entity).map(|x| *x),
        Some(Sum(3.))
    );
    // Second pass: nothing changed, nothing visited.
    count = 0;
    for (mut sum, a, b) in query.iter_mut(&mut world) {
        sum.0 = a.map_or(0., |x| x.0) + b.map_or(0., |x| x.0);
        count += 1;
    }
    assert_eq!(0, count);
    // Mutating one B re-triggers the filter for that entity only.
    *world.get_component_mut::<B>(a_b_entity).unwrap() = B(3.0);
    count = 0;
    for (mut sum, a, b) in query.iter_mut(&mut world) {
        sum.0 = a.map_or(0., |x| x.0) + b.map_or(0., |x| x.0);
        count += 1;
    }
    assert_eq!(1, count);
    assert_eq!(
        world.get_component::<Sum>(a_b_entity).map(|x| *x),
        Some(Sum(4.))
    );
}
// Chunk iteration exposes the chunk's tag values; each entity in the chunk
// must agree with the world's view of its tag.
#[test]
fn query_iter_chunks_tag() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    world.insert((Static, Model(0)), vec![()]);
    world.insert((Static, Model(1)), vec![()]);
    world.insert((Static, Model(2)), vec![()]);
    let query = <(Tagged<Static>, Tagged<Model>)>::query();
    for chunk in query.iter_chunks(&world) {
        let model = chunk.tag::<Model>().cloned();
        for entity in chunk.entities() {
            assert_eq!(world.get_tag::<Model>(*entity), model.as_ref());
        }
    }
}

View file

@ -0,0 +1,420 @@
use legion::prelude::*;
// Component types used by the world tests below.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Pos(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Rot(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Scale(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Vel(f32, f32, f32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Accel(f32, f32, f32);
// Tag types; tags additionally require Eq + Hash.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
struct Model(u32);
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
struct Static;
// Inserting two component tuples yields two entity ids.
#[test]
fn insert() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (1usize, 2f32, 3u16);
    let components = vec![(4f32, 5u64, 6u16), (4f32, 5u64, 6u16)];
    let entities = world.insert(shared, components);
    assert_eq!(2, entities.len());
}
// Components read back by type must match what was inserted
// (the component type is inferred from the assertion's `&Pos`/`&Rot` cast).
#[test]
fn get_component() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut entities: Vec<Entity> = Vec::new();
    for e in world.insert(shared, components.clone()) {
        entities.push(*e);
    }
    for (i, e) in entities.iter().enumerate() {
        match world.get_component(*e) {
            Some(x) => assert_eq!(components.get(i).map(|(x, _)| x), Some(&x as &Pos)),
            None => assert_eq!(components.get(i).map(|(x, _)| x), None),
        }
        match world.get_component(*e) {
            Some(x) => assert_eq!(components.get(i).map(|(_, x)| x), Some(&x as &Rot)),
            None => assert_eq!(components.get(i).map(|(_, x)| x), None),
        }
    }
}
// Requesting a component type the entity does not have returns None.
#[test]
fn get_component_wrong_type() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let entity = *world.insert((), vec![(0f64,)]).get(0).unwrap();
    assert!(world.get_component::<i32>(entity).is_none());
}
// Tags read back by type must match what was inserted.
#[test]
fn get_shared() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut entities: Vec<Entity> = Vec::new();
    for e in world.insert(shared, components.clone()) {
        entities.push(*e);
    }
    for e in entities.iter() {
        assert_eq!(Some(&Static), world.get_tag(*e));
        assert_eq!(Some(&Model(5)), world.get_tag(*e));
    }
}
// Requesting a tag type the entity does not have returns None.
#[test]
fn get_shared_wrong_type() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let entity = *world.insert((Static,), vec![(0f64,)]).get(0).unwrap();
    assert!(world.get_tag::<Model>(entity).is_none());
}
// Deleting an entity makes it dead; all entities start alive.
#[test]
fn delete() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut entities: Vec<Entity> = Vec::new();
    for e in world.insert(shared, components.clone()) {
        entities.push(*e);
    }
    for e in entities.iter() {
        assert_eq!(true, world.is_alive(*e));
    }
    for e in entities.iter() {
        world.delete(*e);
        assert_eq!(false, world.is_alive(*e));
    }
}
// Deleting the last entity must not disturb the data of the others.
#[test]
fn delete_last() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut entities: Vec<Entity> = Vec::new();
    for e in world.insert(shared, components.clone()) {
        entities.push(*e);
    }
    let last = *entities.last().unwrap();
    world.delete(last);
    assert_eq!(false, world.is_alive(last));
    for (i, e) in entities.iter().take(entities.len() - 1).enumerate() {
        assert_eq!(true, world.is_alive(*e));
        match world.get_component(*e) {
            Some(x) => assert_eq!(components.get(i).map(|(x, _)| x), Some(&x as &Pos)),
            None => assert_eq!(components.get(i).map(|(x, _)| x), None),
        }
        match world.get_component(*e) {
            Some(x) => assert_eq!(components.get(i).map(|(_, x)| x), Some(&x as &Rot)),
            None => assert_eq!(components.get(i).map(|(_, x)| x), None),
        }
    }
}
// Deleting the first entity (which may swap storage slots) must keep the
// remaining entities' data intact.
#[test]
fn delete_first() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();
    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let mut entities: Vec<Entity> = Vec::new();
    for e in world.insert(shared, components.clone()) {
        entities.push(*e);
    }
    let first = *entities.first().unwrap();
    world.delete(first);
    assert_eq!(false, world.is_alive(first));
    for (i, e) in entities.iter().skip(1).enumerate() {
        assert_eq!(true, world.is_alive(*e));
        match world.get_component(*e) {
            Some(x) => assert_eq!(components.get(i + 1).map(|(x, _)| x), Some(&x as &Pos)),
            None => assert_eq!(components.get(i + 1).map(|(x, _)| x), None),
        }
        match world.get_component(*e) {
            Some(x) => assert_eq!(components.get(i + 1).map(|(_, x)| x), Some(&x as &Rot)),
            None => assert_eq!(components.get(i + 1).map(|(_, x)| x), None),
        }
    }
}
/// Merging one world into another must carry over the source world's
/// entities, leave them alive in the destination, and preserve their
/// component data.
#[test]
fn merge() {
    let _ = tracing_subscriber::fmt::try_init();

    // Two worlds from the same universe so entity ids are compatible.
    let universe = Universe::new();
    let mut world_1 = universe.create_world();
    let mut world_2 = universe.create_world();

    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];

    // Populate both worlds with identical data, remembering each id set.
    let mut world_1_entities: Vec<Entity> = Vec::new();
    for e in world_1.insert(shared, components.clone()) {
        world_1_entities.push(*e);
    }
    let mut world_2_entities: Vec<Entity> = Vec::new();
    for e in world_2.insert(shared, components.clone()) {
        world_2_entities.push(*e);
    }

    // Consumes world_2, moving its contents into world_1.
    world_1.merge(world_2);

    // world_2's entities are now resolvable through world_1 with their
    // original component values (`&… as &Pos`/`&Rot` pin the lookup type).
    for (i, e) in world_2_entities.iter().enumerate() {
        assert!(world_1.is_alive(*e));
        let (pos, rot) = components.get(i).unwrap();
        assert_eq!(pos, &world_1.get_component(*e).unwrap() as &Pos);
        assert_eq!(rot, &world_1.get_component(*e).unwrap() as &Rot);
    }
}
#[test]
fn mutate_add_component() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();

    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let entities = world.insert(shared, components).to_vec();

    let query_without_scale = <(Read<Pos>, Read<Rot>)>::query();
    let query_with_scale = <(Read<Pos>, Read<Rot>, Read<Scale>)>::query();

    // No entity carries a Scale yet.
    assert_eq!(3, query_without_scale.iter(&mut world).count());
    assert_eq!(0, query_with_scale.iter(&mut world).count());

    // Attach a Scale to the middle entity.
    world
        .add_component(entities[1], Scale(0.5, 0.5, 0.5))
        .unwrap();

    // The broad query still matches everything; exactly one entity
    // now also satisfies the Scale query.
    assert_eq!(3, query_without_scale.iter(&mut world).count());
    assert_eq!(1, query_with_scale.iter(&mut world).count());
}
#[test]
fn mutate_remove_component() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();

    let shared = (Static, Model(5));
    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let entities = world.insert(shared, components).to_vec();

    let query_without_rot = Read::<Pos>::query().filter(!component::<Rot>());
    let query_with_rot = <(Read<Pos>, Read<Rot>)>::query();

    // Every entity starts with a Rot component.
    assert_eq!(0, query_without_rot.iter(&mut world).count());
    assert_eq!(3, query_with_rot.iter(&mut world).count());

    // Strip Rot from the middle entity and recount.
    world.remove_component::<Rot>(entities[1]);
    assert_eq!(1, query_without_rot.iter(&mut world).count());
    assert_eq!(2, query_with_rot.iter(&mut world).count());
}
#[test]
fn mutate_add_tag() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();

    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let entities = world.insert((Model(5),), components).to_vec();

    let query_without_static = <(Read<Pos>, Read<Rot>)>::query();
    let query_with_static = <(Read<Pos>, Read<Rot>, Tagged<Static>)>::query();

    // Nothing is tagged Static to begin with.
    assert_eq!(3, query_without_static.iter(&mut world).count());
    assert_eq!(0, query_with_static.iter(&mut world).count());

    // Tag the middle entity; only it should match the Static query.
    world.add_tag(entities[1], Static);
    assert_eq!(3, query_without_static.iter(&mut world).count());
    assert_eq!(1, query_with_static.iter(&mut world).count());
}
#[test]
fn mutate_remove_tag() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();

    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let entities = world.insert((Model(5), Static), components).to_vec();

    let query_without_static = <(Read<Pos>, Read<Rot>)>::query().filter(!tag::<Static>());
    let query_with_static = <(Read<Pos>, Read<Rot>, Tagged<Static>)>::query();

    // Everything starts tagged Static.
    assert_eq!(0, query_without_static.iter(&mut world).count());
    assert_eq!(3, query_with_static.iter(&mut world).count());

    // Untag the middle entity; only it should drop out of the Static query.
    world.remove_tag::<Static>(entities[1]);
    assert_eq!(1, query_without_static.iter(&mut world).count());
    assert_eq!(2, query_with_static.iter(&mut world).count());
}
#[test]
fn mutate_change_tag_minimum_test() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();

    // Single entity tagged Model(5).
    let entities = world
        .insert((Model(5),), vec![(Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3))])
        .to_vec();

    // Replacing a tag value moves the entity to a new archetype.
    tracing::trace!("STARTING CHANGE");
    world.add_tag(entities[0], Model(3));
    tracing::trace!("CHANGED\n");

    assert_eq!(*world.get_tag::<Model>(entities[0]).unwrap(), Model(3));
}
#[test]
#[allow(clippy::suspicious_map)]
fn mutate_change_tag() {
    let _ = tracing_subscriber::fmt::try_init();
    let universe = Universe::new();
    let mut world = universe.create_world();

    let components = vec![
        (Pos(1., 2., 3.), Rot(0.1, 0.2, 0.3)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
        (Pos(4., 5., 6.), Rot(0.4, 0.5, 0.6)),
    ];
    let entities = world.insert((Model(5),), components).to_vec();

    let query_model_3 = <(Read<Pos>, Read<Rot>)>::query().filter(tag_value(&Model(3)));
    let query_model_5 = <(Read<Pos>, Read<Rot>)>::query().filter(tag_value(&Model(5)));

    // Everything begins under Model(5).
    assert_eq!(3, query_model_5.iter(&mut world).count());
    assert_eq!(0, query_model_3.iter(&mut world).count());

    // Re-tag the middle entity with a different Model value.
    tracing::trace!("STARTING CHANGE");
    world.add_tag(entities[1], Model(3));
    tracing::trace!("CHANGED\n");

    // Exactly one entity now matches Model(3); trace each hit while counting.
    assert_eq!(
        1,
        query_model_3
            .iter_entities_mut(&mut world)
            .map(|e| {
                tracing::trace!("iter: {:?}", e);
                e
            })
            .count()
    );
    assert_eq!(
        *world.get_tag::<Model>(entities[1]).unwrap(),
        Model(3)
    );
    // The other two remain under Model(5).
    assert_eq!(2, query_model_5.iter(&mut world).count());
}

0
bevy_legion/uuid.ron Normal file
View file

View file

@ -7,7 +7,7 @@ edition = "2018"
license = "MIT"
[dependencies]
legion = { git = "https://github.com/TomGillen/legion.git", rev = "940ef3bfcb77e5d074ee3184b776ff1600da228d" }
legion = { path = "../bevy_legion" }
glam = "0.8.3"
log = "0.4"
rayon = "1.2"

View file

@ -0,0 +1,3 @@
/target
**/*.rs.bk
Cargo.lock

View file

@ -0,0 +1,15 @@
[package]
name = "example_plugin"
version = "0.1.0"
authors = ["Carter Anderson <mcanders1@gmail.com>"]
edition = "2018"
[lib]
crate-type = ["cdylib"]
[dependencies]
bevy = { path = "../../../../bevy" }
bevy_derive = { path = "../../../bevy_derive" }
[profile.release]
debug = true

View file

@ -0,0 +1,75 @@
use bevy::prelude::*;
use bevy::plugin::AppPlugin;
use bevy_derive::RegisterAppPlugin;
#[derive(RegisterAppPlugin)]
pub struct ExamplePlugin;
impl AppPlugin for ExamplePlugin {
fn build(&self, app_builder: AppBuilder) -> AppBuilder {
app_builder.setup_world(setup)
}
fn name(&self) -> &'static str {
"example"
}
}
pub fn setup(world: &mut World) {
let cube = Mesh::load(MeshType::Cube);
let plane = Mesh::load(MeshType::Plane { size: 10.0 });
let (cube_handle, plane_handle) = {
let mut mesh_storage = world.resources.get_mut::<AssetStorage<Mesh>>().unwrap();
(mesh_storage.add(cube), mesh_storage.add(plane))
};
world.build()
// plane
.build_archetype(MeshEntity {
mesh: plane_handle.clone(),
material: Material::new(Albedo::Color(math::vec4(0.1, 0.2, 0.1, 1.0))),
local_to_world: LocalToWorld::identity(),
translation: Translation::new(0.0, 0.0, 0.0),
})
// cube
.build_archetype(MeshEntity {
mesh: cube_handle,
material: Material::new(Albedo::Color(math::vec4(0.5, 0.3, 0.3, 1.0))),
local_to_world: LocalToWorld::identity(),
translation: Translation::new(0.0, 0.0, 1.0),
})
// light
// .build_archetype(LightEntity {
// light: Light {
// color: wgpu::Color {
// r: 0.8,
// g: 0.8,
// b: 0.5,
// a: 1.0,
// },
// fov: f32::to_radians(60.0),
// depth: 0.1..50.0,
// target_view: None,
// },
// local_to_world: LocalToWorld::identity(),
// translation: Translation::new(4.0, -4.0, 5.0),
// rotation: Rotation::from_euler_angles(0.0, 0.0, 0.0),
// })
// camera
.build_archetype(CameraEntity {
camera: Camera::new(CameraType::Projection {
fov: std::f32::consts::PI / 4.0,
near: 1.0,
far: 1000.0,
aspect_ratio: 1.0,
}),
active_camera: ActiveCamera,
local_to_world: LocalToWorld(Mat4::look_at_rh(
Vec3::new(3.0, 8.0, 5.0),
Vec3::new(0.0, 0.0, 0.0),
Vec3::new(0.0, 0.0, 1.0),
)),
})
.build();
}

View file

@ -0,0 +1,8 @@
use bevy::prelude::*;
fn main() {
AppBuilder::new()
.add_defaults_legacy()
.load_plugin("examples/plugin_loading/example_plugin/target/release/libexample_plugin.so")
.run();
}

View file

@ -13,33 +13,24 @@ fn setup(world: &mut World) {
(mesh_storage.add(cube), mesh_storage.add(plane))
};
// plane
world.insert(
(),
vec![(
plane_handle.clone(),
Material::new(Albedo::Color(math::vec4(0.1, 0.2, 0.1, 1.0))),
LocalToWorld::identity(),
Translation::new(0.0, 0.0, 0.0),
)],
);
// cube
world.insert(
(),
vec![(
cube_handle,
Material::new(Albedo::Color(math::vec4(0.5, 0.3, 0.3, 1.0))),
LocalToWorld::identity(),
Translation::new(0.0, 0.0, 1.0),
)],
);
// light
world.insert(
(),
vec![(
Light {
world.build()
// plane
.build_archetype(MeshEntity {
mesh: plane_handle.clone(),
material: Material::new(Albedo::Color(math::vec4(0.1, 0.2, 0.1, 1.0))),
local_to_world: LocalToWorld::identity(),
translation: Translation::new(0.0, 0.0, 0.0),
})
// cube
.build_archetype(MeshEntity {
mesh: cube_handle,
material: Material::new(Albedo::Color(math::vec4(0.5, 0.3, 0.3, 1.0))),
local_to_world: LocalToWorld::identity(),
translation: Translation::new(0.0, 0.0, 1.0),
})
// light
.build_archetype(LightEntity {
light: Light {
color: wgpu::Color {
r: 0.8,
g: 0.8,
@ -50,28 +41,24 @@ fn setup(world: &mut World) {
depth: 0.1..50.0,
target_view: None,
},
LocalToWorld::identity(),
Translation::new(4.0, -4.0, 5.0),
Rotation::from_euler_angles(0.0, 0.0, 0.0),
)],
);
// camera
world.insert(
(),
vec![(
Camera::new(CameraType::Projection {
local_to_world: LocalToWorld::identity(),
translation: Translation::new(4.0, -4.0, 5.0),
rotation: Rotation::from_euler_angles(0.0, 0.0, 0.0),
})
// camera
.build_archetype(CameraEntity {
camera: Camera::new(CameraType::Projection {
fov: std::f32::consts::PI / 4.0,
near: 1.0,
far: 1000.0,
aspect_ratio: 1.0,
}),
ActiveCamera,
LocalToWorld(Mat4::look_at_rh(
active_camera: ActiveCamera,
local_to_world: LocalToWorld(Mat4::look_at_rh(
Vec3::new(3.0, 8.0, 5.0),
Vec3::new(0.0, 0.0, 0.0),
Vec3::new(0.0, 0.0, 1.0),
)),
)],
);
})
.build();
}

View file

@ -6,6 +6,7 @@ use crate::{
render::render_graph_2,
render::render_graph_2::{pipelines::*, wgpu_renderer::WgpuRenderer},
render::{passes::*, *},
plugin::load_plugin,
ui,
};
@ -197,4 +198,10 @@ impl AppBuilder {
.add_render_graph_defaults()
.add_wgpu_renderer()
}
pub fn load_plugin(mut self, path: &str) -> Self {
let (_lib, plugin) = load_plugin(path);
self = plugin.build(self);
self
}
}

View file

@ -6,6 +6,7 @@ pub mod prelude;
pub mod render;
pub mod serialization;
pub mod ui;
pub mod plugin;
pub use glam as math;
pub use legion;

20
src/plugin/mod.rs Normal file
View file

@ -0,0 +1,20 @@
use crate::app::AppBuilder;
use libloading::{Library, Symbol};
use std::any::Any;
pub trait AppPlugin: Any + Send + Sync {
fn build(&self, app_builder: AppBuilder) -> AppBuilder;
fn name(&self) -> &'static str;
}
type CreateAppPlugin = unsafe fn() -> *mut dyn AppPlugin;
/// Opens the dynamic library at `path` and constructs the `AppPlugin` it
/// exports via its `_create_plugin` entry point.
///
/// Returns the `Library` together with the plugin: the caller must keep the
/// `Library` alive at least as long as the `Box<dyn AppPlugin>`, because the
/// plugin's code and vtable live inside the loaded library.
///
/// # Panics
/// Panics if the library cannot be opened or does not export `_create_plugin`.
pub fn load_plugin(path: &str) -> (Library, Box<dyn AppPlugin>) {
    let lib = Library::new(path).unwrap();
    unsafe {
        // SAFETY assumption (review): the exported `_create_plugin` symbol
        // must match the `CreateAppPlugin` signature and return a pointer
        // produced by `Box::into_raw`; ownership of that allocation is
        // reclaimed here with `Box::from_raw`. TODO: confirm the derive
        // macro's generated signature stays in sync with `CreateAppPlugin`.
        let func: Symbol<CreateAppPlugin> = lib.get(b"_create_plugin").unwrap();
        let plugin = Box::from_raw(func());
        (lib, plugin)
    }
}

View file

@ -13,7 +13,7 @@ use serde::{
Deserialize, Deserializer, Serialize, Serializer,
};
use std::{
any::TypeId, cell::RefCell, collections::HashMap, iter::FromIterator, marker::PhantomData,
any::{type_name}, cell::RefCell, collections::HashMap, iter::FromIterator, marker::PhantomData,
ptr::NonNull,
};
use type_uuid::TypeUuid;
@ -101,7 +101,7 @@ impl<'de, 'a, T: for<'b> Deserialize<'b> + 'static> Visitor<'de>
#[derive(Clone)]
pub struct TagRegistration {
uuid: type_uuid::Bytes,
ty: TypeId,
ty: String,
tag_serialize_fn: fn(&TagStorage, &mut dyn FnMut(&dyn erased_serde::Serialize)),
tag_deserialize_fn: fn(
deserializer: &mut dyn erased_serde::Deserializer,
@ -123,7 +123,7 @@ impl TagRegistration {
>() -> Self {
Self {
uuid: T::UUID,
ty: TypeId::of::<T>(),
ty: type_name::<T>().to_string(),
tag_serialize_fn: |tag_storage, serialize_fn| {
// it's safe because we know this is the correct type due to lookup
let slice = unsafe { tag_storage.data_slice::<T>() };
@ -150,7 +150,7 @@ impl TagRegistration {
#[derive(Clone)]
pub struct ComponentRegistration {
uuid: type_uuid::Bytes,
ty: TypeId,
ty: String,
comp_serialize_fn: fn(&ComponentResourceSet, &mut dyn FnMut(&dyn erased_serde::Serialize)),
comp_deserialize_fn: fn(
deserializer: &mut dyn erased_serde::Deserializer,
@ -164,7 +164,7 @@ impl ComponentRegistration {
{
Self {
uuid: T::UUID,
ty: TypeId::of::<T>(),
ty: type_name::<T>().to_string(),
comp_serialize_fn: |comp_storage, serialize_fn| {
// it's safe because we know this is the correct type due to lookup
let slice = unsafe { comp_storage.data_slice::<T>() };
@ -192,8 +192,8 @@ struct SerializedArchetypeDescription {
}
pub struct SerializeImpl {
pub tag_types: HashMap<TypeId, TagRegistration>,
pub comp_types: HashMap<TypeId, ComponentRegistration>,
pub tag_types: HashMap<String, TagRegistration>,
pub comp_types: HashMap<String, ComponentRegistration>,
pub entity_map: RefCell<HashMap<Entity, uuid::Bytes>>,
}
@ -206,10 +206,10 @@ impl SerializeImpl {
comp_types: HashMap::from_iter(
component_registrations
.iter()
.map(|reg| (reg.ty, reg.clone())),
.map(|reg| (reg.ty.clone(), reg.clone())),
),
tag_types: HashMap::from_iter(
tag_registrations.iter().map(|reg| (reg.ty, reg.clone())),
tag_registrations.iter().map(|reg| (reg.ty.clone(), reg.clone())),
),
entity_map: RefCell::new(HashMap::new()),
}
@ -224,10 +224,10 @@ impl SerializeImpl {
comp_types: HashMap::from_iter(
component_registrations
.iter()
.map(|reg| (reg.ty, reg.clone())),
.map(|reg| (reg.ty.clone(), reg.clone())),
),
tag_types: HashMap::from_iter(
tag_registrations.iter().map(|reg| (reg.ty, reg.clone())),
tag_registrations.iter().map(|reg| (reg.ty.clone(), reg.clone())),
),
entity_map: RefCell::new(HashMap::from_iter(
entity_map.into_iter().map(|(uuid, e)| (e, uuid)),
@ -238,10 +238,10 @@ impl SerializeImpl {
impl legion::ser::WorldSerializer for SerializeImpl {
fn can_serialize_tag(&self, ty: &TagTypeId, _meta: &TagMeta) -> bool {
self.tag_types.get(&ty.0).is_some()
self.tag_types.get(ty.0).is_some()
}
fn can_serialize_component(&self, ty: &ComponentTypeId, _meta: &ComponentMeta) -> bool {
self.comp_types.get(&ty.0).is_some()
self.comp_types.get(ty.0).is_some()
}
fn serialize_archetype_description<S: Serializer>(
&self,
@ -251,13 +251,13 @@ impl legion::ser::WorldSerializer for SerializeImpl {
let tags_to_serialize = archetype_desc
.tags()
.iter()
.filter_map(|(ty, _)| self.tag_types.get(&ty.0))
.filter_map(|(ty, _)| self.tag_types.get(ty.0))
.map(|reg| reg.uuid)
.collect::<Vec<_>>();
let components_to_serialize = archetype_desc
.components()
.iter()
.filter_map(|(ty, _)| self.comp_types.get(&ty.0))
.filter_map(|(ty, _)| self.comp_types.get(ty.0))
.map(|reg| reg.uuid)
.collect::<Vec<_>>();
SerializedArchetypeDescription {
@ -273,7 +273,7 @@ impl legion::ser::WorldSerializer for SerializeImpl {
_component_meta: &ComponentMeta,
components: &ComponentResourceSet,
) -> Result<S::Ok, S::Error> {
if let Some(reg) = self.comp_types.get(&component_type.0) {
if let Some(reg) = self.comp_types.get(component_type.0) {
let result = RefCell::new(None);
let serializer = RefCell::new(Some(serializer));
{
@ -299,7 +299,7 @@ impl legion::ser::WorldSerializer for SerializeImpl {
_tag_meta: &TagMeta,
tags: &TagStorage,
) -> Result<S::Ok, S::Error> {
if let Some(reg) = self.tag_types.get(&tag_type.0) {
if let Some(reg) = self.tag_types.get(tag_type.0) {
let result = RefCell::new(None);
let serializer = RefCell::new(Some(serializer));
{
@ -333,8 +333,8 @@ impl legion::ser::WorldSerializer for SerializeImpl {
}
pub struct DeserializeImpl {
pub tag_types: HashMap<TypeId, TagRegistration>,
pub comp_types: HashMap<TypeId, ComponentRegistration>,
pub tag_types: HashMap<String, TagRegistration>,
pub comp_types: HashMap<String, ComponentRegistration>,
pub tag_types_by_uuid: HashMap<type_uuid::Bytes, TagRegistration>,
pub comp_types_by_uuid: HashMap<type_uuid::Bytes, ComponentRegistration>,
pub entity_map: RefCell<HashMap<uuid::Bytes, Entity>>,
@ -342,8 +342,8 @@ pub struct DeserializeImpl {
impl DeserializeImpl {
pub fn new(
component_types: HashMap<TypeId, ComponentRegistration>,
tag_types: HashMap<TypeId, TagRegistration>,
component_types: HashMap<String, ComponentRegistration>,
tag_types: HashMap<String, TagRegistration>,
entity_map: RefCell<HashMap<Entity, uuid::Bytes>>,
) -> Self {
DeserializeImpl {
@ -395,7 +395,7 @@ impl legion::de::WorldDeserializer for DeserializeImpl {
_component_meta: &ComponentMeta,
get_next_storage_fn: &mut dyn FnMut() -> Option<(NonNull<u8>, usize)>,
) -> Result<(), <D as Deserializer<'de>>::Error> {
if let Some(reg) = self.comp_types.get(&component_type.0) {
if let Some(reg) = self.comp_types.get(component_type.0) {
let mut erased = erased_serde::Deserializer::erase(deserializer);
(reg.comp_deserialize_fn)(&mut erased, get_next_storage_fn)
.map_err(<<D as serde::Deserializer<'de>>::Error as serde::de::Error>::custom)?;
@ -411,7 +411,7 @@ impl legion::de::WorldDeserializer for DeserializeImpl {
_tag_meta: &TagMeta,
tags: &mut TagStorage,
) -> Result<(), <D as Deserializer<'de>>::Error> {
if let Some(reg) = self.tag_types.get(&tag_type.0) {
if let Some(reg) = self.tag_types.get(tag_type.0) {
let mut erased = erased_serde::Deserializer::erase(deserializer);
(reg.tag_deserialize_fn)(&mut erased, tags)
.map_err(<<D as serde::Deserializer<'de>>::Error as serde::de::Error>::custom)?;