Replace std synchronization primitives with parking_lot (#210)

* Replace std::sync::Mutex with parking_lot::Mutex
* Replace std::sync::RwLock with parking_lot::RwLock
This commit is contained in:
Lachlan Sneff 2020-08-21 17:55:16 -04:00 committed by GitHub
parent fc53ff9a71
commit 1eca55e571
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
28 changed files with 174 additions and 246 deletions

View file

@ -27,4 +27,5 @@ crossbeam-channel = "0.4.2"
anyhow = "1.0"
thiserror = "1.0"
log = { version = "0.4", features = ["release_max_level_info"] }
notify = { version = "5.0.0-pre.2", optional = true }
notify = { version = "5.0.0-pre.2", optional = true }
parking_lot = "0.10.2"

View file

@ -5,11 +5,12 @@ use crate::{
use anyhow::Result;
use bevy_ecs::{Res, Resource, Resources};
use crossbeam_channel::TryRecvError;
use parking_lot::RwLock;
use std::{
collections::{HashMap, HashSet},
env, fs, io,
path::{Path, PathBuf},
sync::{Arc, RwLock},
sync::Arc,
thread,
};
use thiserror::Error;
@ -107,7 +108,7 @@ impl AssetServer {
where
T: AssetLoadRequestHandler,
{
let mut asset_handlers = self.asset_handlers.write().unwrap();
let mut asset_handlers = self.asset_handlers.write();
let handler_index = asset_handlers.len();
for extension in asset_handler.extensions().iter() {
self.extension_to_handler_index
@ -140,14 +141,13 @@ impl AssetServer {
let root_path = self.get_root_path()?;
let asset_folder = root_path.join(path);
let handle_ids = self.load_assets_in_folder_recursive(&asset_folder)?;
self.asset_folders.write().unwrap().push(asset_folder);
self.asset_folders.write().push(asset_folder);
Ok(handle_ids)
}
pub fn get_handle<T, P: AsRef<Path>>(&self, path: P) -> Option<Handle<T>> {
self.asset_info_paths
.read()
.unwrap()
.get(path.as_ref())
.map(|handle_id| Handle::from(*handle_id))
}
@ -170,11 +170,11 @@ impl AssetServer {
#[cfg(feature = "filesystem_watcher")]
pub fn watch_for_changes(&self) -> Result<(), AssetServerError> {
let mut filesystem_watcher = self.filesystem_watcher.write().unwrap();
let mut filesystem_watcher = self.filesystem_watcher.write();
let _ = filesystem_watcher.get_or_insert_with(FilesystemWatcher::default);
// watch current files
let asset_info_paths = self.asset_info_paths.read().unwrap();
let asset_info_paths = self.asset_info_paths.read();
for asset_path in asset_info_paths.keys() {
Self::watch_path_for_changes(&mut filesystem_watcher, asset_path)?;
}
@ -187,9 +187,7 @@ impl AssetServer {
use notify::event::{Event, EventKind, ModifyKind};
let mut changed = HashSet::new();
while let Some(filesystem_watcher) =
asset_server.filesystem_watcher.read().unwrap().as_ref()
{
while let Some(filesystem_watcher) = asset_server.filesystem_watcher.read().as_ref() {
let result = match filesystem_watcher.receiver.try_recv() {
Ok(result) => result,
Err(TryRecvError::Empty) => {
@ -280,8 +278,8 @@ impl AssetServer {
) {
let mut new_version = 0;
let handle_id = {
let mut asset_info = self.asset_info.write().unwrap();
let mut asset_info_paths = self.asset_info_paths.write().unwrap();
let mut asset_info = self.asset_info.write();
let mut asset_info_paths = self.asset_info_paths.write();
if let Some(asset_info) = asset_info_paths
.get(path)
.and_then(|handle_id| asset_info.get_mut(&handle_id))
@ -319,7 +317,7 @@ impl AssetServer {
// TODO: watching each asset explicitly is a simpler implementation, it's possible it would be more efficient to watch
// folders instead (when possible)
#[cfg(feature = "filesystem_watcher")]
Self::watch_path_for_changes(&mut self.filesystem_watcher.write().unwrap(), path)?;
Self::watch_path_for_changes(&mut self.filesystem_watcher.write(), path)?;
Ok(handle_id)
} else {
Err(AssetServerError::MissingAssetHandler)
@ -330,7 +328,7 @@ impl AssetServer {
}
pub fn set_load_state(&self, handle_id: HandleId, load_state: LoadState) {
if let Some(asset_info) = self.asset_info.write().unwrap().get_mut(&handle_id) {
if let Some(asset_info) = self.asset_info.write().get_mut(&handle_id) {
if load_state.get_version() >= asset_info.load_state.get_version() {
asset_info.load_state = load_state;
}
@ -340,7 +338,6 @@ impl AssetServer {
pub fn get_load_state_untyped(&self, handle_id: HandleId) -> Option<LoadState> {
self.asset_info
.read()
.unwrap()
.get(&handle_id)
.map(|asset_info| asset_info.load_state.clone())
}
@ -367,7 +364,7 @@ impl AssetServer {
fn send_request_to_loader_thread(&self, load_request: LoadRequest) {
// NOTE: This lock makes the call to Arc::strong_count safe. Removing (or reordering) it could result in undefined behavior
let mut loader_threads = self.loader_threads.write().unwrap();
let mut loader_threads = self.loader_threads.write();
if loader_threads.len() < self.max_loader_threads {
let loader_thread = LoaderThread {
requests: Arc::new(RwLock::new(vec![load_request])),
@ -378,9 +375,9 @@ impl AssetServer {
} else {
let most_free_thread = loader_threads
.iter()
.min_by_key(|l| l.requests.read().unwrap().len())
.min_by_key(|l| l.requests.read().len())
.unwrap();
let mut requests = most_free_thread.requests.write().unwrap();
let mut requests = most_free_thread.requests.write();
requests.push(load_request);
// if the most free thread only has one reference, the thread has spun down. if so, we need to spin it back up!
if Arc::strong_count(&most_free_thread.requests) == 1 {
@ -399,7 +396,7 @@ impl AssetServer {
thread::spawn(move || {
loop {
let request = {
let mut current_requests = requests.write().unwrap();
let mut current_requests = requests.write();
if current_requests.len() == 0 {
// if there are no requests, spin down the thread
break;
@ -408,7 +405,7 @@ impl AssetServer {
current_requests.pop().unwrap()
};
let handlers = request_handlers.read().unwrap();
let handlers = request_handlers.read();
let request_handler = &handlers[request.handler_index];
request_handler.handle_request(&request);
}

View file

@ -18,6 +18,7 @@ bevy_ecs = {path = "../bevy_ecs", version = "0.1"}
# other
anyhow = "1.0"
rodio = {version = "0.11", default-features = false}
parking_lot = "0.10.2"
[features]
mp3 = ["rodio/mp3"]

View file

@ -1,8 +1,9 @@
use crate::AudioSource;
use bevy_asset::{Assets, Handle};
use bevy_ecs::Res;
use parking_lot::RwLock;
use rodio::{Decoder, Device, Sink};
use std::{collections::VecDeque, io::Cursor, sync::RwLock};
use std::{collections::VecDeque, io::Cursor};
/// Used to play audio on the current "audio device"
pub struct AudioOutput {
@ -27,11 +28,11 @@ impl AudioOutput {
}
pub fn play(&self, audio_source: Handle<AudioSource>) {
self.queue.write().unwrap().push_front(audio_source);
self.queue.write().push_front(audio_source);
}
pub fn try_play_queued(&self, audio_sources: &Assets<AudioSource>) {
let mut queue = self.queue.write().unwrap();
let mut queue = self.queue.write();
let len = queue.len();
let mut i = 0;
while i < len {

View file

@ -20,3 +20,4 @@ bevy_ecs = { path = "../bevy_ecs", version = "0.1" }
# other
uuid = { version = "0.8", features = ["v4", "serde"] }
parking_lot = "0.10"

View file

@ -1,11 +1,7 @@
use crate::{Diagnostic, DiagnosticId, Diagnostics};
use bevy_ecs::{Profiler, Res, ResMut};
use std::{
borrow::Cow,
collections::HashMap,
sync::{Arc, RwLock},
time::Instant,
};
use parking_lot::RwLock;
use std::{borrow::Cow, collections::HashMap, sync::Arc, time::Instant};
#[derive(Debug)]
struct SystemRunInfo {
@ -28,7 +24,7 @@ pub struct SystemProfiler {
impl Profiler for SystemProfiler {
fn start(&self, scope: Cow<'static, str>) {
let mut system_profiles = self.system_profiles.write().unwrap();
let mut system_profiles = self.system_profiles.write();
let profiles = system_profiles
.entry(scope.clone())
.or_insert_with(SystemProfiles::default);
@ -38,7 +34,7 @@ impl Profiler for SystemProfiler {
fn stop(&self, scope: Cow<'static, str>) {
let now = Instant::now();
let mut system_profiles = self.system_profiles.write().unwrap();
let mut system_profiles = self.system_profiles.write();
let profiles = system_profiles.get_mut(&scope).unwrap();
if let Some(current_start) = profiles.current_start.take() {
profiles.history.push(SystemRunInfo {
@ -54,7 +50,7 @@ pub fn profiler_diagnostic_system(
system_profiler: Res<Box<dyn Profiler>>,
) {
let system_profiler = system_profiler.downcast_ref::<SystemProfiler>().unwrap();
let mut system_profiles = system_profiler.system_profiles.write().unwrap();
let mut system_profiles = system_profiler.system_profiles.write();
for (scope, profiles) in system_profiles.iter_mut() {
if diagnostics.get(profiles.diagnostic_id).is_none() {
diagnostics.add(Diagnostic::new(profiles.diagnostic_id, &scope, 20))

View file

@ -20,3 +20,4 @@ rayon = "1.3"
crossbeam-channel = "0.4.2"
fixedbitset = "0.3.0"
downcast-rs = "1.1.1"
parking_lot = "0.10"

View file

@ -6,11 +6,9 @@ use crate::{
use bevy_hecs::{ArchetypesGeneration, World};
use crossbeam_channel::{Receiver, Sender};
use fixedbitset::FixedBitSet;
use parking_lot::Mutex;
use rayon::ScopeFifo;
use std::{
ops::Range,
sync::{Arc, Mutex},
};
use std::{ops::Range, sync::Arc};
/// Executes each schedule stage in parallel by analyzing system dependencies.
/// System execution order is undefined except under the following conditions:
@ -194,7 +192,7 @@ impl ExecutorStage {
if schedule_changed || archetypes_generation_changed {
// update each system's archetype access to latest world archetypes
for system_index in prepare_system_index_range.clone() {
let mut system = systems[system_index].lock().unwrap();
let mut system = systems[system_index].lock();
system.update_archetype_access(world);
}
@ -202,7 +200,7 @@ impl ExecutorStage {
let mut current_archetype_access = ArchetypeAccess::default();
let mut current_resource_access = TypeAccess::default();
for system_index in prepare_system_index_range.clone() {
let system = systems[system_index].lock().unwrap();
let system = systems[system_index].lock();
let archetype_access = system.archetype_access();
match system.thread_local_execution() {
ThreadLocalExecution::NextFlush => {
@ -215,7 +213,7 @@ impl ExecutorStage {
for earlier_system_index in
prepare_system_index_range.start..system_index
{
let earlier_system = systems[earlier_system_index].lock().unwrap();
let earlier_system = systems[earlier_system_index].lock();
// due to how prepare ranges work, previous systems should all be "NextFlush"
debug_assert_eq!(
@ -295,7 +293,7 @@ impl ExecutorStage {
// handle thread local system
{
let system = system.lock().unwrap();
let system = system.lock();
if let ThreadLocalExecution::Immediate = system.thread_local_execution() {
if systems_currently_running {
// if systems are currently running, we can't run this thread local system yet
@ -311,7 +309,7 @@ impl ExecutorStage {
let sender = self.sender.clone();
self.running_systems.insert(system_index);
scope.spawn_fifo(move |_| {
let mut system = system.lock().unwrap();
let mut system = system.lock();
system.run(world, resources);
sender.send(system_index).unwrap();
});
@ -344,7 +342,7 @@ impl ExecutorStage {
self.running_systems.grow(systems.len());
for (system_index, system) in systems.iter().enumerate() {
let system = system.lock().unwrap();
let system = system.lock();
if system.thread_local_execution() == ThreadLocalExecution::Immediate {
self.thread_local_system_indices.push(system_index);
}
@ -383,7 +381,7 @@ impl ExecutorStage {
if let RunReadyResult::ThreadLocalReady(thread_local_index) = run_ready_result {
// if a thread local system is ready to run, run it exclusively on the main thread
let mut system = systems[thread_local_index].lock().unwrap();
let mut system = systems[thread_local_index].lock();
self.running_systems.insert(thread_local_index);
system.run(world, resources);
system.run_thread_local(world, resources);
@ -423,7 +421,7 @@ impl ExecutorStage {
// "flush"
for system in systems.iter() {
let mut system = system.lock().unwrap();
let mut system = system.lock();
match system.thread_local_execution() {
ThreadLocalExecution::NextFlush => system.run_thread_local(world, resources),
ThreadLocalExecution::Immediate => { /* already ran */ }
@ -445,7 +443,8 @@ mod tests {
};
use bevy_hecs::{Entity, World};
use fixedbitset::FixedBitSet;
use std::sync::{Arc, Mutex};
use parking_lot::Mutex;
use std::sync::Arc;
#[derive(Default)]
struct Counter {
@ -529,25 +528,25 @@ mod tests {
// A systems
fn read_u32(counter: Res<Counter>, _query: Query<&u32>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert!(*count < 2, "should be one of the first two systems to run");
*count += 1;
}
fn write_float(counter: Res<Counter>, _query: Query<&f32>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert!(*count < 2, "should be one of the first two systems to run");
*count += 1;
}
fn read_u32_write_u64(counter: Res<Counter>, _query: Query<(&u32, &mut u64)>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert_eq!(*count, 2, "should always be the 3rd system to run");
*count += 1;
}
fn read_u64(counter: Res<Counter>, _query: Query<&u64>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert_eq!(*count, 3, "should always be the 4th system to run");
*count += 1;
}
@ -560,20 +559,20 @@ mod tests {
// B systems
fn write_u64(counter: Res<Counter>, _query: Query<&mut u64>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert_eq!(*count, 4, "should always be the 5th system to run");
*count += 1;
}
fn thread_local_system(_world: &mut World, resources: &mut Resources) {
let counter = resources.get::<Counter>().unwrap();
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert_eq!(*count, 5, "should always be the 6th system to run");
*count += 1;
}
fn write_f32(counter: Res<Counter>, _query: Query<&mut f32>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert_eq!(*count, 6, "should always be the 7th system to run");
*count += 1;
}
@ -585,7 +584,7 @@ mod tests {
// C systems
fn read_f64_res(counter: Res<Counter>, _f64_res: Res<f64>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert!(
7 == *count || *count == 8,
"should always be the 8th or 9th system to run"
@ -594,7 +593,7 @@ mod tests {
}
fn read_isize_res(counter: Res<Counter>, _isize_res: Res<isize>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert!(
7 == *count || *count == 8,
"should always be the 8th or 9th system to run"
@ -607,13 +606,13 @@ mod tests {
_isize_res: Res<isize>,
_f64_res: ResMut<f64>,
) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert_eq!(*count, 9, "should always be the 10th system to run");
*count += 1;
}
fn write_f64_res(counter: Res<Counter>, _f64_res: ResMut<f64>) {
let mut count = counter.count.lock().unwrap();
let mut count = counter.count.lock();
assert_eq!(*count, 10, "should always be the 11th system to run");
*count += 1;
}
@ -692,7 +691,7 @@ mod tests {
let counter = resources.get::<Counter>().unwrap();
assert_eq!(
*counter.count.lock().unwrap(),
*counter.count.lock(),
11,
"counter should have been incremented once for each system"
);
@ -701,7 +700,7 @@ mod tests {
let mut executor = ParallelExecutor::default();
run_executor_and_validate(&mut executor, &mut schedule, &mut world, &mut resources);
// run again (with counter reset) to ensure executor works correctly across runs
*resources.get::<Counter>().unwrap().count.lock().unwrap() = 0;
*resources.get::<Counter>().unwrap().count.lock() = 0;
run_executor_and_validate(&mut executor, &mut schedule, &mut world, &mut resources);
}
}

View file

@ -4,10 +4,11 @@ use crate::{
system::{System, SystemId, ThreadLocalExecution},
};
use bevy_hecs::World;
use parking_lot::Mutex;
use std::{
borrow::Cow,
collections::{HashMap, HashSet},
sync::{Arc, Mutex},
sync::Arc,
};
/// An ordered collection of stages, which each contain an ordered list of [System]s.
@ -131,7 +132,7 @@ impl Schedule {
for stage_name in self.stage_order.iter() {
if let Some(stage_systems) = self.stages.get_mut(stage_name) {
for system in stage_systems.iter_mut() {
let mut system = system.lock().unwrap();
let mut system = system.lock();
#[cfg(feature = "profiler")]
crate::profiler_start(resources, system.name().clone());
system.update_archetype_access(world);
@ -150,7 +151,7 @@ impl Schedule {
// "flush"
// NOTE: when this is made parallel a full sync is required here
for system in stage_systems.iter_mut() {
let mut system = system.lock().unwrap();
let mut system = system.lock();
match system.thread_local_execution() {
ThreadLocalExecution::NextFlush => {
system.run_thread_local(world, resources)
@ -181,7 +182,7 @@ impl Schedule {
for stage in self.stages.values_mut() {
for system in stage.iter_mut() {
let mut system = system.lock().unwrap();
let mut system = system.lock();
system.initialize(resources);
}
}

View file

@ -1,10 +1,8 @@
use super::SystemId;
use crate::resource::{Resource, Resources};
use bevy_hecs::{Bundle, Component, DynamicBundle, Entity, World};
use std::{
marker::PhantomData,
sync::{Arc, Mutex},
};
use parking_lot::Mutex;
use std::{marker::PhantomData, sync::Arc};
/// A queued command to mutate the current [World] or [Resources]
pub enum Command {
@ -235,7 +233,7 @@ impl Commands {
components: impl DynamicBundle + Send + Sync + 'static,
) -> &mut Self {
{
let mut commands = self.commands.lock().unwrap();
let mut commands = self.commands.lock();
commands.spawn_as_entity(entity, components);
}
self
@ -256,7 +254,7 @@ impl Commands {
pub fn with(&mut self, component: impl Component) -> &mut Self {
{
let mut commands = self.commands.lock().unwrap();
let mut commands = self.commands.lock();
commands.with(component);
}
self
@ -267,7 +265,7 @@ impl Commands {
components: impl DynamicBundle + Send + Sync + 'static,
) -> &mut Self {
{
let mut commands = self.commands.lock().unwrap();
let mut commands = self.commands.lock();
commands.with_bundle(components);
}
self
@ -301,7 +299,7 @@ impl Commands {
}
pub fn write_world<W: WorldWriter + 'static>(&mut self, world_writer: W) -> &mut Self {
self.commands.lock().unwrap().write_world(world_writer);
self.commands.lock().write_world(world_writer);
self
}
@ -309,15 +307,12 @@ impl Commands {
&mut self,
resources_writer: W,
) -> &mut Self {
self.commands
.lock()
.unwrap()
.write_resources(resources_writer);
self.commands.lock().write_resources(resources_writer);
self
}
pub fn apply(&self, world: &mut World, resources: &mut Resources) {
let mut commands = self.commands.lock().unwrap();
let mut commands = self.commands.lock();
for command in commands.commands.drain(..) {
match command {
Command::WriteWorld(writer) => {
@ -329,13 +324,13 @@ impl Commands {
}
pub fn current_entity(&self) -> Option<Entity> {
let commands = self.commands.lock().unwrap();
let commands = self.commands.lock();
commands.current_entity
}
pub fn for_current_entity(&mut self, mut f: impl FnMut(Entity)) -> &mut Self {
{
let commands = self.commands.lock().unwrap();
let commands = self.commands.lock();
let current_entity = commands
.current_entity
.expect("The 'current entity' is not set. You should spawn an entity first.");

View file

@ -39,6 +39,7 @@ downcast-rs = "1.1.1"
thiserror = "1.0"
anyhow = "1.0"
hexasphere = "0.1.5"
parking_lot = "0.10"
[features]
png = ["image/png"]

View file

@ -2,7 +2,8 @@ use crate::{
renderer::{BufferId, RenderContext, TextureId},
texture::Extent3d,
};
use std::sync::{Arc, Mutex};
use parking_lot::Mutex;
use std::sync::Arc;
#[derive(Clone, Debug)]
pub enum Command {
@ -34,7 +35,7 @@ pub struct CommandQueue {
impl CommandQueue {
fn push(&mut self, command: Command) {
self.queue.lock().unwrap().push(command);
self.queue.lock().push(command);
}
pub fn copy_buffer_to_buffer(
@ -81,11 +82,11 @@ impl CommandQueue {
}
pub fn clear(&mut self) {
self.queue.lock().unwrap().clear();
self.queue.lock().clear();
}
pub fn execute(&mut self, render_context: &mut dyn RenderContext) {
for command in self.queue.lock().unwrap().drain(..) {
for command in self.queue.lock().drain(..) {
match command {
Command::CopyBufferToBuffer {
source_buffer,

View file

@ -7,11 +7,8 @@ use crate::{
};
use bevy_asset::{Assets, Handle, HandleUntyped};
use bevy_window::Window;
use std::{
collections::HashMap,
ops::Range,
sync::{Arc, RwLock},
};
use parking_lot::RwLock;
use std::{collections::HashMap, ops::Range, sync::Arc};
#[derive(Default)]
pub struct HeadlessRenderResourceContext {
@ -22,14 +19,11 @@ pub struct HeadlessRenderResourceContext {
impl HeadlessRenderResourceContext {
pub fn add_buffer_info(&self, buffer: BufferId, info: BufferInfo) {
self.buffer_info.write().unwrap().insert(buffer, info);
self.buffer_info.write().insert(buffer, info);
}
pub fn add_texture_descriptor(&self, texture: TextureId, descriptor: TextureDescriptor) {
self.texture_descriptors
.write()
.unwrap()
.insert(texture, descriptor);
self.texture_descriptors.write().insert(texture, descriptor);
}
}
@ -66,7 +60,7 @@ impl RenderResourceContext for HeadlessRenderResourceContext {
_range: Range<u64>,
write: &mut dyn FnMut(&mut [u8], &dyn RenderResourceContext),
) {
let size = self.buffer_info.read().unwrap().get(&id).unwrap().size;
let size = self.buffer_info.read().get(&id).unwrap().size;
let mut buffer = vec![0; size];
write(&mut buffer, self);
}
@ -84,11 +78,11 @@ impl RenderResourceContext for HeadlessRenderResourceContext {
fn create_shader_module(&self, _shader_handle: Handle<Shader>, _shaders: &Assets<Shader>) {}
fn remove_buffer(&self, buffer: BufferId) {
self.buffer_info.write().unwrap().remove(&buffer);
self.buffer_info.write().remove(&buffer);
}
fn remove_texture(&self, texture: TextureId) {
self.texture_descriptors.write().unwrap().remove(&texture);
self.texture_descriptors.write().remove(&texture);
}
fn remove_sampler(&self, _sampler: SamplerId) {}
@ -101,7 +95,6 @@ impl RenderResourceContext for HeadlessRenderResourceContext {
) {
self.asset_resources
.write()
.unwrap()
.insert((handle, index), render_resource);
}
@ -110,11 +103,7 @@ impl RenderResourceContext for HeadlessRenderResourceContext {
handle: HandleUntyped,
index: usize,
) -> Option<RenderResourceId> {
self.asset_resources
.write()
.unwrap()
.get(&(handle, index))
.cloned()
self.asset_resources.write().get(&(handle, index)).cloned()
}
fn create_render_pipeline(
@ -135,16 +124,13 @@ impl RenderResourceContext for HeadlessRenderResourceContext {
fn create_shader_module_from_source(&self, _shader_handle: Handle<Shader>, _shader: &Shader) {}
fn remove_asset_resource_untyped(&self, handle: HandleUntyped, index: usize) {
self.asset_resources
.write()
.unwrap()
.remove(&(handle, index));
self.asset_resources.write().remove(&(handle, index));
}
fn clear_bind_groups(&self) {}
fn get_buffer_info(&self, buffer: BufferId) -> Option<BufferInfo> {
self.buffer_info.read().unwrap().get(&buffer).cloned()
self.buffer_info.read().get(&buffer).cloned()
}
fn bind_group_descriptor_exists(

View file

@ -4,7 +4,8 @@ use crate::{
renderer::{BufferUsage, RenderResourceContext},
};
use bevy_ecs::Res;
use std::sync::{Arc, RwLock};
use parking_lot::RwLock;
use std::sync::Arc;
// TODO: Instead of allocating small "exact size" buffers each frame, this should use multiple large shared buffers and probably
// a long-living "cpu mapped" staging buffer. I'm punting that for now because I don't know the best way to use wgpu's new async
@ -53,7 +54,7 @@ impl SharedBuffers {
..Default::default()
});
let mut command_queue = self.command_queue.write().unwrap();
let mut command_queue = self.command_queue.write();
command_queue.copy_buffer_to_buffer(
staging_buffer,
0,
@ -62,7 +63,7 @@ impl SharedBuffers {
size as u64,
);
let mut buffers = self.buffers.write().unwrap();
let mut buffers = self.buffers.write();
buffers.push(staging_buffer);
buffers.push(destination_buffer);
Some(RenderResourceBinding::Buffer {
@ -77,14 +78,14 @@ impl SharedBuffers {
// TODO: remove this when this actually uses shared buffers
pub fn free_buffers(&self) {
let mut buffers = self.buffers.write().unwrap();
let mut buffers = self.buffers.write();
for buffer in buffers.drain(..) {
self.render_resource_context.remove_buffer(buffer)
}
}
pub fn reset_command_queue(&self) -> CommandQueue {
let mut command_queue = self.command_queue.write().unwrap();
let mut command_queue = self.command_queue.write();
std::mem::take(&mut *command_queue)
}
}

View file

@ -22,4 +22,5 @@ serde = { version = "1.0", features = ["derive"]}
bevy_ron = { path = "../bevy_ron", version = "0.1.0" }
uuid = { version = "0.8", features = ["v4", "serde"] }
anyhow = "1.0"
thiserror = "1.0"
thiserror = "1.0"
parking_lot = "0.10.2"

View file

@ -4,11 +4,9 @@ use bevy_asset::AssetLoader;
use bevy_ecs::{FromResources, Resources};
use bevy_property::PropertyTypeRegistry;
use bevy_type_registry::TypeRegistry;
use parking_lot::RwLock;
use serde::de::DeserializeSeed;
use std::{
path::Path,
sync::{Arc, RwLock},
};
use std::{path::Path, sync::Arc};
pub struct SceneLoader {
property_type_registry: Arc<RwLock<PropertyTypeRegistry>>,
@ -25,7 +23,7 @@ impl FromResources for SceneLoader {
impl AssetLoader<Scene> for SceneLoader {
fn from_bytes(&self, _asset_path: &Path, bytes: Vec<u8>) -> Result<Scene> {
let registry = self.property_type_registry.read().unwrap();
let registry = self.property_type_registry.read();
let mut deserializer = bevy_ron::de::Deserializer::from_bytes(&bytes)?;
let scene_deserializer = SceneDeserializer {
property_type_registry: &registry,

View file

@ -85,7 +85,7 @@ impl SceneSpawner {
mut instance_info: Option<&mut InstanceInfo>,
) -> Result<(), SceneSpawnError> {
let type_registry = resources.get::<TypeRegistry>().unwrap();
let component_registry = type_registry.component.read().unwrap();
let component_registry = type_registry.component.read();
let scenes = resources.get::<Assets<Scene>>().unwrap();
let scene = scenes
.get(&scene_handle)

View file

@ -127,7 +127,7 @@ pub trait BuildChildren {
impl BuildChildren for Commands {
fn with_children(&mut self, mut parent: impl FnMut(&mut ChildBuilder)) -> &mut Self {
{
let mut commands = self.commands.lock().unwrap();
let mut commands = self.commands.lock();
let current_entity = commands.current_entity.expect("Cannot add children because the 'current entity' is not set. You should spawn an entity first.");
commands.current_entity = None;
let push_children = {
@ -150,7 +150,7 @@ impl BuildChildren for Commands {
fn push_children(&mut self, parent: Entity, children: &[Entity]) -> &mut Self {
{
let mut commands = self.commands.lock().unwrap();
let mut commands = self.commands.lock();
commands.write_world(PushChildren {
children: SmallVec::from(children),
parent,
@ -161,7 +161,7 @@ impl BuildChildren for Commands {
fn insert_children(&mut self, parent: Entity, index: usize, children: &[Entity]) -> &mut Self {
{
let mut commands = self.commands.lock().unwrap();
let mut commands = self.commands.lock();
commands.write_world(InsertChildren {
children: SmallVec::from(children),
index,

View file

@ -16,4 +16,5 @@ bevy_ecs = { path = "../bevy_ecs", version = "0.1" }
bevy_property = { path = "../bevy_property", version = "0.1" }
# other
serde = { version = "1", features = ["derive"] }
serde = { version = "1", features = ["derive"] }
parking_lot = "0.10.2"

View file

@ -22,8 +22,8 @@ impl RegisterType for AppBuilder {
{
{
let type_registry = self.app.resources.get::<TypeRegistry>().unwrap();
type_registry.component.write().unwrap().register::<T>();
type_registry.property.write().unwrap().register::<T>();
type_registry.component.write().register::<T>();
type_registry.property.write().register::<T>();
}
self
}
@ -34,7 +34,7 @@ impl RegisterType for AppBuilder {
{
{
let type_registry = self.app.resources.get::<TypeRegistry>().unwrap();
type_registry.property.write().unwrap().register::<T>();
type_registry.property.write().register::<T>();
}
self
}
@ -45,7 +45,7 @@ impl RegisterType for AppBuilder {
{
{
let type_registry = self.app.resources.get::<TypeRegistry>().unwrap();
type_registry.property.write().unwrap().register::<T>();
type_registry.property.write().register::<T>();
}
self
}

View file

@ -1,9 +1,10 @@
use bevy_ecs::{Archetype, Component, Entity, FromResources, Resources, World};
use bevy_property::{Properties, Property, PropertyTypeRegistration, PropertyTypeRegistry};
use parking_lot::RwLock;
use std::{
any::TypeId,
collections::{HashMap, HashSet},
sync::{Arc, RwLock},
sync::Arc,
};
#[derive(Clone, Default)]

View file

@ -29,4 +29,5 @@ wgpu = { version = "0.1.0", package = "cart-tmp-wgpu" }
pollster = "0.2.0"
log = { version = "0.4", features = ["release_max_level_info"] }
crossbeam-channel = "0.4.2"
crossbeam-utils = "0.7.2"
crossbeam-utils = "0.7.2"
parking_lot = "0.10.2"

View file

@ -95,7 +95,6 @@ impl WgpuResourceDiagnosticsPlugin {
.resources
.window_surfaces
.read()
.unwrap()
.len() as f64,
);
@ -105,7 +104,6 @@ impl WgpuResourceDiagnosticsPlugin {
.resources
.window_swap_chains
.read()
.unwrap()
.len() as f64,
);
@ -115,58 +113,32 @@ impl WgpuResourceDiagnosticsPlugin {
.resources
.swap_chain_frames
.read()
.unwrap()
.len() as f64,
);
diagnostics.add_measurement(
Self::BUFFERS,
render_resource_context
.resources
.buffers
.read()
.unwrap()
.len() as f64,
render_resource_context.resources.buffers.read().len() as f64,
);
diagnostics.add_measurement(
Self::TEXTURES,
render_resource_context
.resources
.textures
.read()
.unwrap()
.len() as f64,
render_resource_context.resources.textures.read().len() as f64,
);
diagnostics.add_measurement(
Self::TEXTURE_VIEWS,
render_resource_context
.resources
.texture_views
.read()
.unwrap()
.len() as f64,
render_resource_context.resources.texture_views.read().len() as f64,
);
diagnostics.add_measurement(
Self::SAMPLERS,
render_resource_context
.resources
.samplers
.read()
.unwrap()
.len() as f64,
render_resource_context.resources.samplers.read().len() as f64,
);
diagnostics.add_measurement(
Self::BIND_GROUP_IDS,
render_resource_context
.resources
.bind_groups
.read()
.unwrap()
.len() as f64,
render_resource_context.resources.bind_groups.read().len() as f64,
);
let mut bind_group_count = 0;
@ -174,7 +146,6 @@ impl WgpuResourceDiagnosticsPlugin {
.resources
.bind_groups
.read()
.unwrap()
.values()
{
bind_group_count += bind_group.bind_groups.len();
@ -188,7 +159,6 @@ impl WgpuResourceDiagnosticsPlugin {
.resources
.bind_group_layouts
.read()
.unwrap()
.len() as f64,
);
@ -198,7 +168,6 @@ impl WgpuResourceDiagnosticsPlugin {
.resources
.shader_modules
.read()
.unwrap()
.len() as f64,
);
@ -208,7 +177,6 @@ impl WgpuResourceDiagnosticsPlugin {
.resources
.render_pipelines
.read()
.unwrap()
.len() as f64,
);
}

View file

@ -4,10 +4,8 @@ use bevy_render::{
render_graph::{Edge, NodeId, ResourceSlots, StageBorrow},
renderer::RenderResourceContext,
};
use std::{
collections::HashMap,
sync::{Arc, RwLock},
};
use parking_lot::RwLock;
use std::{collections::HashMap, sync::Arc};
pub struct WgpuRenderGraphExecutor {
pub max_thread_count: usize,
@ -56,7 +54,7 @@ impl WgpuRenderGraphExecutor {
..
} = node_state.edges.get_input_slot_edge(i).unwrap()
{
let node_outputs = node_outputs.read().unwrap();
let node_outputs = node_outputs.read();
let outputs = if let Some(outputs) = node_outputs.get(output_node) {
outputs
} else {
@ -80,7 +78,6 @@ impl WgpuRenderGraphExecutor {
node_outputs
.write()
.unwrap()
.insert(node_state.id, node_state.output_slots.clone());
}
}

View file

@ -33,7 +33,7 @@ impl WgpuRenderResourceContext {
}
pub fn set_window_surface(&self, window_id: WindowId, surface: wgpu::Surface) {
let mut window_surfaces = self.resources.window_surfaces.write().unwrap();
let mut window_surfaces = self.resources.window_surfaces.write();
window_surfaces.insert(window_id, surface);
}
@ -46,7 +46,7 @@ impl WgpuRenderResourceContext {
destination_offset: u64,
size: u64,
) {
let buffers = self.resources.buffers.read().unwrap();
let buffers = self.resources.buffers.read();
let source = buffers.get(&source_buffer).unwrap();
let destination = buffers.get(&destination_buffer).unwrap();
@ -71,8 +71,8 @@ impl WgpuRenderResourceContext {
destination_mip_level: u32,
size: Extent3d,
) {
let buffers = self.resources.buffers.read().unwrap();
let textures = self.resources.textures.read().unwrap();
let buffers = self.resources.buffers.read();
let textures = self.resources.textures.read();
let source = buffers.get(&source_buffer).unwrap();
let destination = textures.get(&destination_texture).unwrap();
@ -103,14 +103,13 @@ impl WgpuRenderResourceContext {
.resources
.bind_group_layouts
.read()
.unwrap()
.get(&descriptor.id)
.is_some()
{
return;
}
let mut bind_group_layouts = self.resources.bind_group_layouts.write().unwrap();
let mut bind_group_layouts = self.resources.bind_group_layouts.write();
// TODO: consider re-checking existence here
let bind_group_layout_binding = descriptor
.bindings
@ -143,8 +142,8 @@ impl WgpuRenderResourceContext {
}
fn try_next_swap_chain_texture(&self, window_id: bevy_window::WindowId) -> Option<TextureId> {
let mut window_swap_chains = self.resources.window_swap_chains.write().unwrap();
let mut swap_chain_outputs = self.resources.swap_chain_frames.write().unwrap();
let mut window_swap_chains = self.resources.window_swap_chains.write();
let mut swap_chain_outputs = self.resources.swap_chain_frames.write();
let window_swap_chain = window_swap_chains.get_mut(&window_id).unwrap();
let next_texture = window_swap_chain.get_next_frame().ok()?;
@ -156,7 +155,7 @@ impl WgpuRenderResourceContext {
impl RenderResourceContext for WgpuRenderResourceContext {
fn create_sampler(&self, sampler_descriptor: &SamplerDescriptor) -> SamplerId {
let mut samplers = self.resources.samplers.write().unwrap();
let mut samplers = self.resources.samplers.write();
let descriptor: wgpu::SamplerDescriptor = (*sampler_descriptor).wgpu_into();
let sampler = self.device.create_sampler(&descriptor);
@ -167,9 +166,9 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn create_texture(&self, texture_descriptor: TextureDescriptor) -> TextureId {
let mut textures = self.resources.textures.write().unwrap();
let mut texture_views = self.resources.texture_views.write().unwrap();
let mut texture_descriptors = self.resources.texture_descriptors.write().unwrap();
let mut textures = self.resources.textures.write();
let mut texture_views = self.resources.texture_views.write();
let mut texture_descriptors = self.resources.texture_descriptors.write();
let descriptor: wgpu::TextureDescriptor = (&texture_descriptor).wgpu_into();
let texture = self.device.create_texture(&descriptor);
@ -184,8 +183,8 @@ impl RenderResourceContext for WgpuRenderResourceContext {
fn create_buffer(&self, buffer_info: BufferInfo) -> BufferId {
// TODO: consider moving this below "create" for efficiency
let mut buffer_infos = self.resources.buffer_infos.write().unwrap();
let mut buffers = self.resources.buffers.write().unwrap();
let mut buffer_infos = self.resources.buffer_infos.write();
let mut buffers = self.resources.buffers.write();
let buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
label: None,
@ -202,8 +201,8 @@ impl RenderResourceContext for WgpuRenderResourceContext {
fn create_buffer_with_data(&self, mut buffer_info: BufferInfo, data: &[u8]) -> BufferId {
// TODO: consider moving this below "create" for efficiency
let mut buffer_infos = self.resources.buffer_infos.write().unwrap();
let mut buffers = self.resources.buffers.write().unwrap();
let mut buffer_infos = self.resources.buffer_infos.write();
let mut buffers = self.resources.buffers.write();
buffer_info.size = data.len();
let buffer = self
@ -217,17 +216,17 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn remove_buffer(&self, buffer: BufferId) {
let mut buffers = self.resources.buffers.write().unwrap();
let mut buffer_infos = self.resources.buffer_infos.write().unwrap();
let mut buffers = self.resources.buffers.write();
let mut buffer_infos = self.resources.buffer_infos.write();
buffers.remove(&buffer);
buffer_infos.remove(&buffer);
}
fn remove_texture(&self, texture: TextureId) {
let mut textures = self.resources.textures.write().unwrap();
let mut texture_views = self.resources.texture_views.write().unwrap();
let mut texture_descriptors = self.resources.texture_descriptors.write().unwrap();
let mut textures = self.resources.textures.write();
let mut texture_views = self.resources.texture_views.write();
let mut texture_descriptors = self.resources.texture_descriptors.write();
textures.remove(&texture);
texture_views.remove(&texture);
@ -235,12 +234,12 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn remove_sampler(&self, sampler: SamplerId) {
let mut samplers = self.resources.samplers.write().unwrap();
let mut samplers = self.resources.samplers.write();
samplers.remove(&sampler);
}
fn create_shader_module_from_source(&self, shader_handle: Handle<Shader>, shader: &Shader) {
let mut shader_modules = self.resources.shader_modules.write().unwrap();
let mut shader_modules = self.resources.shader_modules.write();
let shader_module = self
.device
.create_shader_module(wgpu::ShaderModuleSource::SpirV(&shader.get_spirv(None)));
@ -252,7 +251,6 @@ impl RenderResourceContext for WgpuRenderResourceContext {
.resources
.shader_modules
.read()
.unwrap()
.get(&shader_handle)
.is_some()
{
@ -263,8 +261,8 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn create_swap_chain(&self, window: &Window) {
let surfaces = self.resources.window_surfaces.read().unwrap();
let mut window_swap_chains = self.resources.window_swap_chains.write().unwrap();
let surfaces = self.resources.window_surfaces.read();
let mut window_swap_chains = self.resources.window_swap_chains.write();
let swap_chain_descriptor: wgpu::SwapChainDescriptor = window.wgpu_into();
let surface = surfaces
@ -281,11 +279,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
if let Some(texture_id) = self.try_next_swap_chain_texture(window.id) {
texture_id
} else {
self.resources
.window_swap_chains
.write()
.unwrap()
.remove(&window.id);
self.resources.window_swap_chains.write().remove(&window.id);
self.create_swap_chain(window);
self.try_next_swap_chain_texture(window.id)
.expect("Failed to acquire next swap chain texture!")
@ -293,12 +287,12 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn drop_swap_chain_texture(&self, texture: TextureId) {
let mut swap_chain_outputs = self.resources.swap_chain_frames.write().unwrap();
let mut swap_chain_outputs = self.resources.swap_chain_frames.write();
swap_chain_outputs.remove(&texture);
}
fn drop_all_swap_chain_textures(&self) {
let mut swap_chain_outputs = self.resources.swap_chain_frames.write().unwrap();
let mut swap_chain_outputs = self.resources.swap_chain_frames.write();
swap_chain_outputs.clear();
}
@ -308,7 +302,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
render_resource: RenderResourceId,
index: usize,
) {
let mut asset_resources = self.resources.asset_resources.write().unwrap();
let mut asset_resources = self.resources.asset_resources.write();
asset_resources.insert((handle, index), render_resource);
}
@ -317,12 +311,12 @@ impl RenderResourceContext for WgpuRenderResourceContext {
handle: HandleUntyped,
index: usize,
) -> Option<RenderResourceId> {
let asset_resources = self.resources.asset_resources.read().unwrap();
let asset_resources = self.resources.asset_resources.read();
asset_resources.get(&(handle, index)).cloned()
}
fn remove_asset_resource_untyped(&self, handle: HandleUntyped, index: usize) {
let mut asset_resources = self.resources.asset_resources.write().unwrap();
let mut asset_resources = self.resources.asset_resources.write();
asset_resources.remove(&(handle, index));
}
@ -336,7 +330,6 @@ impl RenderResourceContext for WgpuRenderResourceContext {
.resources
.render_pipelines
.read()
.unwrap()
.get(&pipeline_handle)
.is_some()
{
@ -348,7 +341,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
self.create_bind_group_layout(&bind_group_descriptor);
}
let bind_group_layouts = self.resources.bind_group_layouts.read().unwrap();
let bind_group_layouts = self.resources.bind_group_layouts.read();
// setup and collect bind group layouts
let bind_group_layouts = layout
.bind_groups
@ -380,7 +373,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
self.create_shader_module(fragment_handle, shaders);
}
let shader_modules = self.resources.shader_modules.read().unwrap();
let shader_modules = self.resources.shader_modules.read();
let vertex_shader_module = shader_modules
.get(&pipeline_descriptor.shader_stages.vertex)
.unwrap();
@ -428,7 +421,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
let render_pipeline = self
.device
.create_render_pipeline(&render_pipeline_descriptor);
let mut render_pipelines = self.resources.render_pipelines.write().unwrap();
let mut render_pipelines = self.resources.render_pipelines.write();
render_pipelines.insert(pipeline_handle, render_pipeline);
}
@ -436,7 +429,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
&self,
bind_group_descriptor_id: BindGroupDescriptorId,
) -> bool {
let bind_group_layouts = self.resources.bind_group_layouts.read().unwrap();
let bind_group_layouts = self.resources.bind_group_layouts.read();
bind_group_layouts.get(&bind_group_descriptor_id).is_some()
}
@ -453,11 +446,11 @@ impl RenderResourceContext for WgpuRenderResourceContext {
"start creating bind group for RenderResourceSet {:?}",
bind_group.id
);
let texture_views = self.resources.texture_views.read().unwrap();
let samplers = self.resources.samplers.read().unwrap();
let buffers = self.resources.buffers.read().unwrap();
let bind_group_layouts = self.resources.bind_group_layouts.read().unwrap();
let mut bind_groups = self.resources.bind_groups.write().unwrap();
let texture_views = self.resources.texture_views.read();
let samplers = self.resources.samplers.read();
let buffers = self.resources.buffers.read();
let bind_group_layouts = self.resources.bind_group_layouts.read();
let mut bind_groups = self.resources.bind_groups.write();
let bindings = bind_group
.indexed_bindings
@ -508,16 +501,11 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn clear_bind_groups(&self) {
self.resources.bind_groups.write().unwrap().clear();
self.resources.bind_groups.write().clear();
}
fn get_buffer_info(&self, buffer: BufferId) -> Option<BufferInfo> {
self.resources
.buffer_infos
.read()
.unwrap()
.get(&buffer)
.cloned()
self.resources.buffer_infos.read().get(&buffer).cloned()
}
fn write_mapped_buffer(
@ -527,7 +515,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
write: &mut dyn FnMut(&mut [u8], &dyn RenderResourceContext),
) {
let buffer = {
let buffers = self.resources.buffers.read().unwrap();
let buffers = self.resources.buffers.read();
buffers.get(&id).unwrap().clone()
};
let buffer_slice = buffer.slice(range);
@ -536,7 +524,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn map_buffer(&self, id: BufferId) {
let buffers = self.resources.buffers.read().unwrap();
let buffers = self.resources.buffers.read();
let buffer = buffers.get(&id).unwrap();
let buffer_slice = buffer.slice(..);
let data = buffer_slice.map_async(wgpu::MapMode::Write);
@ -547,7 +535,7 @@ impl RenderResourceContext for WgpuRenderResourceContext {
}
fn unmap_buffer(&self, id: BufferId) {
let buffers = self.resources.buffers.read().unwrap();
let buffers = self.resources.buffers.read();
let buffer = buffers.get(&id).unwrap();
buffer.unmap();
}

View file

@ -6,10 +6,8 @@ use bevy_render::{
texture::TextureDescriptor,
};
use bevy_window::WindowId;
use std::{
collections::HashMap,
sync::{Arc, RwLock, RwLockReadGuard},
};
use parking_lot::{RwLock, RwLockReadGuard};
use std::{collections::HashMap, sync::Arc};
#[derive(Default)]
pub struct WgpuBindGroupInfo {
@ -89,11 +87,11 @@ pub struct WgpuResources {
impl WgpuResources {
pub fn read(&self) -> WgpuResourcesReadLock {
WgpuResourcesReadLock {
buffers: self.buffers.read().unwrap(),
textures: self.texture_views.read().unwrap(),
swap_chain_frames: self.swap_chain_frames.read().unwrap(),
render_pipelines: self.render_pipelines.read().unwrap(),
bind_groups: self.bind_groups.read().unwrap(),
buffers: self.buffers.read(),
textures: self.texture_views.read(),
swap_chain_frames: self.swap_chain_frames.read(),
render_pipelines: self.render_pipelines.read(),
bind_groups: self.bind_groups.read(),
}
}
@ -102,12 +100,7 @@ impl WgpuResources {
bind_group_descriptor_id: BindGroupDescriptorId,
bind_group_id: BindGroupId,
) -> bool {
if let Some(bind_group_info) = self
.bind_groups
.read()
.unwrap()
.get(&bind_group_descriptor_id)
{
if let Some(bind_group_info) = self.bind_groups.read().get(&bind_group_descriptor_id) {
bind_group_info.bind_groups.get(&bind_group_id).is_some()
} else {
false

View file

@ -65,7 +65,7 @@ fn setup(type_registry: Res<TypeRegistry>) {
// All properties can be serialized.
// If you #[derive(Properties)] your type doesn't even need to directly implement the Serde trait!
let registry = type_registry.property.read().unwrap();
let registry = type_registry.property.read();
let ron_string = serialize_property(&test, &registry);
println!("{}\n", ron_string);

View file

@ -117,14 +117,12 @@ fn save_scene_system(_world: &mut World, resources: &mut Resources) {
// The component registry resource contains information about all registered components. This is used to construct scenes.
let type_registry = resources.get::<TypeRegistry>().unwrap();
let scene = Scene::from_world(&world, &type_registry.component.read().unwrap());
let scene = Scene::from_world(&world, &type_registry.component.read());
// Scenes can be serialized like this:
println!(
"{}",
scene
.serialize_ron(&type_registry.property.read().unwrap())
.unwrap()
scene.serialize_ron(&type_registry.property.read()).unwrap()
);
// TODO: save scene