disable threaded graph execution until leak is fixed

Carter Anderson 2020-05-05 13:37:13 -07:00
parent cb49e14ae0
commit 800c4342ed

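This change comments out the `crossbeam_utils::thread::scope` / `s.spawn` fan-out in `WgpuRenderGraphExecutor`, so each chunk of render-graph jobs now runs serially on the calling thread; the per-chunk `Arc` clones and the `sender.send(render_context.finish())` hand-off are kept, so the downstream command-buffer collection loop is unchanged. For context, here is a minimal standalone sketch of the fan-out/fan-in pattern being disabled, assuming `crossbeam-utils` and `crossbeam-channel` as dependencies (which this file already uses); the job and output types are toy stand-ins, not Bevy's actual graph types:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

fn main() {
    // Toy stand-ins: "jobs" are numbers, "node outputs" is a shared map.
    let mut jobs: Vec<u32> = (0..10).collect();
    let node_outputs: Arc<RwLock<HashMap<usize, u32>>> = Arc::new(RwLock::new(HashMap::new()));

    let max_thread_count = 4;
    // Round-up division guarantees at most `max_thread_count` chunks.
    let chunk_size = (jobs.len() + max_thread_count - 1) / max_thread_count;
    // Capacity matches the worker count, so sends never block even though
    // results are only drained after the scope joins.
    let (sender, receiver) = crossbeam_channel::bounded(max_thread_count);

    let mut actual_thread_count = 0;
    crossbeam_utils::thread::scope(|s| {
        for (chunk_index, jobs_chunk) in jobs.chunks_mut(chunk_size).enumerate() {
            actual_thread_count += 1;
            let sender = sender.clone();
            let node_outputs = node_outputs.clone();
            s.spawn(move |_| {
                let mut finished: u32 = 0;
                for job in jobs_chunk.iter_mut() {
                    *job *= 2; // "execute" the job
                    finished += *job;
                }
                node_outputs.write().unwrap().insert(chunk_index, finished);
                // Each worker sends its result back, like render_context.finish().
                sender.send(finished).unwrap();
            });
        }
    })
    .unwrap(); // the scope joins every spawned thread before returning

    // Fan-in: collect exactly one result per spawned worker.
    let results: Vec<u32> = (0..actual_thread_count)
        .map(|_| receiver.recv().unwrap())
        .collect();
    println!("per-chunk sums: {:?}", results);
}
```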

@@ -4,7 +4,10 @@ use bevy_render::{
     renderer::RenderResources,
 };
 use legion::prelude::{Resources, World};
-use std::{collections::HashMap, sync::{RwLock, Arc}};
+use std::{
+    collections::HashMap,
+    sync::{Arc, RwLock},
+};
 
 pub struct WgpuRenderGraphExecutor {
     pub max_thread_count: usize,
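The hunk below leaves the chunking math intact: `chunk_size` is computed with the round-up integer-division idiom `(a + b - 1) / b`, which also bounds the number of chunks (and thus channel sends) by `max_thread_count`. A quick illustration of the idiom, with `div_ceil` as a local helper written for this sketch rather than anything in the codebase:

```rust
/// ceil(a / b) for positive integers, without floating point.
fn div_ceil(a: usize, b: usize) -> usize {
    (a + b - 1) / b
}

fn main() {
    // chunk_size for 10 jobs across at most 4 threads:
    assert_eq!(div_ceil(10, 4), 3);
    // ...which yields ceil(10 / 3) = 4 chunks, never more than 4.
    assert_eq!(div_ceil(10, 3), 4);
    // Exact division has no remainder to round up.
    assert_eq!(div_ceil(8, 4), 2);
    println!("ok");
}
```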
@@ -32,64 +35,59 @@ impl WgpuRenderGraphExecutor {
         let (sender, receiver) = crossbeam_channel::bounded(self.max_thread_count);
         let chunk_size = (stage.jobs.len() + self.max_thread_count - 1) / self.max_thread_count; // divide ints rounding remainder up
         let mut actual_thread_count = 0;
-        crossbeam_utils::thread::scope(|s| {
-            for jobs_chunk in stage.jobs.chunks_mut(chunk_size) {
-                let sender = sender.clone();
-                let world = &*world;
-                actual_thread_count += 1;
-                let device = device.clone();
-                let wgpu_render_resources = wgpu_render_resources.clone();
-                let node_outputs = node_outputs.clone();
-                s.spawn(move |_| {
-                    let mut render_context =
-                        WgpuRenderContext::new(device, wgpu_render_resources);
-                    for job in jobs_chunk.iter_mut() {
-                        for node_state in job.node_states.iter_mut() {
-                            // bind inputs from connected node outputs
-                            for (i, mut input_slot) in
-                                node_state.input_slots.iter_mut().enumerate()
-                            {
-                                if let Edge::SlotEdge {
-                                    output_node,
-                                    output_index,
-                                    ..
-                                } = node_state.edges.get_input_slot_edge(i).unwrap()
-                                {
-                                    let node_outputs = node_outputs.read().unwrap();
-                                    let outputs =
-                                        if let Some(outputs) = node_outputs.get(output_node) {
-                                            outputs
-                                        } else {
-                                            panic!("node inputs not set")
-                                        };
-                                    let output_resource = outputs
-                                        .get(*output_index)
-                                        .expect("output should be set");
-                                    input_slot.resource = Some(output_resource);
-                                } else {
-                                    panic!("no edge connected to input")
-                                }
-                            }
-                            node_state.node.update(
-                                world,
-                                resources,
-                                &mut render_context,
-                                &node_state.input_slots,
-                                &mut node_state.output_slots,
-                            );
-                            node_outputs.write().unwrap()
-                                .insert(node_state.id, node_state.output_slots.clone());
-                        }
-                    }
-                    sender
-                        .send(render_context.finish())
-                        .unwrap();
-                });
-            }
-        })
-        .unwrap();
+        // crossbeam_utils::thread::scope(|s| {
+        for jobs_chunk in stage.jobs.chunks_mut(chunk_size) {
+            let sender = sender.clone();
+            let world = &*world;
+            actual_thread_count += 1;
+            let device = device.clone();
+            let wgpu_render_resources = wgpu_render_resources.clone();
+            let node_outputs = node_outputs.clone();
+            // s.spawn(move |_| {
+            let mut render_context = WgpuRenderContext::new(device, wgpu_render_resources);
+            for job in jobs_chunk.iter_mut() {
+                for node_state in job.node_states.iter_mut() {
+                    // bind inputs from connected node outputs
+                    for (i, mut input_slot) in node_state.input_slots.iter_mut().enumerate() {
+                        if let Edge::SlotEdge {
+                            output_node,
+                            output_index,
+                            ..
+                        } = node_state.edges.get_input_slot_edge(i).unwrap()
+                        {
+                            let node_outputs = node_outputs.read().unwrap();
+                            let outputs = if let Some(outputs) = node_outputs.get(output_node) {
+                                outputs
+                            } else {
+                                panic!("node inputs not set")
+                            };
+                            let output_resource =
+                                outputs.get(*output_index).expect("output should be set");
+                            input_slot.resource = Some(output_resource);
+                        } else {
+                            panic!("no edge connected to input")
+                        }
+                    }
+                    node_state.node.update(
+                        world,
+                        resources,
+                        &mut render_context,
+                        &node_state.input_slots,
+                        &mut node_state.output_slots,
+                    );
+                    node_outputs
+                        .write()
+                        .unwrap()
+                        .insert(node_state.id, node_state.output_slots.clone());
+                }
+            }
+            sender.send(render_context.finish()).unwrap();
+            // });
+        }
+        // })
+        // .unwrap();
         let mut command_buffers = Vec::new();
         for _i in 0..actual_thread_count {