Add the ability to control num_threads and stack_size of rayon::ThreadPool

Fabian Würfl 2020-08-14 19:15:29 +02:00
parent 95dce3ac72
commit 458a169ad2
4 changed files with 72 additions and 0 deletions


@@ -122,6 +122,10 @@ path = "examples/app/headless.rs"
name = "plugin"
path = "examples/app/plugin.rs"

[[example]]
name = "thread_pool_resources"
path = "examples/app/thread_pool_resources.rs"

[[example]]
name = "hot_asset_reloading"
path = "examples/asset/hot_asset_reloading.rs"


@@ -68,6 +68,53 @@ impl ParallelExecutor {
    }
}

/// This can be added as an app resource to control the global `rayon::ThreadPool` used by the ECS.
// Dev internal note: We cannot directly expose a ThreadPoolBuilder here as it does not implement Send and Sync.
#[derive(Debug, Default, Clone)]
pub struct ParallelExecutorOptions {
    /// If set, the thread pool will use at most this many threads. See `rayon::ThreadPoolBuilder::num_threads`.
    num_threads: Option<usize>,
    /// If set, each of the thread pool's workers will use this stack size. See `rayon::ThreadPoolBuilder::stack_size`.
    stack_size: Option<usize>,
    // TODO: Do we also need/want to expose other features (*_handler, etc.)?
}

impl ParallelExecutorOptions {
    /// Creates a new `ParallelExecutorOptions` instance
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the num_threads option, using the builder pattern
    pub fn with_num_threads(mut self, num_threads: Option<usize>) -> Self {
        self.num_threads = num_threads;
        self
    }

    /// Sets the stack_size option, using the builder pattern. WARNING: Only use this if you know what you're doing,
    /// otherwise your application may run into stability and performance issues.
    pub fn with_stack_size(mut self, stack_size: Option<usize>) -> Self {
        self.stack_size = stack_size;
        self
    }

    /// Creates a new `rayon::ThreadPoolBuilder` based on the current options.
    pub(crate) fn create_builder(&self) -> rayon::ThreadPoolBuilder {
        let mut builder = rayon::ThreadPoolBuilder::new();

        if let Some(num_threads) = self.num_threads {
            builder = builder.num_threads(num_threads);
        }

        if let Some(stack_size) = self.stack_size {
            builder = builder.stack_size(stack_size);
        }

        builder
    }
}

#[derive(Debug, Clone)]
pub struct ExecutorStage {
    /// each system's set of dependencies

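For readers who want to see what create_builder() maps onto, here is a minimal standalone sketch using rayon directly (not part of the diff). It builds a local pool purely for demonstration, whereas the executor configures the global pool; the 8 MiB stack size is an arbitrary example value.

use rayon::ThreadPoolBuilder;

fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    // Mirrors what create_builder() sets up when both options are Some(..).
    let pool = ThreadPoolBuilder::new()
        .num_threads(4)              // corresponds to with_num_threads(Some(4))
        .stack_size(8 * 1024 * 1024) // corresponds to with_stack_size(Some(8 MiB)); arbitrary value
        .build()?;                   // local pool for demonstration; the executor uses build_global()
    pool.install(|| {
        println!("running on {} threads", rayon::current_num_threads());
    });
    Ok(())
}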

@@ -1,5 +1,6 @@
use crate::{
    resource::Resources,
    schedule::ParallelExecutorOptions,
    system::{System, SystemId, ThreadLocalExecution},
};
use bevy_hecs::World;
@@ -169,6 +170,15 @@ impl Schedule {
            return;
        }

        let thread_pool_builder = resources
            .get::<ParallelExecutorOptions>()
            .map(|options| (*options).clone())
            .unwrap_or_else(|| ParallelExecutorOptions::default())
            .create_builder();

        // For now, bevy_ecs only uses the global thread pool, so it is sufficient to configure it once here.
        // Don't call .unwrap() here: this function can run more than once, and build_global() fails on every
        // call after the first, so the error is deliberately ignored.
        let _ = thread_pool_builder.build_global();

        for stage in self.stages.values_mut() {
            for system in stage.iter_mut() {
                let mut system = system.lock().unwrap();

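Background on the `let _ =` above (not part of the diff): rayon's build_global() succeeds only on the first call and returns an error once the global pool already exists, which is why the result is discarded rather than unwrapped. A small sketch of that behaviour, assuming a plain rayon dependency:

use rayon::ThreadPoolBuilder;

fn main() {
    // The first explicit call initializes the global pool and succeeds.
    assert!(ThreadPoolBuilder::new().num_threads(4).build_global().is_ok());
    // Any further call fails because the global pool already exists,
    // so code that may run more than once ignores the result instead of unwrapping it.
    assert!(ThreadPoolBuilder::new().num_threads(2).build_global().is_err());
}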

@@ -0,0 +1,11 @@
use bevy::{ecs::ParallelExecutorOptions, prelude::*};
use std::time::Duration;

/// This example illustrates how to customize the thread pool used internally (e.g. to only use a
/// certain number of threads).
fn main() {
    App::build()
        .add_resource(ParallelExecutorOptions::new().with_num_threads(Some(4)))
        .add_default_plugins()
        .run();
}
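The new example only sets num_threads; with_stack_size chains the same way. A hypothetical variant (not part of the diff), with an arbitrary 8 MiB stack size chosen purely for illustration:

use bevy::{ecs::ParallelExecutorOptions, prelude::*};

fn main() {
    App::build()
        // Cap the pool at 2 threads and give each worker an 8 MiB stack.
        // Per the with_stack_size docs, only override the stack size if you know you need to.
        .add_resource(
            ParallelExecutorOptions::new()
                .with_num_threads(Some(2))
                .with_stack_size(Some(8 * 1024 * 1024)),
        )
        .add_default_plugins()
        .run();
}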