Add configuration for async pipeline creation on RenderPlugin (#11847)

# Objective

Fixes #11846

## Solution

Add a `synchronous_pipeline_compilation` field to `RenderPlugin`,
defaulting to `false`.

Most of the diff is whitespace.

## Changelog

Added `synchronous_pipeline_compilation` to `RenderPlugin` for
disabling async pipeline creation.

## Migration Guide

TODO: consider combining this with the guide for #11846

`RenderPlugin` has a new `synchronous_pipeline_compilation` property.
The default value is `false`. Set this to `true` if you want to retain
the previous synchronous behavior.

---------

Co-authored-by: JMS55 <47158642+JMS55@users.noreply.github.com>
Co-authored-by: François <mockersf@gmail.com>
This commit is contained in:
Rob Parrett 2024-02-16 06:35:47 -07:00 committed by GitHub
parent 9a2ce8e31b
commit ebaa347afe
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 156 additions and 121 deletions

View file

@ -85,6 +85,9 @@ use std::{
/// Configuration for Bevy's renderer (this struct also implements `Plugin`;
/// see `impl Plugin for RenderPlugin` elsewhere in this file).
#[derive(Default)]
pub struct RenderPlugin {
    /// Settings describing how the render resources are created
    /// (see `RenderCreation`).
    // NOTE(review): `RenderCreation`'s semantics are defined elsewhere — confirm.
    pub render_creation: RenderCreation,
    /// If `true`, disables asynchronous pipeline compilation, so pipelines are
    /// compiled synchronously (blocking) when requested.
    ///
    /// This has no effect on macOS, Wasm, or without the `multi-threaded` feature,
    /// where compilation is always synchronous.
    pub synchronous_pipeline_compilation: bool,
}
/// The labels of the default App rendering sets.
@ -355,7 +358,10 @@ impl Plugin for RenderPlugin {
render_app
.insert_resource(instance)
.insert_resource(PipelineCache::new(device.clone()))
.insert_resource(PipelineCache::new(
device.clone(),
self.synchronous_pipeline_compilation,
))
.insert_resource(device)
.insert_resource(queue)
.insert_resource(render_adapter)

View file

@ -480,6 +480,9 @@ pub struct PipelineCache {
pipelines: Vec<CachedPipeline>,
waiting_pipelines: HashSet<CachedPipelineId>,
new_pipelines: Mutex<Vec<CachedPipeline>>,
/// If `true`, disables asynchronous pipeline compilation.
/// This has no effect on MacOS, wasm, or without the `multi_threaded` feature.
synchronous_pipeline_compilation: bool,
}
impl PipelineCache {
@ -488,7 +491,7 @@ impl PipelineCache {
}
/// Create a new pipeline cache associated with the given render device.
pub fn new(device: RenderDevice) -> Self {
pub fn new(device: RenderDevice, synchronous_pipeline_compilation: bool) -> Self {
Self {
shader_cache: Arc::new(Mutex::new(ShaderCache::new(&device))),
device,
@ -496,6 +499,7 @@ impl PipelineCache {
waiting_pipelines: default(),
new_pipelines: default(),
pipelines: default(),
synchronous_pipeline_compilation,
}
}
@ -679,88 +683,95 @@ impl PipelineCache {
let device = self.device.clone();
let shader_cache = self.shader_cache.clone();
let layout_cache = self.layout_cache.clone();
create_pipeline_task(async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();
create_pipeline_task(
async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();
let vertex_module = match shader_cache.get(
&device,
id,
descriptor.vertex.shader.id(),
&descriptor.vertex.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};
let fragment_module = match &descriptor.fragment {
Some(fragment) => {
match shader_cache.get(&device, id, fragment.shader.id(), &fragment.shader_defs)
{
Ok(module) => Some(module),
Err(err) => return Err(err),
}
}
None => None,
};
let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
let vertex_module = match shader_cache.get(
&device,
id,
descriptor.vertex.shader.id(),
&descriptor.vertex.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};
drop((shader_cache, layout_cache));
let fragment_module = match &descriptor.fragment {
Some(fragment) => {
match shader_cache.get(
&device,
id,
fragment.shader.id(),
&fragment.shader_defs,
) {
Ok(module) => Some(module),
Err(err) => return Err(err),
}
}
None => None,
};
let vertex_buffer_layouts = descriptor
.vertex
.buffers
.iter()
.map(|layout| RawVertexBufferLayout {
array_stride: layout.array_stride,
attributes: &layout.attributes,
step_mode: layout.step_mode,
})
.collect::<Vec<_>>();
let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
};
let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
(
fragment_module.unwrap(),
fragment.entry_point.deref(),
fragment.targets.as_slice(),
)
});
drop((shader_cache, layout_cache));
let descriptor = RawRenderPipelineDescriptor {
multiview: None,
depth_stencil: descriptor.depth_stencil.clone(),
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
multisample: descriptor.multisample,
primitive: descriptor.primitive,
vertex: RawVertexState {
buffers: &vertex_buffer_layouts,
entry_point: descriptor.vertex.entry_point.deref(),
module: &vertex_module,
},
fragment: fragment_data
.as_ref()
.map(|(module, entry_point, targets)| RawFragmentState {
entry_point,
module,
targets,
}),
};
let vertex_buffer_layouts = descriptor
.vertex
.buffers
.iter()
.map(|layout| RawVertexBufferLayout {
array_stride: layout.array_stride,
attributes: &layout.attributes,
step_mode: layout.step_mode,
})
.collect::<Vec<_>>();
Ok(Pipeline::RenderPipeline(
device.create_render_pipeline(&descriptor),
))
})
let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
(
fragment_module.unwrap(),
fragment.entry_point.deref(),
fragment.targets.as_slice(),
)
});
let descriptor = RawRenderPipelineDescriptor {
multiview: None,
depth_stencil: descriptor.depth_stencil.clone(),
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
multisample: descriptor.multisample,
primitive: descriptor.primitive,
vertex: RawVertexState {
buffers: &vertex_buffer_layouts,
entry_point: descriptor.vertex.entry_point.deref(),
module: &vertex_module,
},
fragment: fragment_data
.as_ref()
.map(|(module, entry_point, targets)| RawFragmentState {
entry_point,
module,
targets,
}),
};
Ok(Pipeline::RenderPipeline(
device.create_render_pipeline(&descriptor),
))
},
self.synchronous_pipeline_compilation,
)
}
fn start_create_compute_pipeline(
@ -771,44 +782,47 @@ impl PipelineCache {
let device = self.device.clone();
let shader_cache = self.shader_cache.clone();
let layout_cache = self.layout_cache.clone();
create_pipeline_task(async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();
create_pipeline_task(
async move {
let mut shader_cache = shader_cache.lock().unwrap();
let mut layout_cache = layout_cache.lock().unwrap();
let compute_module = match shader_cache.get(
&device,
id,
descriptor.shader.id(),
&descriptor.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};
let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
let compute_module = match shader_cache.get(
&device,
id,
descriptor.shader.id(),
&descriptor.shader_defs,
) {
Ok(module) => module,
Err(err) => return Err(err),
};
drop((shader_cache, layout_cache));
let layout =
if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
None
} else {
Some(layout_cache.get(
&device,
&descriptor.layout,
descriptor.push_constant_ranges.to_vec(),
))
};
let descriptor = RawComputePipelineDescriptor {
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
module: &compute_module,
entry_point: &descriptor.entry_point,
};
drop((shader_cache, layout_cache));
Ok(Pipeline::ComputePipeline(
device.create_compute_pipeline(&descriptor),
))
})
let descriptor = RawComputePipelineDescriptor {
label: descriptor.label.as_deref(),
layout: layout.as_deref(),
module: &compute_module,
entry_point: &descriptor.entry_point,
};
Ok(Pipeline::ComputePipeline(
device.create_compute_pipeline(&descriptor),
))
},
self.synchronous_pipeline_compilation,
)
}
/// Process the pipeline queue and create all pending pipelines if possible.
@ -917,21 +931,34 @@ impl PipelineCache {
}
}
#[cfg(all(
not(target_arch = "wasm32"),
not(target_os = "macos"),
feature = "multi-threaded"
))]
fn create_pipeline_task(
task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
sync: bool,
) -> CachedPipelineState {
#[cfg(all(
not(target_arch = "wasm32"),
not(target_os = "macos"),
feature = "multi-threaded"
))]
return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
if !sync {
return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
}
#[cfg(any(
target_arch = "wasm32",
target_os = "macos",
not(feature = "multi-threaded")
))]
match futures_lite::future::block_on(task) {
Ok(pipeline) => CachedPipelineState::Ok(pipeline),
Err(err) => CachedPipelineState::Err(err),
}
}
#[cfg(any(
target_arch = "wasm32",
target_os = "macos",
not(feature = "multi-threaded")
))]
fn create_pipeline_task(
task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
_sync: bool,
) -> CachedPipelineState {
match futures_lite::future::block_on(task) {
Ok(pipeline) => CachedPipelineState::Ok(pipeline),
Err(err) => CachedPipelineState::Err(err),

View file

@ -27,6 +27,7 @@ fn main() {
features: WgpuFeatures::POLYGON_MODE_LINE,
..default()
}),
..default()
}),
// You need to add this plugin to enable wireframe rendering
WireframePlugin,

View file

@ -18,6 +18,7 @@ fn main() {
..default()
}
.into(),
..default()
}),
)
.run();