Document bevy_tasks and enable #![warn(missing_docs)] (#3509)

This PR is part of issue #3492.

# Objective

- Add and update the `bevy_tasks` documentation to achieve 100% documentation coverage (excluding the `prelude` module).
- Add the `#![warn(missing_docs)]` lint to keep documentation coverage complete going forward.

## Solution

- Add and update the `bevy_tasks` documentation.
- Add the `#![warn(missing_docs)]` lint.
- Add doctests to the new documentation wherever an example is useful (a sketch of the pattern follows this list).
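
For reference, the pattern applied throughout the crate looks roughly like the following sketch; the crate name `my_crate` and the `double` function are placeholders, not items from `bevy_tasks`:

````rust
// Crate root: with this lint enabled, every public item that lacks a doc
// comment now produces a compiler warning.
#![warn(missing_docs)]

/// Doubles the provided value.
///
/// # Example
///
/// ```
/// // Doctests are compiled and run by `cargo test`, which keeps the
/// // examples in the documentation from going stale.
/// assert_eq!(my_crate::double(21), 42);
/// ```
pub fn double(x: u32) -> u32 {
    x * 2
}
````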
James Liu, 2022-01-16 04:53:22 +00:00
Parent: 71814ca91b
Commit: e30199f7a9
6 changed files with 211 additions and 94 deletions

View file

@ -6,8 +6,6 @@ impl<'a, T> ParallelIterator<std::slice::Iter<'a, T>> for ParChunks<'a, T>
where
T: 'a + Send + Sync,
{
type Item = &'a T;
fn next_batch(&mut self) -> Option<std::slice::Iter<'a, T>> {
self.0.next().map(|s| s.iter())
}
@ -18,8 +16,6 @@ impl<'a, T> ParallelIterator<std::slice::IterMut<'a, T>> for ParChunksMut<'a, T>
where
T: 'a + Send + Sync,
{
type Item = &'a mut T;
fn next_batch(&mut self) -> Option<std::slice::IterMut<'a, T>> {
self.0.next().map(|s| s.iter_mut())
}

View file

@ -10,11 +10,9 @@ pub struct Chain<T, U> {
impl<B, T, U> ParallelIterator<B> for Chain<T, U>
where
B: Iterator + Send,
T: ParallelIterator<B, Item = B::Item>,
U: ParallelIterator<B, Item = T::Item>,
T: ParallelIterator<B>,
U: ParallelIterator<B>,
{
type Item = T::Item;
fn next_batch(&mut self) -> Option<B> {
if self.left_in_progress {
match self.left.next_batch() {
@ -35,11 +33,9 @@ pub struct Map<P, F> {
impl<B, U, T, F> ParallelIterator<std::iter::Map<B, F>> for Map<U, F>
where
B: Iterator + Send,
U: ParallelIterator<B, Item = B::Item>,
F: FnMut(U::Item) -> T + Send + Clone,
U: ParallelIterator<B>,
F: FnMut(B::Item) -> T + Send + Clone,
{
type Item = T;
fn next_batch(&mut self) -> Option<std::iter::Map<B, F>> {
self.iter.next_batch().map(|b| b.map(self.f.clone()))
}
@ -54,11 +50,9 @@ pub struct Filter<P, F> {
impl<B, P, F> ParallelIterator<std::iter::Filter<B, F>> for Filter<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B, Item = B::Item>,
F: FnMut(&P::Item) -> bool + Send + Clone,
P: ParallelIterator<B>,
F: FnMut(&B::Item) -> bool + Send + Clone,
{
type Item = P::Item;
fn next_batch(&mut self) -> Option<std::iter::Filter<B, F>> {
self.iter
.next_batch()
@ -75,11 +69,9 @@ pub struct FilterMap<P, F> {
impl<B, P, R, F> ParallelIterator<std::iter::FilterMap<B, F>> for FilterMap<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B, Item = B::Item>,
F: FnMut(P::Item) -> Option<R> + Send + Clone,
P: ParallelIterator<B>,
F: FnMut(B::Item) -> Option<R> + Send + Clone,
{
type Item = R;
fn next_batch(&mut self) -> Option<std::iter::FilterMap<B, F>> {
self.iter.next_batch().map(|b| b.filter_map(self.f.clone()))
}
@ -94,13 +86,11 @@ pub struct FlatMap<P, F> {
impl<B, P, U, F> ParallelIterator<std::iter::FlatMap<B, U, F>> for FlatMap<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B, Item = B::Item>,
F: FnMut(P::Item) -> U + Send + Clone,
P: ParallelIterator<B>,
F: FnMut(B::Item) -> U + Send + Clone,
U: IntoIterator,
U::IntoIter: Send,
{
type Item = U::Item;
// This extends each batch using the flat map. The other option is
// to turn each IntoIter into its own batch.
fn next_batch(&mut self) -> Option<std::iter::FlatMap<B, U, F>> {
@ -116,12 +106,10 @@ pub struct Flatten<P> {
impl<B, P> ParallelIterator<std::iter::Flatten<B>> for Flatten<P>
where
B: Iterator + Send,
P: ParallelIterator<B, Item = B::Item>,
P: ParallelIterator<B>,
B::Item: IntoIterator,
<B::Item as IntoIterator>::IntoIter: Send,
{
type Item = <P::Item as IntoIterator>::Item;
// This extends each batch using the flatten. The other option is to
// turn each IntoIter into its own batch.
fn next_batch(&mut self) -> Option<std::iter::Flatten<B>> {
@ -137,10 +125,8 @@ pub struct Fuse<P> {
impl<B, P> ParallelIterator<B> for Fuse<P>
where
B: Iterator + Send,
P: ParallelIterator<B, Item = B::Item>,
P: ParallelIterator<B>,
{
type Item = P::Item;
fn next_batch(&mut self) -> Option<B> {
match &mut self.iter {
Some(iter) => match iter.next_batch() {
@ -164,11 +150,9 @@ pub struct Inspect<P, F> {
impl<B, P, F> ParallelIterator<std::iter::Inspect<B, F>> for Inspect<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B, Item = B::Item>,
F: FnMut(&P::Item) + Send + Clone,
P: ParallelIterator<B>,
F: FnMut(&B::Item) + Send + Clone,
{
type Item = P::Item;
fn next_batch(&mut self) -> Option<std::iter::Inspect<B, F>> {
self.iter.next_batch().map(|b| b.inspect(self.f.clone()))
}
@ -182,11 +166,9 @@ pub struct Copied<P> {
impl<'a, B, P, T> ParallelIterator<std::iter::Copied<B>> for Copied<P>
where
B: Iterator<Item = &'a T> + Send,
P: ParallelIterator<B, Item = &'a T>,
P: ParallelIterator<B>,
T: 'a + Copy,
{
type Item = T;
fn next_batch(&mut self) -> Option<std::iter::Copied<B>> {
self.iter.next_batch().map(|b| b.copied())
}
@ -200,11 +182,9 @@ pub struct Cloned<P> {
impl<'a, B, P, T> ParallelIterator<std::iter::Cloned<B>> for Cloned<P>
where
B: Iterator<Item = &'a T> + Send,
P: ParallelIterator<B, Item = &'a T>,
P: ParallelIterator<B>,
T: 'a + Copy,
{
type Item = T;
fn next_batch(&mut self) -> Option<std::iter::Cloned<B>> {
self.iter.next_batch().map(|b| b.cloned())
}
@ -219,10 +199,8 @@ pub struct Cycle<P> {
impl<B, P> ParallelIterator<B> for Cycle<P>
where
B: Iterator + Send,
P: ParallelIterator<B, Item = B::Item> + Clone,
P: ParallelIterator<B> + Clone,
{
type Item = P::Item;
fn next_batch(&mut self) -> Option<B> {
match self.curr.as_mut().and_then(|c| c.next_batch()) {
batch @ Some(_) => batch,

View file

@ -11,18 +11,16 @@ pub use adapters::*;
/// run in parallel is inexpensive, *a [`ParallelIterator`] could take longer
/// than a normal [`Iterator`]*. Therefore, you should profile your code before
/// using [`ParallelIterator`].
pub trait ParallelIterator<B>
pub trait ParallelIterator<BatchIter>
where
B: Iterator<Item = Self::Item> + Send,
BatchIter: Iterator + Send,
Self: Sized + Send,
{
type Item;
/// Returns the next batch of items for processing.
///
/// Each batch is an iterator with items of the same type as the
/// [`ParallelIterator`]. Returns `None` when there are no batches left.
fn next_batch(&mut self) -> Option<B>;
fn next_batch(&mut self) -> Option<BatchIter>;
/// Returns the bounds on the remaining number of items in the
/// parallel iterator.
@ -48,7 +46,7 @@ where
/// Consumes the parallel iterator and returns the last item.
///
/// See [`Iterator::last()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.last)
fn last(mut self, _pool: &TaskPool) -> Option<Self::Item> {
fn last(mut self, _pool: &TaskPool) -> Option<BatchIter::Item> {
let mut last_item = None;
while let Some(batch) = self.next_batch() {
last_item = batch.last();
@ -60,7 +58,7 @@ where
///
/// See [`Iterator::nth()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.nth)
// TODO: Optimize with size_hint on each batch
fn nth(mut self, _pool: &TaskPool, n: usize) -> Option<Self::Item> {
fn nth(mut self, _pool: &TaskPool, n: usize) -> Option<BatchIter::Item> {
let mut i = 0;
while let Some(batch) = self.next_batch() {
for item in batch {
@ -80,7 +78,7 @@ where
// TODO: Use IntoParallelIterator for U
fn chain<U>(self, other: U) -> Chain<Self, U>
where
U: ParallelIterator<B, Item = Self::Item>,
U: ParallelIterator<BatchIter>,
{
Chain {
left: self,
@ -95,7 +93,7 @@ where
/// See [`Iterator::map()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.map)
fn map<T, F>(self, f: F) -> Map<Self, F>
where
F: FnMut(Self::Item) -> T + Send + Clone,
F: FnMut(BatchIter::Item) -> T + Send + Clone,
{
Map { iter: self, f }
}
@ -105,7 +103,7 @@ where
/// See [`Iterator::for_each()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.for_each)
fn for_each<F>(mut self, pool: &TaskPool, f: F)
where
F: FnMut(Self::Item) + Send + Clone + Sync,
F: FnMut(BatchIter::Item) + Send + Clone + Sync,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -123,7 +121,7 @@ where
/// See [`Iterator::filter()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.filter)
fn filter<F>(self, predicate: F) -> Filter<Self, F>
where
F: FnMut(&Self::Item) -> bool,
F: FnMut(&BatchIter::Item) -> bool,
{
Filter {
iter: self,
@ -136,7 +134,7 @@ where
/// See [`Iterator::filter_map()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.filter_map)
fn filter_map<R, F>(self, f: F) -> FilterMap<Self, F>
where
F: FnMut(Self::Item) -> Option<R>,
F: FnMut(BatchIter::Item) -> Option<R>,
{
FilterMap { iter: self, f }
}
@ -147,7 +145,7 @@ where
/// See [`Iterator::flat_map()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.flat_map)
fn flat_map<U, F>(self, f: F) -> FlatMap<Self, F>
where
F: FnMut(Self::Item) -> U,
F: FnMut(BatchIter::Item) -> U,
U: IntoIterator,
{
FlatMap { iter: self, f }
@ -158,7 +156,7 @@ where
/// See [`Iterator::flatten()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.flatten)
fn flatten(self) -> Flatten<Self>
where
Self::Item: IntoIterator,
BatchIter::Item: IntoIterator,
{
Flatten { iter: self }
}
@ -176,7 +174,7 @@ where
/// See [`Iterator::inspect()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.inspect)
fn inspect<F>(self, f: F) -> Inspect<Self, F>
where
F: FnMut(&Self::Item),
F: FnMut(&BatchIter::Item),
{
Inspect { iter: self, f }
}
@ -194,8 +192,8 @@ where
// TODO: Investigate optimizations for less copying
fn collect<C>(mut self, pool: &TaskPool) -> C
where
C: std::iter::FromIterator<Self::Item>,
Self::Item: Send + 'static,
C: std::iter::FromIterator<BatchIter::Item>,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -213,9 +211,9 @@ where
// TODO: Investigate optimizations for less copying
fn partition<C, F>(mut self, pool: &TaskPool, f: F) -> (C, C)
where
C: Default + Extend<Self::Item> + Send,
F: FnMut(&Self::Item) -> bool + Send + Sync + Clone,
Self::Item: Send + 'static,
C: Default + Extend<BatchIter::Item> + Send,
F: FnMut(&BatchIter::Item) -> bool + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
let (mut a, mut b) = <(C, C)>::default();
pool.scope(|s| {
@ -241,7 +239,7 @@ where
/// See [`Iterator::fold()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold)
fn fold<C, F, D>(mut self, pool: &TaskPool, init: C, f: F) -> Vec<C>
where
F: FnMut(C, Self::Item) -> C + Send + Sync + Clone,
F: FnMut(C, BatchIter::Item) -> C + Send + Sync + Clone,
C: Clone + Send + Sync + 'static,
{
pool.scope(|s| {
@ -260,7 +258,7 @@ where
/// See [`Iterator::all()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.all)
fn all<F>(mut self, pool: &TaskPool, f: F) -> bool
where
F: FnMut(Self::Item) -> bool + Send + Sync + Clone,
F: FnMut(BatchIter::Item) -> bool + Send + Sync + Clone,
{
pool.scope(|s| {
while let Some(mut batch) = self.next_batch() {
@ -279,7 +277,7 @@ where
/// See [`Iterator::any()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.any)
fn any<F>(mut self, pool: &TaskPool, f: F) -> bool
where
F: FnMut(Self::Item) -> bool + Send + Sync + Clone,
F: FnMut(BatchIter::Item) -> bool + Send + Sync + Clone,
{
pool.scope(|s| {
while let Some(mut batch) = self.next_batch() {
@ -299,7 +297,7 @@ where
// TODO: Investigate optimizations for less copying
fn position<F>(mut self, pool: &TaskPool, f: F) -> Option<usize>
where
F: FnMut(Self::Item) -> bool + Send + Sync + Clone,
F: FnMut(BatchIter::Item) -> bool + Send + Sync + Clone,
{
let poses = pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -330,9 +328,9 @@ where
/// Returns the maximum item of a parallel iterator.
///
/// See [`Iterator::max()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.max)
fn max(mut self, pool: &TaskPool) -> Option<Self::Item>
fn max(mut self, pool: &TaskPool) -> Option<BatchIter::Item>
where
Self::Item: Ord + Send + 'static,
BatchIter::Item: Ord + Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -347,9 +345,9 @@ where
/// Returns the minimum item of a parallel iterator.
///
/// See [`Iterator::min()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.min)
fn min(mut self, pool: &TaskPool) -> Option<Self::Item>
fn min(mut self, pool: &TaskPool) -> Option<BatchIter::Item>
where
Self::Item: Ord + Send + 'static,
BatchIter::Item: Ord + Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -364,11 +362,11 @@ where
/// Returns the item that gives the maximum value from the specified function.
///
/// See [`Iterator::max_by_key()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.max_by_key)
fn max_by_key<R, F>(mut self, pool: &TaskPool, f: F) -> Option<Self::Item>
fn max_by_key<R, F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
R: Ord,
F: FnMut(&Self::Item) -> R + Send + Sync + Clone,
Self::Item: Send + 'static,
F: FnMut(&BatchIter::Item) -> R + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -385,10 +383,10 @@ where
/// function.
///
/// See [`Iterator::max_by()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.max_by)
fn max_by<F>(mut self, pool: &TaskPool, f: F) -> Option<Self::Item>
fn max_by<F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
F: FnMut(&Self::Item, &Self::Item) -> std::cmp::Ordering + Send + Sync + Clone,
Self::Item: Send + 'static,
F: FnMut(&BatchIter::Item, &BatchIter::Item) -> std::cmp::Ordering + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -404,11 +402,11 @@ where
/// Returns the item that gives the minimum value from the specified function.
///
/// See [`Iterator::min_by_key()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.min_by_key)
fn min_by_key<R, F>(mut self, pool: &TaskPool, f: F) -> Option<Self::Item>
fn min_by_key<R, F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
R: Ord,
F: FnMut(&Self::Item) -> R + Send + Sync + Clone,
Self::Item: Send + 'static,
F: FnMut(&BatchIter::Item) -> R + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -425,10 +423,10 @@ where
/// function.
///
/// See [`Iterator::min_by()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.min_by)
fn min_by<F>(mut self, pool: &TaskPool, f: F) -> Option<Self::Item>
fn min_by<F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
F: FnMut(&Self::Item, &Self::Item) -> std::cmp::Ordering + Send + Sync + Clone,
Self::Item: Send + 'static,
F: FnMut(&BatchIter::Item, &BatchIter::Item) -> std::cmp::Ordering + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
@ -446,7 +444,7 @@ where
/// See [`Iterator::copied()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.copied)
fn copied<'a, T>(self) -> Copied<Self>
where
Self: ParallelIterator<B, Item = &'a T>,
Self: ParallelIterator<BatchIter>,
T: 'a + Copy,
{
Copied { iter: self }
@ -457,7 +455,7 @@ where
/// See [`Iterator::cloned()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.cloned)
fn cloned<'a, T>(self) -> Cloned<Self>
where
Self: ParallelIterator<B, Item = &'a T>,
Self: ParallelIterator<BatchIter>,
T: 'a + Copy,
{
Cloned { iter: self }
@ -481,7 +479,7 @@ where
/// See [`Iterator::sum()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.sum)
fn sum<S, R>(mut self, pool: &TaskPool) -> R
where
S: std::iter::Sum<Self::Item> + Send + 'static,
S: std::iter::Sum<BatchIter::Item> + Send + 'static,
R: std::iter::Sum<S>,
{
pool.scope(|s| {
@ -498,7 +496,7 @@ where
/// See [`Iterator::product()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.product)
fn product<S, R>(mut self, pool: &TaskPool) -> R
where
S: std::iter::Product<Self::Item> + Send + 'static,
S: std::iter::Product<BatchIter::Item> + Send + 'static,
R: std::iter::Product<S>,
{
pool.scope(|s| {
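
To illustrate the reworked trait (the generic parameter is now named `BatchIter` and the `Item` associated type is gone, with item types taken from `BatchIter::Item`), here is a minimal sketch of a custom implementation; `RangeBatches` and the `main` function are illustrative only and not part of the crate:

```rust
use bevy_tasks::{ParallelIterator, TaskPool};

// A hypothetical parallel iterator that yields 0..end in fixed-size batches.
struct RangeBatches {
    next: u32,
    end: u32,
    batch_size: u32,
}

// After this change only `next_batch` must be written; the item type comes
// from the batch iterator (`std::ops::Range<u32>` yields `u32`).
impl ParallelIterator<std::ops::Range<u32>> for RangeBatches {
    fn next_batch(&mut self) -> Option<std::ops::Range<u32>> {
        if self.next >= self.end {
            return None;
        }
        let start = self.next;
        self.next = (self.next + self.batch_size).min(self.end);
        Some(start..self.next)
    }
}

fn main() {
    let pool = TaskPool::new();
    let iter = RangeBatches { next: 0, end: 100, batch_size: 16 };
    // Adapter closures now receive `BatchIter::Item` (here `u32`) directly.
    let doubled: Vec<u32> = iter.map(|x| x * 2).collect(&pool);
    assert_eq!(doubled.iter().copied().sum::<u32>(), 9_900);
}
```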

View file

@ -1,3 +1,4 @@
#![warn(missing_docs)]
#![doc = include_str!("../README.md")]
mod slice;
@ -25,6 +26,7 @@ pub use countdown_event::CountdownEvent;
mod iter;
pub use iter::ParallelIterator;
#[allow(missing_docs)]
pub mod prelude {
#[doc(hidden)]
pub use crate::{
@ -34,10 +36,5 @@ pub mod prelude {
};
}
pub fn logical_core_count() -> usize {
num_cpus::get()
}
pub fn physical_core_count() -> usize {
num_cpus::get_physical()
}
pub use num_cpus::get as logical_core_count;
pub use num_cpus::get_physical as physical_core_count;
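
With this change, `logical_core_count` and `physical_core_count` become plain re-exports of the corresponding `num_cpus` functions, so call sites stay the same. A small usage sketch:

```rust
use bevy_tasks::{logical_core_count, physical_core_count};

fn main() {
    // Re-exports of `num_cpus::get` and `num_cpus::get_physical`, as shown above.
    println!("logical cores:  {}", logical_core_count());
    println!("physical cores: {}", physical_core_count());
}
```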

View file

@ -1,6 +1,35 @@
use super::TaskPool;
/// Provides functions for mapping read-only slices across a provided [`TaskPool`].
pub trait ParallelSlice<T: Sync>: AsRef<[T]> {
/// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
/// in parallel across the provided `task_pool`. One task is spawned in the task pool
/// for every chunk.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```rust
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_chunk_map(&task_pool, 100, |chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// results.push(*count + 2);
/// }
/// results
/// });
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
/// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// - [`ParallelSliceMut::par_chunk_map_mut`] for mapping mutable slices.
/// - [`ParallelSlice::par_splat_map`] for mapping when a specific chunk size is unknown.
fn par_chunk_map<F, R>(&self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
where
F: Fn(&[T]) -> R + Send + Sync,
@ -15,6 +44,36 @@ pub trait ParallelSlice<T: Sync>: AsRef<[T]> {
})
}
/// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
/// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
///
/// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
/// `task_pool`.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```rust
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_splat_map(&task_pool, None, |chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// results.push(*count + 2);
/// }
/// results
/// });
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
/// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// [`ParallelSliceMut::par_splat_map_mut`] for mapping mutable slices.
/// [`ParallelSlice::par_chunk_map`] for mapping when a specific chunk size is desirable.
fn par_splat_map<F, R>(&self, task_pool: &TaskPool, max_tasks: Option<usize>, f: F) -> Vec<R>
where
F: Fn(&[T]) -> R + Send + Sync,
@ -35,7 +94,39 @@ pub trait ParallelSlice<T: Sync>: AsRef<[T]> {
impl<S, T: Sync> ParallelSlice<T> for S where S: AsRef<[T]> {}
/// Provides functions for mapping mutable slices across a provided [`TaskPool`].
pub trait ParallelSliceMut<T: Send>: AsMut<[T]> {
/// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
/// in parallel across the provided `task_pool`. One task is spawned in the task pool
/// for every chunk.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```rust
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let mut counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_chunk_map_mut(&task_pool, 100, |chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// *count += 5;
/// results.push(*count - 2);
/// }
/// results
/// });
///
/// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
/// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// [`ParallelSlice::par_chunk_map`] for mapping immutable slices.
/// [`ParallelSliceMut::par_splat_map_mut`] for mapping when a specific chunk size is unknown.
fn par_chunk_map_mut<F, R>(&mut self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
where
F: Fn(&mut [T]) -> R + Send + Sync,
@ -50,6 +141,39 @@ pub trait ParallelSliceMut<T: Send>: AsMut<[T]> {
})
}
/// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
/// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
///
/// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
/// `task_pool`.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```rust
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let mut counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_splat_map_mut(&task_pool, None, |chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// *count += 5;
/// results.push(*count - 2);
/// }
/// results
/// });
///
/// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect::<Vec<u32>>();
/// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// [`ParallelSlice::par_splat_map`] for mapping immutable slices.
/// [`ParallelSliceMut::par_chunk_map_mut`] for mapping when a specific chunk size is desirable.
fn par_splat_map_mut<F, R>(
&mut self,
task_pool: &TaskPool,
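
The doctests above always pass `None` for `max_tasks`. As a complementary sketch (the numbers are arbitrary, not taken from the crate), an explicit cap limits how many tasks are spawned regardless of how many threads the pool has:

```rust
use bevy_tasks::prelude::*;
use bevy_tasks::TaskPool;

fn main() {
    let task_pool = TaskPool::new();
    let values = (0..10_000).collect::<Vec<u32>>();
    // Split into at most four chunks, so at most four tasks are spawned.
    let partial_sums = values.par_splat_map(&task_pool, Some(4), |chunk| {
        chunk.iter().copied().sum::<u32>()
    });
    let total: u32 = partial_sums.into_iter().sum();
    assert_eq!(total, 49_995_000);
}
```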

View file

@ -229,6 +229,8 @@ impl TaskPool {
/// Spawns a static future onto the thread pool. The returned Task is a future. It can also be
/// cancelled and "detached" allowing it to continue running without having to be polled by the
/// end-user.
///
/// If the provided future is non-`Send`, [`TaskPool::spawn_local`] should be used instead.
pub fn spawn<T>(&self, future: impl Future<Output = T> + Send + 'static) -> Task<T>
where
T: Send + 'static,
@ -236,6 +238,11 @@ impl TaskPool {
Task::new(self.executor.spawn(future))
}
/// Spawns a static future on the thread-local async executor for the current thread. The task
/// will run entirely on the thread the task was spawned on. The returned Task is a future.
/// It can also be cancelled and "detached" allowing it to continue running without having
/// to be polled by the end-user. Users should generally prefer to use [`TaskPool::spawn`]
/// instead, unless the provided future is not `Send`.
pub fn spawn_local<T>(&self, future: impl Future<Output = T> + 'static) -> Task<T>
where
T: 'static,
@ -250,6 +257,9 @@ impl Default for TaskPool {
}
}
/// A `TaskPool` scope for running one or more non-`'static` futures.
///
/// For more information, see [`TaskPool::scope`].
#[derive(Debug)]
pub struct Scope<'scope, T> {
executor: &'scope async_executor::Executor<'scope>,
@ -258,11 +268,25 @@ pub struct Scope<'scope, T> {
}
impl<'scope, T: Send + 'scope> Scope<'scope, T> {
/// Spawns a scoped future onto the thread pool. The scope *must* outlive
/// the provided future. The results of the future will be returned as a part of
/// [`TaskPool::scope`]'s return value.
///
/// If the provided future is non-`Send`, [`Scope::spawn_local`] should be used
/// instead.
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn<Fut: Future<Output = T> + 'scope + Send>(&mut self, f: Fut) {
let task = self.executor.spawn(f);
self.spawned.push(task);
}
/// Spawns a scoped future onto the thread-local executor. The scope *must* outlive
/// the provided future. The results of the future will be returned as a part of
/// [`TaskPool::scope`]'s return value. Users should generally prefer to use
/// [`Scope::spawn`] instead, unless the provided future is not `Send`.
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn_local<Fut: Future<Output = T> + 'scope>(&mut self, f: Fut) {
let task = self.local_executor.spawn(f);
self.spawned.push(task);
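
To round out the `TaskPool` and `Scope` documentation above, a minimal usage sketch of `TaskPool::scope` with borrowed, non-`'static` data (the values are arbitrary):

```rust
use bevy_tasks::TaskPool;

fn main() {
    let pool = TaskPool::new();
    let values = vec![1, 2, 3, 4];

    // Scoped tasks may borrow local data; each spawned future's output is
    // returned as part of `TaskPool::scope`'s return value, as documented
    // on `Scope::spawn`.
    let results = pool.scope(|s| {
        for v in &values {
            s.spawn(async move { *v * 10 });
        }
    });

    assert_eq!(results.iter().sum::<i32>(), 100);
}
```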