Merge branch 'main' into new-example-multiple-sprites-one-entity

Andrew 2024-10-31 10:17:12 -04:00 committed by GitHub
commit 2e93f49e01
1345 changed files with 118919 additions and 51354 deletions

View file

@ -7,9 +7,9 @@
#
# ## LLD
#
# LLD is a linker from the LLVM project that supports Linux, Windows, MacOS, and WASM. It has the greatest
# LLD is a linker from the LLVM project that supports Linux, Windows, macOS, and Wasm. It has the greatest
# platform support and the easiest installation process. It is enabled by default in this file for Linux
# and Windows. On MacOS, the default linker yields higher performance than LLD and is used instead.
# and Windows. On macOS, the default linker yields higher performance than LLD and is used instead.
#
# To install, please scroll to the corresponding table for your target (e.g. `[target.x86_64-pc-windows-msvc]`
# for Windows) and follow the steps under `LLD linker`.
@ -21,18 +21,18 @@
# Mold is a newer linker written by one of the authors of LLD. It boasts even greater performance, specifically
# through its high parallelism, though it only supports Linux.
#
# Mold is disabled by default in this file. If you wish to enable it, follow the installation instructions for
# Mold is disabled by default in this file. If you wish to enable it, follow the installation instructions for
# your corresponding target, disable LLD by commenting out its `-Clink-arg=...` line, and enable Mold by
# *uncommenting* its `-Clink-arg=...` line.
#
# There is a fork of Mold named Sold that supports MacOS, but it is unmaintained and is about the same speed as
# There is a fork of Mold named Sold that supports macOS, but it is unmaintained and is about the same speed as
# the default ld64 linker. For this reason, it is not included in this file.
#
# For more information, please see Mold's repository at <https://github.com/rui314/mold>.
#
# # Nightly configuration
#
# Be warned that the following features require nightly Rust, which is expiremental and may contain bugs. If you
# Be warned that the following features require nightly Rust, which is experimental and may contain bugs. If you
# are having issues, skip this section and use stable Rust instead.
#
# There are a few unstable features that can improve performance. To use them, first install nightly Rust
@ -51,7 +51,7 @@
# crates to share monomorphized generic code, so they do not duplicate work.
#
# In other words, instead of crate 1 generating `Foo<String>` and crate 2 generating `Foo<String>` separately,
# only one crate generates `Foo<String>` and the other adds on to the pre-exiting work.
# only one crate generates `Foo<String>` and the other adds on to the pre-existing work.
#
# Note that you may have some issues with this flag on Windows. If compiling fails due to the 65k symbol limit,
# you may have to disable this setting. For more information and possible solutions to this error, see
@ -83,12 +83,21 @@ rustflags = [
# - Ubuntu: `sudo apt-get install mold clang`
# - Fedora: `sudo dnf install mold clang`
# - Arch: `sudo pacman -S mold clang`
# "-Clink-arg=-fuse-ld=/usr/bin/mold",
# "-Clink-arg=-fuse-ld=mold",
# Nightly
# "-Zshare-generics=y",
# "-Zthreads=0",
]
# Some systems may experience linker performance issues when running doc tests.
# See https://github.com/bevyengine/bevy/issues/12207 for details.
rustdocflags = [
# LLD linker
"-Clink-arg=-fuse-ld=lld",
# Mold linker
# "-Clink-arg=-fuse-ld=mold",
]
[target.x86_64-apple-darwin]
rustflags = [
@ -134,14 +143,15 @@ rustflags = [
# rustup component add llvm-tools
# ```
linker = "rust-lld.exe"
rustdocflags = ["-Clinker=rust-lld.exe"]
rustflags = [
# Nightly
# "-Zshare-generics=y",
# "-Zshare-generics=n", # This needs to be off if you use dynamic linking on Windows.
# "-Zthreads=0",
]
# Optional: Uncommenting the following improves compile times, but reduces the amount of debug info to 'line number tables only'
# In most cases the gains are negligible, but if you are on macos and have slow compile times you should see significant gains.
# Optional: Uncommenting the following improves compile times, but reduces the amount of debug info to 'line number tables only'.
# In most cases the gains are negligible, but if you are on macOS and have slow compile times you should see significant gains.
# [profile.dev]
# debug = 1

View file

@ -10,4 +10,4 @@ assignees: ''
Provide a link to the documentation and describe how it could be improved. In what ways is it incomplete, incorrect, or misleading?
If you have suggestions on exactly what the new docs should say, feel free to include them here. Or alternatively, make the changes yourself and [create a pull request](https://bevyengine.org/learn/book/contributing/code/) instead.
If you have suggestions on exactly what the new docs should say, feel free to include them here. Alternatively, make the changes yourself and [create a pull request](https://bevyengine.org/learn/book/contributing/code/) instead.

View file

@ -2,7 +2,7 @@
name: Feature Request
about: Propose a new feature!
title: ''
labels: C-Enhancement, S-Needs-Triage
labels: C-Feature, S-Needs-Triage
assignees: ''
---

View file

@ -12,7 +12,7 @@
# repository before you can use this action.
#
# This action will only install dependencies when the current operating system is Linux. It will do
# nothing on any other OS (MacOS, Windows).
# nothing on any other OS (macOS, Windows).
name: Install Linux dependencies
description: Installs the dependencies necessary to build Bevy on Linux.
@ -20,19 +20,19 @@ inputs:
alsa:
description: Install alsa (libasound2-dev)
required: false
default: true
default: "true"
udev:
description: Install udev (libudev-dev)
required: false
default: true
default: "true"
wayland:
description: Install Wayland (libwayland-dev)
required: false
default: false
default: "false"
xkb:
description: Install xkb (libxkbcommon-dev)
required: false
default: false
default: "false"
runs:
using: composite
steps:

View file

@ -1,39 +0,0 @@
# Style guide: Engine
## Contributing
For more advice on contributing to the engine, see the [relevant section](../../CONTRIBUTING.md#Contributing-code) of `CONTRIBUTING.md`.
## General guidelines
1. Prefer granular imports over glob imports like `bevy_ecs::prelude::*`.
2. Use a consistent comment style (see the sketch after this list):
1. `///` doc comments belong above `#[derive(Trait)]` invocations.
2. `//` comments should generally go above the line in question, rather than in-line.
3. Avoid `/* */` block comments, even when writing long comments.
4. Use \`variable_name\` code blocks in comments to signify that you're referring to specific types and variables.
5. Start comments with capital letters. End them with a period if they are sentence-like.
3. Use comments to organize long and complex stretches of code that can't sensibly be refactored into separate functions.
4. When using [Bevy error codes](https://bevyengine.org/learn/errors/), include a link to the relevant error on the Bevy website in the returned error message, e.g. `... See: https://bevyengine.org/learn/errors/#b0003`.
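A minimal sketch of the comment conventions above, avoiding glob imports per guideline 1; the `JumpCount` component and `reset_jumps` system are hypothetical names used only for illustration:

```rust
use bevy_ecs::prelude::{Component, Query};

/// Tracks how many mid-air jumps the player has left.
/// Doc comments like this one belong above the `#[derive(Trait)]` invocation.
#[derive(Component)]
struct JumpCount(u32);

// Plain `//` comments go above the line they describe, start with a capital
// letter, and end with a period when they are sentence-like.
fn reset_jumps(mut jumps: Query<&mut JumpCount>) {
    for mut jump_count in &mut jumps {
        // Restore the default double-jump allowance.
        jump_count.0 = 2;
    }
}
```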
## Rust API guidelines
As a reference for our API development we are using the [Rust API guidelines][Rust API guidelines]. Generally, these should be followed, except for the following areas of disagreement:
### Areas of disagreement
There are some areas of the [Rust API guidelines][Rust API guidelines] that we do not agree with. This list will be expanded whenever we find something else we disagree with, so be sure to check it from time to time.
> All items have a rustdoc example
- This guideline is too strong and not applicable for everything inside of the Bevy game engine. For functionality that requires more context or needs a more interactive demonstration (such as rendering or input features), make use of the `examples` folder instead.
> Examples use ?, not try!, not unwrap
- This guideline is usually reasonable, but not always required.
> Only smart pointers implement Deref and DerefMut
- Generally a good rule of thumb, but we're probably going to deliberately violate this for single-element wrapper types like `Life(u32)`. The behavior is still predictable and it significantly improves ergonomics / new user comprehension; see the sketch below.
[Rust API guidelines]: https://rust-lang.github.io/api-guidelines/about.html
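A minimal sketch of the single-element wrapper exception above, using the hypothetical `Life(u32)` type named in the guideline (Bevy also ships `Deref`/`DerefMut` derive macros that can generate these impls):

```rust
use std::ops::{Deref, DerefMut};

/// Single-element wrapper around the player's remaining lives.
struct Life(u32);

impl Deref for Life {
    type Target = u32;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Life {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

fn main() {
    let mut life = Life(3);
    // The wrapper now behaves like its inner `u32` at call sites.
    *life += 1;
    assert_eq!(*life, 4);
}
```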

View file

@ -1,64 +0,0 @@
# Style guide: Examples
For more advice on writing examples, see the [relevant section](../../CONTRIBUTING.md#writing-examples) of CONTRIBUTING.md.
## Organization
1. Examples should live in an appropriate subfolder of `/examples`.
2. Examples should be a single file if possible.
3. Assets live in `./assets`. Try to avoid adding new assets unless strictly necessary to keep the repo small. Don't add "large" asset files.
4. Each example should try to follow this order:
1. Imports
2. A `fn main()` block
3. Example logic
5. Try to structure app / plugin construction in the same fashion as the actual code.
6. Examples should typically not have tests, as they are not directly reusable by the Bevy user.
## Stylistic preferences
1. Use simple, descriptive variable names.
1. Avoid names like `MyComponent` in favor of more descriptive terms like `Events`.
2. Prefer single letter differentiators like `EventsA` and `EventsB` to nonsense words like `EventsFoo` and `EventsBar`.
3. Avoid repeating the type of variables in their name where possible. For example, `Color` should be preferred to `ColorComponent`.
2. Prefer glob imports of `bevy::prelude::*` and `bevy::sub_crate::*` over granular imports (for terseness).
3. Use a consistent comment style:
1. `///` doc comments belong above `#[derive(Trait)]` invocations.
2. `//` comments should generally go above the line in question, rather than in-line.
3. Avoid `/* */` block comments, even when writing long comments.
4. Use \`variable_name\` code blocks in comments to signify that you're referring to specific types and variables.
5. Start comments with capital letters; end them with a period if they are sentence-like.
4. Use comments to organize long and complex stretches of code that can't sensibly be refactored into separate functions.
5. Avoid making variables `pub` unless it is needed for your example.
## Code conventions
1. Refactor configurable values ("magic numbers") out into constants with clear names.
2. Prefer `for` loops over `.for_each`. The latter is faster (for now), but it is less clear for beginners, less idiomatic, and less flexible.
3. Use `.single` and `.single_mut` where appropriate.
4. In Queries, prefer `With<T>` filters over actually fetching unused data with `&T`.
5. Prefer disjoint queries using `With` and `Without` over param sets when you need more than one query in a single system (see the sketch after this list).
6. Prefer structs with named fields over tuple structs except in the case of single-field wrapper types.
7. Use enum-labels over string-labels for app / schedule / etc. labels.
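A short, hypothetical sketch combining conventions 1, 4, and 5 above, assuming a recent Bevy release; the `Player`/`Enemy` components and the `PLAYER_SPEED` constant are made up for this example:

```rust
use bevy::prelude::*;

#[derive(Component)]
struct Player;

#[derive(Component)]
struct Enemy;

/// A "magic number" refactored into a clearly named constant.
const PLAYER_SPEED: f32 = 2.0;

// Uses a `With<Player>` filter instead of fetching unused `&Player` data.
fn move_players(mut players: Query<&mut Transform, With<Player>>) {
    for mut transform in &mut players {
        transform.translation.x += PLAYER_SPEED;
    }
}

// Disjoint queries using `With` / `Without` instead of a param set.
fn count_entities(
    players: Query<(), With<Player>>,
    enemies: Query<(), (With<Enemy>, Without<Player>)>,
) {
    info!("{} players, {} enemies", players.iter().count(), enemies.iter().count());
}

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_systems(Update, (move_players, count_entities))
        .run();
}
```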
## "Feature" examples
These examples demonstrate the usage of specific engine features in clear, minimal ways.
1. Focus on demonstrating exactly one feature in an example
2. Try to keep your names divorced from the context of a specific game, and focused on the feature you are demonstrating.
3. Where they exist, show good alternative approaches to accomplish the same task and explain why you may prefer one over the other.
4. Examples should have a visible effect when run, either in the command line or a graphical window.
## "Game" examples
These examples show how to build simple games in Bevy in a cohesive way.
1. Each of these examples lives in the [/examples/games] folder.
2. Aim for minimum but viable status: the game should be playable and not obviously buggy but does not need to be polished, featureful, or terribly fun.
3. Focus on code quality and demonstrating good, extensible patterns for users.
1. Make good use of enums and states to organize your game logic (see the sketch after this list).
2. Keep components as small as possible but no smaller: all of the data on a component should generally be accessed at once.
3. Keep systems small: they should have a clear single purpose.
4. Avoid duplicating logic across similar entities whenever possible by sharing systems and components.
4. Use `///` doc comments to explain what each function / struct does as if the example were part of a polished production codebase.
5. Arrange your code into modules within the same file to allow for simple code folding / organization.
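As a sketch of organizing game logic with enums and states (point 3.1 above), assuming a recent Bevy release; `GameState` and the two systems are hypothetical:

```rust
use bevy::prelude::*;

/// Game flow organized as an enum-based state.
#[derive(States, Debug, Clone, PartialEq, Eq, Hash, Default)]
enum GameState {
    #[default]
    Menu,
    Playing,
}

fn setup_level() {
    info!("Spawning the level...");
}

fn run_gameplay() {
    info!("This system only runs while in `GameState::Playing`.");
}

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .init_state::<GameState>()
        // Scoping systems to states keeps each chunk of game logic single-purpose.
        .add_systems(OnEnter(GameState::Playing), setup_level)
        .add_systems(Update, run_gameplay.run_if(in_state(GameState::Playing)))
        .run();
}
```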

View file

@ -0,0 +1,2 @@
(
)

View file

@ -3,7 +3,6 @@
fixed_frame_time: Some(0.03),
),
events: [
(200, Screenshot),
(900, AppExit),
]
)

View file

@ -1,5 +0,0 @@
(
events: [
(900, AppExit),
]
)

View file

@ -1,9 +0,0 @@
(
setup: (
frame_time: Some(0.03),
),
events: [
(100, Screenshot),
(300, AppExit),
]
)

.github/example-run/testbed_2d.ron vendored Normal file
View file

@ -0,0 +1,12 @@
(
events: [
(100, Screenshot),
(200, Custom("switch_scene")),
(300, Screenshot),
(400, Custom("switch_scene")),
(500, Screenshot),
(600, Custom("switch_scene")),
(700, Screenshot),
(800, AppExit),
]
)

.github/example-run/testbed_3d.ron vendored Normal file
View file

@ -0,0 +1,12 @@
(
events: [
(100, Screenshot),
(200, Custom("switch_scene")),
(300, Screenshot),
(400, Custom("switch_scene")),
(500, Screenshot),
(600, Custom("switch_scene")),
(700, Screenshot),
(800, AppExit),
]
)

View file

@ -1,3 +1,9 @@
{
"MD013": false
"MD013": false,
"no-inline-html": {
"allowed_elements": [
"details",
"summary"
]
}
}

View file

@ -16,14 +16,26 @@
---
## Changelog
## Showcase
> This section is optional. If this was a trivial fix, or has no externally-visible impact, you can delete this section.
> This section is optional. If this PR does not include a visual change or does not add a new feature, you can delete this section.
- What changed as a result of this PR?
- If applicable, organize changes under "Added", "Changed", or "Fixed" sub-headings
- Stick to one or two sentences. If more detail is needed for a particular change, consider adding it to the "Solution" section
- If you can't summarize the work, your change may be unreasonably large / unrelated. Consider splitting your PR to make it easier to review and merge!
- Help others understand the result of this PR by showcasing your awesome work!
- If this PR adds a new feature or public API, consider adding a brief pseudo-code snippet of it in action
- If this PR includes a visual change, consider adding a screenshot, GIF, or video
- If you want, you could even include a before/after comparison!
- If the Migration Guide adequately covers the changes, you can delete this section
While a showcase should aim to be brief and digestible, you can use a toggleable section to save space on longer showcases:
<details>
<summary>Click to view showcase</summary>
```rust
println!("My super cool code.");
```
</details>
## Migration Guide

View file

@ -6,7 +6,7 @@ test.beforeEach(async ({ page }) => {
const MAX_TIMEOUT_FOR_TEST = 300_000;
test.describe('WASM example', () => {
test.describe('Wasm example', () => {
test('Wait for success', async ({ page }, testInfo) => {
let start = new Date().getTime();

View file

@ -14,7 +14,7 @@ permissions:
jobs:
comment-on-breaking-change-label:
runs-on: ubuntu-latest
if: github.event.label.name == 'C-Breaking-Change' && !contains(github.event.pull_request.body, '## Migration Guide')
if: github.event.label.name == 'M-Needs-Migration-Guide' && !contains(github.event.pull_request.body, '## Migration Guide')
steps:
- uses: actions/github-script@v7
with:

View file

@ -30,7 +30,7 @@ jobs:
var artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{github.event.workflow_run.id }},
run_id: ${{ github.event.workflow_run.id }},
});
var matchArtifacts = artifacts.data.artifacts.filter((artifact) => {
return artifact.name == "missing-examples"
@ -88,7 +88,7 @@ jobs:
var artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{github.event.workflow_run.id }},
run_id: ${{ github.event.workflow_run.id }},
});
var matchArtifacts = artifacts.data.artifacts.filter((artifact) => {
return artifact.name == "missing-features"
@ -146,7 +146,7 @@ jobs:
var artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{github.event.workflow_run.id }},
run_id: ${{ github.event.workflow_run.id }},
});
var matchArtifacts = artifacts.data.artifacts.filter((artifact) => {
return artifact.name == "msrv"
@ -178,3 +178,64 @@ jobs:
issue_number: issue_number,
body: 'Your PR increases Bevy Minimum Supported Rust Version. Please update the `rust-version` field in the root Cargo.toml file.'
});
make-macos-screenshots-available:
runs-on: ubuntu-latest
timeout-minutes: 30
outputs:
branch-name: ${{ steps.branch-name.outputs.result }}
steps:
- name: 'Download artifact'
id: find-artifact
uses: actions/github-script@v7
with:
result-encoding: string
script: |
var artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{github.event.workflow_run.id }},
});
var matchArtifacts = artifacts.data.artifacts.filter((artifact) => {
return artifact.name == "screenshots-macos"
});
if (matchArtifacts.length == 0) { return "false" }
var matchArtifact = matchArtifacts[0];
var download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
var fs = require('fs');
fs.writeFileSync('${{github.workspace}}/screenshots-macos.zip', Buffer.from(download.data));
return "true"
- name: prepare artifact folder
run: |
unzip screenshots-macos.zip
mkdir screenshots
mv screenshots-* screenshots/
- name: save screenshots
uses: actions/upload-artifact@v4
with:
name: screenshots-macos
path: screenshots
- name: branch name
id: branch-name
run: |
if [ -f PR ]; then
echo "result=PR-$(cat PR)-${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT
else
echo "result=${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT
fi
compare-macos-screenshots:
name: Compare macOS screenshots
needs: [make-macos-screenshots-available]
uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml
with:
commit: ${{ github.event.workflow_run.head_sha }}
branch: ${{ needs.make-macos-screenshots-available.outputs.branch-name }}
artifact: screenshots-macos
os: macos
secrets: inherit

View file

@ -72,7 +72,7 @@ jobs:
run: cargo run -p ci -- lints
miri:
# Explicity use MacOS 14 to take advantage of M1 chip.
# Explicitly use macOS 14 to take advantage of M1 chip.
runs-on: macos-14
timeout-minutes: 60
steps:
@ -92,16 +92,15 @@ jobs:
components: miri
- name: CI job
# To run the tests one item at a time for troubleshooting, use
# cargo --quiet test --lib -- --list | sed 's/: test$//' | MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-permissive-provenance -Zmiri-disable-weak-memory-emulation" xargs -n1 cargo miri test -p bevy_ecs --lib -- --exact
# cargo --quiet test --lib -- --list | sed 's/: test$//' | MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-disable-weak-memory-emulation" xargs -n1 cargo miri test -p bevy_ecs --lib -- --exact
run: cargo miri test -p bevy_ecs
env:
# -Zrandomize-layout makes sure we don't rely on the layout of anything that might change
RUSTFLAGS: -Zrandomize-layout
# https://github.com/rust-lang/miri#miri--z-flags-and-environment-variables
# -Zmiri-disable-isolation is needed because our executor uses `fastrand` which accesses system time.
# -Zmiri-permissive-provenance disables warnings against int2ptr casts (since those are used by once_cell)
# -Zmiri-ignore-leaks is necessary because a bunch of tests don't join all threads before finishing.
MIRIFLAGS: -Zmiri-ignore-leaks -Zmiri-disable-isolation -Zmiri-permissive-provenance
MIRIFLAGS: -Zmiri-ignore-leaks -Zmiri-disable-isolation
check-compiles:
runs-on: ubuntu-latest
@ -128,6 +127,31 @@ jobs:
- name: Check Compile
# See tools/ci/src/main.rs for the commands this runs
run: cargo run -p ci -- compile
check-compiles-no-std:
runs-on: ubuntu-latest
timeout-minutes: 30
needs: ci
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
crates/bevy_ecs_compile_fail_tests/target/
crates/bevy_reflect_compile_fail_tests/target/
key: ${{ runner.os }}-cargo-check-compiles-no-std-${{ hashFiles('**/Cargo.toml') }}
- uses: dtolnay/rust-toolchain@stable
with:
targets: x86_64-unknown-none
- name: Install Linux dependencies
uses: ./.github/actions/install-linux-deps
- name: Check Compile
run: cargo run -p ci -- compile-check-no-std
build-wasm:
runs-on: ubuntu-latest
@ -219,7 +243,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Check for typos
uses: crate-ci/typos@v1.22.7
uses: crate-ci/typos@v1.26.8
- name: Typos info
if: failure()
run: |
@ -229,9 +253,8 @@ jobs:
echo 'if you use VSCode, you can also install `Typos Spell Checker'
echo 'You can find the extension here: https://marketplace.visualstudio.com/items?itemName=tekumara.typos-vscode'
run-examples-macos-metal:
# Explicity use MacOS 14 to take advantage of M1 chip.
# Explicitly use macOS 14 to take advantage of M1 chip.
runs-on: macos-14
timeout-minutes: 30
steps:
@ -264,6 +287,10 @@ jobs:
with:
name: example-traces-macos
path: traces
- name: Save PR number
if: ${{ github.event_name == 'pull_request' }}
run: |
echo ${{ github.event.number }} > ./screenshots/PR
- name: save screenshots
uses: actions/upload-artifact@v4
with:
@ -274,7 +301,7 @@ jobs:
with:
name: example-run-macos
path: example-run/
check-doc:
runs-on: ubuntu-latest
timeout-minutes: 30
@ -420,44 +447,19 @@ jobs:
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- name: Check for bevy_internal imports
- name: Check for internal Bevy imports
shell: bash
run: |
errors=""
for file in $(find examples tests -name '*.rs'); do
if grep -q "use bevy_internal" "$file"; then
errors+="ERROR: Detected 'use bevy_internal' in $file\n"
if grep -q "use bevy_" "$file"; then
errors+="ERROR: Detected internal Bevy import in $file\n"
fi
done
if [ -n "$errors" ]; then
echo -e "$errors"
echo " Avoid importing bevy_internal, it should not be used directly"
echo " Fix the issue by replacing 'bevy_internal' with 'bevy'"
echo " Example: 'use bevy::sprite::MaterialMesh2dBundle;' instead of 'bevy_internal::sprite::MaterialMesh2dBundle;'"
echo " Avoid importing internal Bevy crates, they should not be used directly"
echo " Fix the issue by replacing 'bevy_*' with 'bevy'"
echo " Example: 'use bevy::sprite::Mesh2d;' instead of 'bevy_internal::sprite::Mesh2d;'"
exit 1
fi
check-cfg:
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-check-doc-${{ hashFiles('**/Cargo.toml') }}
- uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.NIGHTLY_TOOLCHAIN }}
- name: Install Linux dependencies
uses: ./.github/actions/install-linux-deps
with:
wayland: true
xkb: true
- name: Build and check cfg typos
# See tools/ci/src/main.rs for the commands this runs
run: cargo run -p ci -- cfg-check

View file

@ -46,23 +46,32 @@ jobs:
- uses: dtolnay/rust-toolchain@stable
- name: Set up JDK 17
uses: actions/setup-java@v4
with:
java-version: '17'
distribution: 'temurin'
- name: Add Android targets
run: rustup target add aarch64-linux-android armv7-linux-androideabi
run: rustup target add aarch64-linux-android
- name: Install Cargo APK
run: cargo install --force cargo-apk
- name: Install Cargo NDK
run: cargo install --force cargo-ndk
- name: Build app for Android
run: ANDROID_NDK_ROOT=$ANDROID_NDK_LATEST_HOME cargo apk build --package bevy_mobile_example
- name: Build .so file
run: cargo ndk -t arm64-v8a -o android_example/app/src/main/jniLibs build --package bevy_mobile_example
env:
# This will reduce the APK size from 1GB to ~200MB
CARGO_PROFILE_DEV_DEBUG: false
- name: Build app for Android
run: cd examples/mobile/android_example && chmod +x gradlew && ./gradlew build
- name: Upload to Browser Stack
run: |
curl -u "${{ secrets.BROWSERSTACK_USERNAME }}:${{ secrets.BROWSERSTACK_ACCESS_KEY }}" \
-X POST "https://api-cloud.browserstack.com/app-automate/upload" \
-F "file=@target/debug/apk/bevyexample.apk" \
-F "file=@app/build/outputs/apk/debug/app-debug.apk" \
-F "custom_id=$GITHUB_RUN_ID"
nonce:

View file

@ -58,8 +58,22 @@ jobs:
- name: Build docs
env:
# needs to be in sync with [package.metadata.docs.rs]
RUSTDOCFLAGS: -Zunstable-options --cfg=docsrs
run: cargo doc --all-features --no-deps -p bevy -Zunstable-options -Zrustdoc-scrape-examples
RUSTFLAGS: --cfg docsrs_dep
RUSTDOCFLAGS: -Zunstable-options --cfg=docsrs --generate-link-to-definition
run: |
cargo doc \
-Zunstable-options \
-Zrustdoc-scrape-examples \
--all-features \
--workspace \
--no-deps \
--document-private-items \
--exclude ci \
--exclude errors \
--exclude bevy_mobile_example \
--exclude build-wasm-example \
--exclude build-templated-pages \
--exclude example-showcase
# This adds the following:
# - A top level redirect to the bevy crate documentation
@ -69,7 +83,7 @@ jobs:
run: |
echo "<meta http-equiv=\"refresh\" content=\"0; url=bevy/index.html\">" > target/doc/index.html
echo "dev-docs.bevyengine.org" > target/doc/CNAME
echo "User-Agent: *\nDisallow: /" > target/doc/robots.txt
echo $'User-Agent: *\nDisallow: /' > target/doc/robots.txt
rm target/doc/.lock
- name: Upload site artifact

View file

@ -27,9 +27,9 @@ jobs:
# Read the current version from Cargo.toml
current_version=$(cargo metadata --format-version 1 --no-deps | \
jq --raw-output '.packages | .[] | select(.name == "bevy").version')
# Sanity check: current version should be 0.X.Y
if ! grep -q '^0\.[0-9]\+\.[0-9]\+$' <<< "${current_version}"; then
echo "Invalid version (not in 0.X.Y format): ${current_version}"
# Sanity check: current version should be 0.X.Y-dev
if ! grep -q '^0\.[0-9]\+\.[0-9]\+-dev$' <<< "${current_version}"; then
echo "Invalid version (not in 0.X.Y-dev format): ${current_version}"
exit 1
fi
minor_version=$(sed 's/^0\.\([0-9]\+\).*/\1/' <<< "${current_version}")
@ -49,7 +49,7 @@ jobs:
--exclude build-wasm-example
- name: Create PR
uses: peter-evans/create-pull-request@v6
uses: peter-evans/create-pull-request@v7
with:
delete-branch: true
base: "main"

View file

@ -46,7 +46,7 @@ jobs:
--exclude build-wasm-example
- name: Create PR
uses: peter-evans/create-pull-request@v6
uses: peter-evans/create-pull-request@v7
with:
delete-branch: true
base: "main"

View file

@ -0,0 +1,94 @@
name: Send Screenshots to Pixel Eagle
on:
workflow_call:
inputs:
artifact:
required: true
type: string
commit:
required: true
type: string
branch:
required: true
type: string
os:
required: true
type: string
jobs:
send-to-pixel-eagle:
name: Send screenshots to Pixel Eagle
runs-on: ubuntu-24.04
steps:
- name: Download artifact
uses: actions/download-artifact@v4
with:
pattern: ${{ inputs.artifact }}
- name: Send to Pixel Eagle
env:
project: B04F67C0-C054-4A6F-92EC-F599FEC2FD1D
run: |
# Create a new run with its associated metadata
metadata='{"os":"${{ inputs.os }}", "commit": "${{ inputs.commit }}", "branch": "${{ inputs.branch }}"}'
run=`curl https://pixel-eagle.vleue.com/$project/runs --json "$metadata" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.id'`
SAVEIFS=$IFS
cd ${{ inputs.artifact }}
# Read the hashes of the screenshots for fast comparison when they are equal
IFS=$'\n'
# Build a json array of screenshots and their hashes
hashes='[';
for screenshot in $(find . -type f -name "*.png");
do
name=${screenshot:14}
echo $name
hash=`shasum -a 256 $screenshot | awk '{print $1}'`
hashes="$hashes [\"$name\",\"$hash\"],"
done
hashes=`echo $hashes | rev | cut -c 2- | rev`
hashes="$hashes]"
IFS=$SAVEIFS
# Upload screenshots with unknown hashes
curl https://pixel-eagle.vleue.com/$project/runs/$run/hashes --json "$hashes" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.[]|[.name] | @tsv' |
while IFS=$'\t' read -r name; do
name=`echo $name | tr -d '"'`
echo "Uploading $name"
curl https://pixel-eagle.vleue.com/$project/runs/$run/screenshots -F "data=@./screenshots-$name" -F "screenshot=$name" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }}
echo
done
IFS=$SAVEIFS
cd ..
# Trigger comparison with the main branch on the same os
curl https://pixel-eagle.vleue.com/$project/runs/$run/compare/auto --json '{"os":"<equal>", "branch": "main"}' --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} > pixeleagle.json
# Log results
compared_with=`cat pixeleagle.json | jq '.to'`
status=0
missing=`cat pixeleagle.json | jq '.missing | length'`
if [ ! $missing -eq 0 ]; then
echo "There are $missing missing screenshots"
echo "::warning title=$missing missing screenshots on ${{ inputs.os }}::https://pixel-eagle.vleue.com/$project/runs/$run/compare/$compared_with"
status=1
fi
diff=`cat pixeleagle.json | jq '.diff | length'`
if [ ! $diff -eq 0 ]; then
echo "There are $diff screenshots with a difference"
echo "::warning title=$diff different screenshots on ${{ inputs.os }}::https://pixel-eagle.vleue.com/$project/runs/$run/compare/$compared_with"
status=1
fi
echo "created run $run: https://pixel-eagle.vleue.com/$project/runs/$run/compare/$compared_with"
exit $status

View file

@ -49,6 +49,12 @@ jobs:
- uses: dtolnay/rust-toolchain@stable
- name: Set up JDK 17
uses: actions/setup-java@v4
with:
java-version: '17'
distribution: 'temurin'
- uses: actions/cache@v4
with:
path: |
@ -60,17 +66,21 @@ jobs:
key: ${{ runner.os }}-cargo-build-android-${{ hashFiles('**/Cargo.toml') }}
- name: Install Android targets
run: rustup target add aarch64-linux-android armv7-linux-androideabi
run: rustup target add aarch64-linux-android
- name: Install Cargo APK
run: cargo install --force cargo-apk
- name: Install Cargo NDK
run: cargo install --force cargo-ndk
- name: Build APK
run: ANDROID_NDK_ROOT=$ANDROID_NDK_LATEST_HOME cargo apk build --package bevy_mobile_example
- name: Build .so file
run: cargo ndk -t arm64-v8a -o android_example/app/src/main/jniLibs build --package bevy_mobile_example
- name: Build app for Android
run: cd examples/mobile/android_example && chmod +x gradlew && ./gradlew build
run-examples-linux-vulkan:
if: ${{ github.event_name == 'merge_group' }}
runs-on: ubuntu-latest
# also run when pushed to main to update reference screenshots
if: ${{ github.event_name != 'pull_request' }}
runs-on: ubuntu-22.04
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
@ -126,15 +136,25 @@ jobs:
name: example-run-linux
path: example-run/
compare-linux-screenshots:
name: Compare Linux screenshots
needs: [run-examples-linux-vulkan]
uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml
with:
commit: ${{ github.sha }}
branch: ${{ github.ref_name }}
artifact: screenshots-linux
os: linux
secrets: inherit
run-examples-on-windows-dx12:
if: ${{ github.event_name == 'merge_group' }}
# also run when pushed to main to update reference screenshots
if: ${{ github.event_name != 'pull_request' }}
runs-on: windows-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: 1.78
- uses: dtolnay/rust-toolchain@stable
- name: Build bevy
shell: bash
# this uses the same command as when running the example to ensure build is reused
@ -172,9 +192,20 @@ jobs:
name: example-run-windows
path: example-run/
compare-windows-screenshots:
name: Compare Windows screenshots
needs: [run-examples-on-windows-dx12]
uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml
with:
commit: ${{ github.sha }}
branch: ${{ github.ref_name }}
artifact: screenshots-windows
os: windows
secrets: inherit
run-examples-on-wasm:
if: ${{ github.event_name == 'merge_group' }}
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@ -211,7 +242,7 @@ jobs:
npx playwright install --with-deps
cd ../..
- name: First WASM build
- name: First Wasm build
run: |
cargo build --release --example ui --target wasm32-unknown-unknown

View file

@ -11,6 +11,8 @@ on:
jobs:
welcome:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: actions/github-script@v7
with:
@ -41,5 +43,5 @@ jobs:
repo: context.repo.repo,
body: `**Welcome**, new contributor!
Please make sure you've read our [contributing guide](https://github.com/bevyengine/bevy/blob/main/CONTRIBUTING.md) and we look forward to reviewing your pull request shortly ✨`
Please make sure you've read our [contributing guide](https://bevyengine.org/learn/contribute/introduction) and we look forward to reviewing your pull request shortly ✨`
})

.gitignore vendored
View file

@ -1,24 +1,33 @@
# Rust build artifacts
/target
crates/*/target
**/*.rs.bk
/benches/target
/tools/compile_fail_utils/target
# Cargo
Cargo.lock
.cargo/config
.cargo/config.toml
# IDE files
/.idea
/.vscode
/benches/target
.zed
dxcompiler.dll
dxil.dll
# Bevy Assets
assets/**/*.meta
crates/bevy_asset/imported_assets
imported_assets
# Bevy Examples
example_showcase_config.ron
example-showcase-reports/
# Generated by "examples/scene/scene.rs"
assets/scenes/load_scene_example-new.scn.ron
# Generated by "examples/window/screenshot.rs"
**/screenshot-*.png
assets/**/*.meta
crates/bevy_asset/imported_assets
imported_assets
example_showcase_config.ron
example-showcase-reports/

File diff suppressed because it is too large

View file

@ -1,466 +1,3 @@
# Contributing to Bevy
Hey, so you're interested in contributing to Bevy!
Feel free to pitch in on whatever interests you and we'll be happy to help you contribute.
Check out our community's [Code of Conduct](https://github.com/bevyengine/bevy/blob/main/CODE_OF_CONDUCT.md) and feel free to say hi on [Discord] if you'd like.
It's a nice place to chat about Bevy development, ask questions, and get to know the other contributors and users in a less formal setting.
Read on if you're looking for:
* The high-level design goals of Bevy.
* Conventions and informal practices we follow when developing Bevy.
* General advice on good open source collaboration practices.
* Concrete ways you can help us, no matter your background or skill level.
We're thrilled to have you along as we build!
## Getting oriented
Bevy, like any general-purpose game engine, is a large project!
It can be a bit overwhelming to start, so here's the bird's-eye view.
The [Bevy Engine Organization](https://github.com/bevyengine) has 4 primary repos:
1. [**`bevy`**](https://github.com/bevyengine/bevy): This is where the engine itself lives. The bulk of development work occurs here.
2. [**`bevy-website`**](https://github.com/bevyengine/bevy-website): Where the [official website](https://bevyengine.org/), release notes, Bevy Book, and Bevy Assets are hosted. It is created using the Zola static site generator.
3. [**`bevy-assets`**](https://github.com/bevyengine/bevy-assets): A collection of community-made tutorials, plugins, crates, games, and tools! Make a PR if you want to showcase your projects there!
4. [**`rfcs`**](https://github.com/bevyengine/rfcs): A place to collaboratively build and reach consensus on designs for large or controversial features.
The `bevy` repo itself contains many smaller subcrates. Most of them can be used by themselves and many of them can be modularly replaced. This enables developers to pick and choose the parts of Bevy that they want to use.
Some crates of interest:
* [**`bevy_ecs`**](./crates/bevy_ecs): The core data model for Bevy. Most Bevy features are implemented on top of it. It is also fully functional as a stand-alone ECS, which can be very valuable if you're looking to integrate it with other game engines or use it for non-game executables.
* [**`bevy_app`**](./crates/bevy_app): The api used to define Bevy Plugins and compose them together into Bevy Apps.
* [**`bevy_tasks`**](./crates/bevy_tasks): Our light-weight async executor. This drives most async and parallel code in Bevy.
* [**`bevy_render`**](./crates/bevy_render): Our core renderer API. It handles interaction with the GPU, such as the creation of Meshes, Textures, and Shaders. It also exposes a modular Render Graph for composing render pipelines. All 2D and 3D render features are implemented on top of this crate.
## What we're trying to build
Bevy is a completely free and open source game engine built in Rust. It currently has the following design goals:
* **Capable**: Offer a complete 2D and 3D feature set.
* **Simple**: Easy for newbies to pick up, but infinitely flexible for power users.
* **Data Focused**: Data-oriented architecture using the Entity Component System paradigm.
* **Modular**: Use only what you need. Replace what you don't like.
* **Fast**: App logic should run quickly, and when possible, in parallel.
* **Productive**: Changes should compile quickly ... waiting isn't fun.
Bevy also currently has the following "development process" goals:
* **Rapid experimentation over API stability**: We need the freedom to experiment and iterate in order to build the best engine we can. This will change over time as APIs prove their staying power.
* **Consistent vision**: The engine needs to feel consistent and cohesive. This takes precedence over democratic and/or decentralized processes. See our [*Bevy Organization doc*](/docs/the_bevy_organization.md) for more details.
* **Flexibility over bureaucracy**: Developers should feel productive and unencumbered by development processes.
* **Focus**: The Bevy Org should focus on building a small number of features excellently over merging every new community-contributed feature quickly. Sometimes this means pull requests will sit unmerged for a long time. This is the price of focus and we are willing to pay it. Fortunately Bevy is modular to its core. 3rd party plugins are a great way to work around this policy.
* **User-facing API ergonomics come first**: Solid user experience should receive significant focus and investment. It should rarely be compromised in the interest of internal implementation details.
* **Modularity over deep integration**: Individual crates and features should be "pluggable" whenever possible. Don't tie crates, features, or types together that don't need to be.
* **Don't merge everything ... don't merge too early**: Every feature we add increases maintenance burden and compile times. Only merge features that are "generally" useful. Don't merge major changes or new features unless we have relative consensus that the design is correct *and* that we have the developer capacity to support it. When possible, make a 3rd party Plugin / crate first, then consider merging once the API has been tested in the wild. Bevy's modular structure means that the only difference between "official engine features" and "third party plugins" is our endorsement and the repo the code lives in. We should take advantage of that whenever possible.
* **Control and consistency over 3rd party code reuse**: Only add a dependency if it is *absolutely* necessary. Every dependency we add decreases our autonomy and consistency. Dependencies also have the potential to increase compile times and risk pulling in sub-dependencies we don't want / need.
* **Don't re-invent every wheel**: As a counter to the previous point, don't re-invent everything at all costs. If there is a crate in the Rust ecosystem that is the "de-facto" standard (ex: wgpu, winit, cpal), we should heavily consider using it. Bevy should be a positive force in the ecosystem. We should drive the improvements we need into these core ecosystem crates.
* **Rust-first**: Engine and user-facing code should optimize and encourage Rust-only workflows. Adding additional languages increases internal complexity, fractures the Bevy ecosystem, and makes it harder for users to understand the engine. Never compromise a Rust interface in the interest of compatibility with other languages.
* **Thoughtful public interfaces over maximal configurability**: Symbols and apis should be private by default. Every public API should be thoughtfully and consistently designed. Don't expose unnecessary internal implementation details. Don't allow users to "shoot themselves in the foot". Favor one "happy path" api over multiple apis for different use cases.
* **Welcome new contributors**: Invest in new contributors. Help them fill knowledge and skill gaps. Don't ever gatekeep Bevy development according to notions of required skills or credentials. Help new developers find their niche.
* **Civil discourse**: We need to collectively discuss ideas and the best ideas *should* win. But conversations need to remain respectful at all times. Remember that we're all in this together. Always follow our [Code of Conduct](https://github.com/bevyengine/bevy/blob/main/CODE_OF_CONDUCT.md).
* **Test what you need to**: Write useful tests. Don't write tests that aren't useful. We *generally* aren't strict about unit testing every line of code. We don't want you to waste your time. But at the same time:
* Most new features should have at least one minimal [example](https://github.com/bevyengine/bevy/tree/main/examples). These also serve as simple integration tests, as they are run as part of our CI process.
* The more complex or "core" a feature is, the more strict we are about unit tests. Use your best judgement here. We will let you know if your pull request needs more tests. We use [Rust's built in testing framework](https://doc.rust-lang.org/book/ch11-01-writing-tests.html).
## The Bevy Organization
The Bevy Organization is the group of people responsible for stewarding the Bevy project. It handles things like merging pull requests, choosing project direction, managing bugs / issues / feature requests, running the Bevy website, controlling access to secrets, defining and enforcing best practices, etc.
Note that you *do not* need to be a member of the Bevy Organization to contribute to Bevy. Community contributors (this means you) can freely open issues, submit pull requests, and review pull requests.
Check out our dedicated [Bevy Organization document](/docs/the_bevy_organization.md) to learn more about how we're organized.
### Classifying PRs
[Labels](https://github.com/bevyengine/bevy/labels) are our primary tool to organize work.
Each label has a prefix denoting its category:
* **D:** Difficulty. In order, these are:
* `D-Trivial`: typos, obviously incorrect one-line bug fixes, code reorganization, renames
* `D-Straightforward`: simple bug fixes and API improvements, docs, test and examples
* `D-Modest`: new features, refactors, challenging bug fixes
* `D-Complex`: rewrites and unusually complex features
* When applied to an issue, these labels reflect the estimated level of expertise (not time) required to fix the issue.
* When applied to a PR, these labels reflect the estimated level of expertise required to *review* the PR.
* The `D-Domain-Expert` and `D-Domain-Agnostic` labels are modifiers, which describe if unusually high or low degrees of domain-specific knowledge are required.
* The `D-Unsafe` label is applied to any code that touches `unsafe` Rust, which requires special skills and scrutiny.
* **X:** Controversiality. In order, these are:
* `X-Uncontroversial`: everyone should agree that this is a good idea
* `X-Contentious`: there's real design thought needed to ensure that this is the right path forward
* `X-Controversial`: there's active disagreement and/or large-scale architectural implications involved
* `X-Blessed`: work that was controversial, but whose controversial (but perhaps not technical) elements have been endorsed by the relevant decision makers.
* **A:** Area (e.g. A-Animation, A-ECS, A-Rendering, ...).
* **C:** Category (e.g. C-Breaking-Change, C-Code-Quality, C-Docs, ...).
* **O:** Operating System (e.g. O-Linux, O-Web, O-Windows, ...).
* **P:** Priority (e.g. P-Critical, P-High, ...)
* Most work is not explicitly categorized by priority: volunteer work mostly occurs on an ad hoc basis depending on contributor interests
* **S:** Status (e.g. S-Blocked, S-Needs-Review, S-Needs-Design, ...).
The rules for how PRs get merged depend on their classification by controversy and difficulty.
More difficult PRs will require more careful review from experts,
while more controversial PRs will require rewrites to reduce the costs involved and/or sign-off from Subject Matter Experts and Maintainers.
When making PRs, try to split out more controversial changes from less controversial ones, in order to make your work easier to review and merge.
It is also a good idea to try and split out simple changes from more complex changes if it is not helpful for them to be reviewed together.
Some things that are reason to apply the [`S-Controversial`] label to a PR:
1. Changes to a project-wide workflow or style.
2. New architecture for a large feature.
3. Serious tradeoffs were made.
4. Heavy user impact.
5. New ways for users to make mistakes (footguns).
6. Adding a dependency.
7. Touching licensing information (due to level of precision required).
8. Adding root-level files (due to the high level of visibility).
Some things that are reason to apply the [`D-Complex`] label to a PR:
1. Introduction or modification of soundness relevant code (for example `unsafe` code).
2. High levels of technical complexity.
3. Large-scale code reorganization.
Examples of PRs that are not [`S-Controversial`] or [`D-Complex`]:
* Fixing dead links.
* Removing dead code or unused dependencies.
* Typo and grammar fixes.
* [Add `Mut::reborrow`](https://github.com/bevyengine/bevy/pull/7114).
* [Add `Res::clone`](https://github.com/bevyengine/bevy/pull/4109).
Examples of PRs that are [`S-Controversial`] but not [`D-Complex`]:
* [Implement and require `#[derive(Component)]` on all component structs](https://github.com/bevyengine/bevy/pull/2254).
* [Use default serde impls for Entity](https://github.com/bevyengine/bevy/pull/6194).
Examples of PRs that are not [`S-Controversial`] but are [`D-Complex`]:
* [Ensure `Ptr`/`PtrMut`/`OwningPtr` are aligned in debug builds](https://github.com/bevyengine/bevy/pull/7117).
* [Replace `BlobVec`'s `swap_scratch` with a `swap_nonoverlapping`](https://github.com/bevyengine/bevy/pull/4853).
Examples of PRs that are both [`S-Controversial`] and [`D-Complex`]:
* [bevy_reflect: Binary formats](https://github.com/bevyengine/bevy/pull/6140).
Some useful pull request queries:
* [PRs which need reviews and are not `D-Complex`](https://github.com/bevyengine/bevy/pulls?q=is%3Apr+-label%3AD-Complex+-label%3AS-Ready-For-Final-Review+-label%3AS-Blocked++).
* [`D-Complex` PRs which need reviews](https://github.com/bevyengine/bevy/pulls?q=is%3Apr+label%3AD-Complex+-label%3AS-Ready-For-Final-Review+-label%3AS-Blocked).
[`S-Controversial`]: https://github.com/bevyengine/bevy/pulls?q=is%3Aopen+is%3Apr+label%3AS-Controversial
[`D-Complex`]: https://github.com/bevyengine/bevy/pulls?q=is%3Aopen+is%3Apr+label%3AD-Complex
### Prioritizing PRs and issues
We use [Milestones](https://github.com/bevyengine/bevy/milestones) to track issues and PRs that:
* Need to be merged/fixed before the next release. This is generally for extremely bad bugs i.e. UB or important functionality being broken.
* Would have higher user impact and are almost ready to be merged/fixed.
There are also two priority labels: [`P-Critical`](https://github.com/bevyengine/bevy/issues?q=is%3Aopen+is%3Aissue+label%3AP-Critical) and [`P-High`](https://github.com/bevyengine/bevy/issues?q=is%3Aopen+is%3Aissue+label%3AP-High) that can be used to find issues and PRs that need to be resolved urgently.
### Closing PRs and Issues
From time to time, PRs are unsuitable to be merged in a way that cannot be readily fixed.
Rather than leaving these PRs open in limbo indefinitely, they should simply be closed.
This might happen if:
1. The PR is spam or malicious.
2. The work has already been done elsewhere or is otherwise fully obsolete.
3. The PR was successfully adopted.
4. The work is particularly low quality, and the author is resistant to coaching.
5. The work adds features or abstraction of limited value, especially in a way that could easily be recreated outside of the engine.
6. The work has been sitting in review for so long and accumulated so many conflicts that it would be simpler to redo it from scratch.
7. The PR is pointlessly large, and should be broken into multiple smaller PRs for easier review.
PRs that are `S-Adopt-Me` should be left open, but only if they're genuinely more useful to rebase rather than simply use as a reference.
There are several paths for PRs to be closed:
1. Obviously, authors may close their own PRs for any reason at any time.
2. If a PR is clearly spam or malicious, anyone with triage rights is encouraged to close out the PR and report it to Github.
3. If the work has already been done elsewhere, adopted or otherwise obsoleted, anyone with triage rights is encouraged to close out the PR with an explanatory comment.
4. Anyone may nominate a PR for closure, by bringing it to the attention of the author and / or one of the SMEs / maintainers. Let them press the button, but this is generally well-received and helpful.
5. SMEs or maintainers may and are encouraged to unilaterally close PRs that fall into one or more of the remaining categories.
6. In the case of PRs where some members of the community (other than the author) are in favor and some are opposed, any two relevant SMEs or maintainers may act in concert to close the PR.
When closing a PR, check if it has an issue linked.
If it does not, you should strongly consider creating an issue and linking the now-closed PR to help make sure the previous work can be discovered and credited.
## Making changes to Bevy
Most changes don't require much "process". If your change is relatively straightforward, just do the following:
1. A community member (that's you!) creates one of the following:
* [GitHub Discussions]: An informal discussion with the community. This is the place to start if you want to propose a feature or specific implementation.
* [Issue](https://github.com/bevyengine/bevy/issues): A formal way for us to track a bug or feature. Please look for duplicates before opening a new issue and consider starting with a Discussion.
* [Pull Request](https://github.com/bevyengine/bevy/pulls) (or PR for short): A request to merge code changes. This starts our "review process". You are welcome to start with a pull request, but consider starting with an Issue or Discussion for larger changes (or if you aren't certain about a design). We don't want anyone to waste their time on code that didn't have a chance to be merged! But conversely, sometimes PRs are the most efficient way to propose a change. Just use your own judgement here.
2. Other community members review and comment in an ad-hoc fashion. Active subject matter experts may be pulled into a thread using `@mentions`. If your PR has been quiet for a while and is ready for review, feel free to leave a message to "bump" the thread, or bring it up on [Discord](https://discord.gg/bevy) in an appropriate engine development channel.
3. Once they're content with the pull request (design, code quality, documentation, tests), individual reviewers leave "Approved" reviews.
4. After consensus has been reached (typically two approvals from the community or one for extremely simple changes) and CI passes, the [S-Ready-For-Final-Review](https://github.com/bevyengine/bevy/issues?q=is%3Aopen+is%3Aissue+label%3AS-Ready-For-Final-Review) label is added.
5. When they find time, someone with merge rights performs a final code review and queues the PR for merging.
### Complex changes
Individual contributors often lead major new features and reworks. However these changes require more design work and scrutiny. Complex changes like this tend to go through the following lifecycle:
1. A need or opportunity is identified and an issue is made, laying out the general problem.
2. As needed, this is discussed further on that issue thread, in cross-linked [GitHub Discussion] threads, or on [Discord] in the Engine Development channels.
3. Either a Draft Pull Request or an RFC is made. As discussed in the [RFC repo](https://github.com/bevyengine/rfcs), complex features need RFCs, but these can be submitted before or after prototyping work has been started.
4. If feasible, parts that work on their own (even if they're only useful once the full complex change is merged) get split out into individual PRs to make them easier to review.
5. The community as a whole helps improve the Draft PR and/or RFC, leaving comments, making suggestions, and submitting pull requests to the original branch.
6. Once the RFC is merged and/or the Draft Pull Request is transitioned out of draft mode, the [normal change process outlined in the previous section](#making-changes-to-bevy) can begin.
## How you can help
If you've made it to this page, you're probably already convinced that Bevy is a project you'd like to see thrive.
But how can *you* help?
No matter your experience level with Bevy or Rust or your level of commitment, there are ways to meaningfully contribute.
Take a look at the sections that follow to pick a route (or five) that appeal to you.
If you ever find yourself at a loss for what to do, or in need of mentorship or advice on how to contribute to Bevy, feel free to ask in [Discord] and one of our more experienced community members will be happy to help.
### Join a working group
Active initiatives in Bevy are organized into temporary working groups: choosing one of those and asking how to help can be a fantastic way to get up to speed and be immediately useful.
Working groups are public, open-membership groups that work together to tackle a broad-but-scoped initiative.
The work that they do is coordinated in a forum-channel on [Discord](https://discord.gg/bevy), although they also create issues and may use project boards for tangible work that needs to be done.
There are no special requirements to be a member, and no formal membership list or leadership.
Anyone can help, and you should expect to compromise and work together with others to bring a shared vision to life.
Working groups are *spaces*, not clubs.
### Start a working group
When tackling a complex initiative, friends and allies can make things go much more smoothly.
To start a working group:
1. Decide what the working group is going to focus on. This should be tightly focused and achievable!
2. Gather at least 3 people including yourself who are willing to be in the working group.
3. Ping the `@Maintainer` role on Discord in [#engine-dev](https://discord.com/channels/691052431525675048/692572690833473578) announcing your mutual intent and a one or two sentence description of your plans.
The maintainers will briefly evaluate the proposal in consultation with the relevant SMEs and give you a thumbs up or down on whether this is something Bevy can and wants to explore right now.
You don't need a concrete plan at this stage, just a sensible argument for both "why is this something that could be useful to Bevy" and "why there aren't any serious barriers in implementing this in the near future".
If they're in favor, a maintainer will create a forum channel for you and you're off to the races.
Your initial task is writing up a design doc: laying out the scope of work and general implementation strategy.
Here's a [solid example of a design doc](https://github.com/bevyengine/bevy/issues/12365), although feel free to use whatever format works best for your team.
Once that's ready, get a sign-off on the broad vision and goals from the appropriate SMEs and maintainers.
This is the primary review step: maintainers and SMEs should be broadly patient and supportive even if they're skeptical until a proper design doc is in hand to evaluate.
With a sign-off in hand, post the design doc to [Github Discussions](https://github.com/bevyengine/bevy/discussions) with the [`C-Design-Doc` label](https://github.com/bevyengine/bevy/discussions?discussions_q=is%3Aopen+label%3A%22C-Design+Doc%22) for archival purposes and begin work on implementation.
Post PRs that you need review on in your group's forum thread, ask for advice, and share the load.
Controversial PRs are still `S-Controversial`, but with a sign-off-in-principle, things should go more smoothly.
If work peters out and the initiative dies, maintainers can wind down working groups (in consultation with SMEs and the working group itself).
This is normal and expected: projects fail for all sorts of reasons!
However, it's important to both keep the number of working groups relatively small and ensure they're active:
they serve a vital role in onboarding new contributors.
Once your implementation work laid out in your initial design doc is complete, it's time to wind down the working group.
Feel free to make another one though to tackle the next step in your grand vision!
### Battle-testing Bevy
Ultimately, Bevy is a tool that's designed to help people make cool games.
By using Bevy, you can help us catch bugs, prioritize new features, polish off the rough edges, and promote the project.
If you need help, don't hesitate to ask on [GitHub Discussions], [Discord], or [reddit](https://www.reddit.com/r/bevy). Generally, you should prefer asking questions on [GitHub Discussions], as they are more searchable.
When you think you've found a bug, missing documentation, or a feature that would help you make better games, please [file an issue](https://github.com/bevyengine/bevy/issues/new/choose) on the main `bevy` repo.
Do your best to search for duplicate issues, but if you're unsure, open a new issue and link to other related issues on the thread you make.
Once you've made something that you're proud of, feel free to drop a link, video, or screenshot in `#showcase` on [Discord]!
If you release a game on [itch.io](https://itch.io/games/tag-bevy) we'd be thrilled if you tagged it with `bevy`.
### Teaching others
Bevy is still very young, and light on documentation, tutorials, and accumulated expertise.
By helping others with their issues, and teaching them about Bevy, you will naturally learn the engine and codebase in greater depth while also making our community better!
Some of the best ways to do this are:
* Answering questions on [GitHub Discussions], [Discord], and [reddit](https://www.reddit.com/r/bevy).
* Writing tutorials, guides, and other informal documentation and sharing them on [Bevy Assets](https://github.com/bevyengine/bevy-assets).
* Streaming, writing blog posts about creating your game, and creating videos. Share these in the `#devlogs` channel on [Discord]!
### Writing plugins
You can improve Bevy's ecosystem by building your own Bevy Plugins and crates.
Non-trivial, reusable functionality that stands on its own is a good candidate for a plugin.
If it's closer to a snippet or design pattern, you may want to share it with the community on [Discord], Reddit, or [GitHub Discussions] instead.
Check out our [plugin guidelines](https://bevyengine.org/learn/book/plugin-development/) for helpful tips and patterns.
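To make that concrete, here is a minimal sketch of what a plugin can look like. The plugin, resource, and system names are hypothetical, and the `App`/`Plugin` calls follow recent Bevy releases, so treat it as a shape to follow rather than copy-paste code.

```rust
use bevy::prelude::*;

/// Hypothetical resource owned and managed by the plugin.
#[derive(Resource, Default)]
struct ScoreBoard {
    points: u32,
}

/// A hypothetical plugin that registers its own resource and system.
pub struct ScoreBoardPlugin;

impl Plugin for ScoreBoardPlugin {
    fn build(&self, app: &mut App) {
        app.init_resource::<ScoreBoard>()
            .add_systems(Update, report_score);
    }
}

/// A system owned by the plugin; users never have to wire it up themselves.
fn report_score(score: Res<ScoreBoard>) {
    if score.is_changed() {
        info!("score is now {}", score.points);
    }
}

fn main() {
    // Users enable the whole feature with a single line.
    App::new()
        .add_plugins((DefaultPlugins, ScoreBoardPlugin))
        .run();
}
```

Keeping all registration inside `Plugin::build` is what makes a plugin a one-line drop-in for other people's apps.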
### Fixing bugs
Bugs in Bevy (or the associated website / book) are filed on the issue tracker using the [`C-Bug`](https://github.com/bevyengine/bevy/issues?q=is%3Aissue+is%3Aopen+label%3AC-Bug) label.
If you're looking for an easy place to start, take a look at the [`D-Good-First-Issue`](https://github.com/bevyengine/bevy/issues?q=is%3Aopen+is%3Aissue+label%3AD-Good-First-Issue) label, and feel free to ask questions on the issue's thread or on [Discord].
You don't need anyone's permission to try fixing a bug or adding a simple feature, but stating that you'd like to tackle an issue can be helpful to avoid duplicated work.
When you make a pull request that fixes an issue, include a line that says `Fixes #X` (or "Closes"), where `X` is the issue number.
This will cause the issue in question to be closed when your PR is merged.
General improvements to code quality are also welcome!
Bevy can always be safer, better tested, and more idiomatic.
### Writing docs
Like every other large, rapidly developing open source library you've ever used, Bevy's documentation can always use improvement.
This is incredibly valuable, easily distributed work, but requires a bit of guidance:
* Inaccurate documentation is worse than no documentation: prioritize fixing broken docs.
* Bevy is remarkably unstable: before tackling a new major documentation project, check in with the community on Discord or GitHub about the stability of that feature and upcoming plans (making an issue about specific missing docs is a great way to plan); this will save you heartache.
* Code documentation (doc examples and the examples folder) is easier to maintain because the compiler will tell us when it breaks (see the sketch after this list).
* Inline documentation should be technical and to the point. Link relevant examples or other explanations if broader context is useful.
* The Bevy book is hosted on the `bevy-website` repo and targeted towards beginners who are just getting to know Bevy (and perhaps Rust!).
* Accepted RFCs are not documentation: they serve only as a record of accepted decisions.
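As an illustration of why doc examples are easier to maintain: an example block inside a doc comment is compiled and run by `cargo test --doc`, so the compiler and CI flag it the moment it drifts from the code. The function and crate name below are hypothetical; only the shape matters.

```rust
/// Returns the squared length of a 2D vector.
///
/// Prefer the squared length when you only need to compare distances,
/// since it avoids a square root.
///
/// # Examples
///
/// ```
/// # use my_crate::length_squared;
/// assert_eq!(length_squared(3.0, 4.0), 25.0);
/// ```
pub fn length_squared(x: f32, y: f32) -> f32 {
    x * x + y * y
}
```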
[docs.rs](https://docs.rs/bevy) is built from the last release's documentation, which is written inline, directly above the code it documents.
To view the current docs on `main` before you contribute, clone the `bevy` repo and run `cargo doc --open`, or go to [dev-docs.bevyengine.org](https://dev-docs.bevyengine.org/),
which has the latest API reference built from the repo on every commit made to the `main` branch.
### Writing examples
Most [examples in Bevy](https://github.com/bevyengine/bevy/tree/main/examples) aim to clearly demonstrate a single feature or group of closely related small features, or to show how to accomplish a particular task (such as asset loading, creating a custom shader, or testing your app).
In rare cases, creating new "game" examples is justified in order to demonstrate new features that open a complex class of functionality in a way that's hard to demonstrate in isolation or requires additional integration testing.
Examples in Bevy should be:
1. **Working:** They must compile and run, and any introduced errors in them should be obvious (through tests, simple results or clearly displayed behavior).
2. **Clear:** They must use descriptive variable names, be formatted, and be appropriately commented. Try your best to showcase best practices when it doesn't obscure the point of the example.
3. **Relevant:** They should explain, through comments or variable names, what they do and how this can be useful to a game developer.
4. **Minimal:** They should be no larger or more complex than is needed to meet the goals of the example (a minimal skeleton is sketched after this list).
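Putting those four criteria together, the skeleton of a typical example looks something like the sketch below. It is illustrative only: the system name is made up, and the `Camera2dBundle` call mirrors recent Bevy releases, so check it against the current `main` branch before copying it.

```rust
//! Demonstrates a single, clearly scoped feature.
//!
//! Keep this module-level doc comment short and descriptive: it tells
//! readers and reviewers exactly what the example is trying to show.

use bevy::prelude::*;

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_systems(Startup, setup)
        .run();
}

/// Spawn only the entities the example actually needs.
fn setup(mut commands: Commands) {
    commands.spawn(Camera2dBundle::default());
}
```

Anything beyond the minimum needed to show the feature off (extra UI, asset pipelines, unrelated systems) is usually better left out or split into a separate example.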
When you add a new example, be sure to update `examples/README.md` with the new example and add it to the root `Cargo.toml` file.
Run `cargo run -p build-templated-pages -- build-example-page` to do this automatically.
Use a generous sprinkling of keywords in your description: these are commonly used to search for a specific example.
See the [example style guide](.github/contributing/example_style_guide.md) to help make sure the style of your example matches what we're already using.
More complex demonstrations of functionality are also welcome, but these should be submitted to [bevy-assets](https://github.com/bevyengine/bevy-assets).
### Reviewing others' work
With the sheer volume of activity in Bevy's community, reviewing others' work with the aim of improving it is one of the most valuable things you can do.
You don't need to be an Elder Rustacean to be useful here: anyone can catch missing tests, unclear docs, logic errors, and so on.
If you have specific skills (e.g. advanced familiarity with `unsafe` code, rendering knowledge or web development experience) or personal experience with a problem, try to prioritize those areas to ensure we can get appropriate expertise where we need it.
When you find (or make) a PR that you don't feel comfortable reviewing, but you *can* think of someone who does, consider using GitHub's "Request review" functionality (in the top-right of the PR screen) to bring the work to their attention.
If they're not a Bevy Org member, you'll need to ping them in the thread directly: that's fine too!
Almost everyone working on Bevy is a volunteer: this should be treated as a gentle nudge, rather than an assignment of work.
Consider checking the Git history for appropriate reviewers, or ask on Discord for suggestions.
Focus on giving constructive, actionable feedback that results in real improvements to code quality or end-user experience.
If you don't understand why an approach was taken, please ask!
Provide actual code suggestions when that is helpful. Small changes work well as comments or in-line suggestions on specific lines of code.
Larger changes deserve a comment in the main thread, or a pull request to the original author's branch (but please mention that you've made one).
When in doubt about a matter of architectural philosophy, refer back to [*What we're trying to build*](#what-were-trying-to-build) for guidance.
Once you're happy with the work and feel you're reasonably qualified to assess quality in this particular area, leave your `Approved` review on the PR.
If you're new to GitHub, check out the [Pull Request Review documentation](https://docs.github.com/en/github/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/about-pull-request-reviews).
**Anyone** can and should leave reviews ... no special permissions are required!
It's okay to leave an approval even if you aren't 100% confident on all areas of the PR: just be sure to note your limitations.
When maintainers are evaluating the PR to be merged, they'll make sure that there's good coverage on all of the critical areas.
If you can only check that the math is correct, and another reviewer can check everything *but* the math, we're in good shape!
Similarly, if there are areas that would be *good* to fix but aren't severe, please consider leaving an approval.
The author can address them immediately, or spin them out into follow-up issues or PRs.
Large PRs are much more draining for both reviewers and authors, so try to push for a smaller scope with clearly tracked follow-ups.
There are three main places you can check for things to review:
1. Pull requests which are ready and in need of more reviews on [bevy](https://github.com/bevyengine/bevy/pulls?q=is%3Aopen+is%3Apr+-label%3AS-Ready-For-Final-Review+-draft%3A%3Atrue+-label%3AS-Needs-RFC+-reviewed-by%3A%40me+-author%3A%40me).
2. Pull requests on [bevy](https://github.com/bevyengine/bevy/pulls) and the [bevy-website](https://github.com/bevyengine/bevy-website/pulls) repos.
3. [RFCs](https://github.com/bevyengine/rfcs), which need extensive thoughtful community input on their design.
Not even our Project Leads and Maintainers are exempt from reviews and RFCs!
By giving feedback on this work (and related supporting work), you can help us make sure our releases are both high-quality and timely.
Finally, if nothing brings you more satisfaction than seeing every last issue labeled and all resolved issues closed, feel free to message the Project Lead (currently @cart) for a Bevy org role to help us keep things tidy.
As discussed in our [*Bevy Organization doc*](/docs/the_bevy_organization.md), this role only requires good faith and a basic understanding of our development process.
### How to adopt pull requests
Occasionally authors of pull requests get busy or become unresponsive, or project members fail to reply in a timely manner.
This is a natural part of any open source project.
To avoid blocking these efforts, these pull requests may be *adopted*, where another contributor creates a new pull request with the same content.
If there is an old pull request that has gone without updates, ask the organization whether it is appropriate to add the
*[S-Adopt-Me](https://github.com/bevyengine/bevy/labels/S-Adopt-Me)* label to indicate that it can be *adopted*.
If you plan on adopting a PR yourself, you can also leave a comment on the PR asking the author if they plan on returning.
If the author gives permission or simply doesn't respond after a few days, then it can be adopted.
This may sometimes even skip the labeling process, since at that point you have effectively adopted the PR.
With this label added, it's best practice to fork the original author's branch.
This ensures that they still get credit for working on it and that the commit history is retained.
When the new pull request is ready, it should reference the original PR in the description.
Then notify org members to close the original.
* For example, you can reference the original PR by adding the following to your PR description:
`Adopted #number-original-pull-request`
### Contributing code
Bevy is actively open to code contributions from community members.
If you're new to Bevy, here's the workflow we use:
1. Fork the `bevyengine/bevy` repository on GitHub. You'll need to create a GitHub account if you don't have one already.
2. Make your changes in a local clone of your fork, typically in its own new branch.
1. Try to split your work into separate commits, each with a distinct purpose. Be particularly mindful of this when responding to reviews so it's easy to see what's changed.
2. Tip: [You can set up a global `.gitignore` file](https://docs.github.com/en/get-started/getting-started-with-git/ignoring-files#configuring-ignored-files-for-all-repositories-on-your-computer) to exclude your operating system/text editor's special/temporary files. (e.g. `.DS_Store`, `thumbs.db`, `*~`, `*.swp` or `*.swo`) This allows us to keep the `.gitignore` file in the repo uncluttered.
3. To test CI validations locally, run the `cargo run -p ci` command. This will run most checks that happen in CI, but can take some time. You can also run sub-commands to iterate faster depending on what you're contributing:
* `cargo run -p ci -- lints` - to run formatting and clippy.
* `cargo run -p ci -- test` - to run tests.
* `cargo run -p ci -- doc` - to run doc tests and doc checks.
* `cargo run -p ci -- compile` - to check that everything that must compile still does (examples and benches), and that some that shouldn't still don't ([`crates/bevy_ecs_compile_fail_tests`](./crates/bevy_ecs_compile_fail_tests)).
* to get more information on commands available and what is run, check the [tools/ci crate](./tools/ci).
4. When working with Markdown (`.md`) files, note that Bevy's CI will check them (like this one) using [markdownlint](https://github.com/DavidAnson/markdownlint).
To locally lint your files using the same workflow as our CI:
1. Install [markdownlint-cli](https://github.com/igorshubovych/markdownlint-cli).
2. Run `markdownlint -f -c .github/linters/.markdown-lint.yml .` in the root directory of the Bevy project.
5. When working with TOML (`.toml`) files, note that Bevy's CI will check them using [taplo](https://taplo.tamasfe.dev/): `taplo fmt --check --diff`
1. If you use VSCode, install [Even Better TOML](https://marketplace.visualstudio.com/items?itemName=tamasfe.even-better-toml) and use it to format your files.
2. If you want to use the CLI tool, install [taplo-cli](https://taplo.tamasfe.dev/cli/installation/cargo.html) and run `taplo fmt --check --diff` to check the formatting. Fix any issues by running `taplo fmt` in the root directory of the Bevy project.
6. Check for typos. Bevy's CI will check for them using [typos](https://github.com/crate-ci/typos).
1. If you use VSCode, install [Typos Spell Checker](https://marketplace.visualstudio.com/items?itemName=tekumara.typos-vscode).
2. You can also use the CLI tool. Install [typos-cli](https://github.com/crate-ci/typos?tab=readme-ov-file#install) and run `typos` to check for typos, and fix them by running `typos -w`.
7. Push your changes to your fork on GitHub and open a Pull Request.
8. Respond to any CI failures or review feedback. While CI failures must be fixed before we can merge your PR, you do not need to *agree* with all feedback from your reviews, merely acknowledge that it was given. If you cannot come to an agreement, leave the thread open and defer to a Maintainer or Project Lead's final judgement.
9. When your PR is ready to merge, a Maintainer or Project Lead will review it and suggest final changes. If those changes are minimal they may even apply them directly to speed up merging.
If you end up adding a new official Bevy crate to the `bevy` repo:
1. Add the new crate to the [./tools/publish.sh](./tools/publish.sh) file.
2. If a new cargo feature was added, update [cargo_features.md](https://github.com/bevyengine/bevy/blob/main/docs/cargo_features.md) as needed.
When contributing, please:
* Try to loosely follow the workflow in [*Making changes to Bevy*](#making-changes-to-bevy).
* Consult the [style guide](.github/contributing/engine_style_guide.md) to help keep our code base tidy.
* Explain what you're doing and why.
* Document new code with doc comments.
* Include clear, simple tests (see the sketch after this list).
* Add or improve the examples when adding new user-facing functionality.
* Break work into digestible chunks.
* Ask for any help that you need!
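For the documentation and testing points above, here is a sketch of the shape we're after: a doc comment that explains the *why*, plus small, behavior-named tests. The helper function is hypothetical and exists only to illustrate the checklist.

```rust
/// Wraps `index` so it always stays within `0..len`.
///
/// Hypothetical helper, shown only to illustrate doc comments and tests.
pub fn wrap_index(index: usize, len: usize) -> usize {
    assert!(len > 0, "len must be non-zero");
    index % len
}

#[cfg(test)]
mod tests {
    use super::*;

    // One behavior per test, named after the behavior it checks.
    #[test]
    fn wraps_past_the_end() {
        assert_eq!(wrap_index(5, 3), 2);
    }

    #[test]
    fn leaves_in_range_values_unchanged() {
        assert_eq!(wrap_index(1, 3), 1);
    }
}
```

Small, focused tests like these double as executable documentation and make review much faster.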
Your first PR will be merged in no time!
No matter how you're helping: thanks for contributing to Bevy!
[GitHub Discussions]: https://github.com/bevyengine/bevy/discussions "GitHub Discussions"
[Discord]: https://discord.gg/bevy "Discord"
Hey, we've moved our information on contributing to Bevy's website [here](https://bevyengine.org/learn/contribute/introduction). Go give it a read, and thanks for contributing!

File diff suppressed because it is too large

View file

@ -2,23 +2,28 @@
graph: (
nodes: [
(
clip: None,
node_type: Blend,
mask: 0,
weight: 1.0,
),
(
clip: None,
weight: 0.5,
),
(
clip: Some(AssetPath("models/animated/Fox.glb#Animation0")),
node_type: Blend,
mask: 0,
weight: 1.0,
),
(
clip: Some(AssetPath("models/animated/Fox.glb#Animation1")),
node_type: Clip(AssetPath("models/animated/Fox.glb#Animation0")),
mask: 0,
weight: 1.0,
),
(
clip: Some(AssetPath("models/animated/Fox.glb#Animation2")),
node_type: Clip(AssetPath("models/animated/Fox.glb#Animation1")),
mask: 0,
weight: 1.0,
),
(
node_type: Clip(AssetPath("models/animated/Fox.glb#Animation2")),
mask: 0,
weight: 1.0,
),
],
@ -32,4 +37,5 @@
],
),
root: 0,
mask_groups: {},
)

Binary file not shown.

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large

Binary file not shown.


View file

@ -0,0 +1,288 @@
{
"extensionsUsed": [
"KHR_materials_unlit"
],
"asset": {
"generator": "UniGLTF-1.27",
"version": "2.0"
},
"buffers": [
{
"uri": "craft_speederD_data.bin",
"byteLength": 20120
}
],
"bufferViews": [
{
"buffer": 0,
"byteOffset": 0,
"byteLength": 6096,
"target": 34962
},
{
"buffer": 0,
"byteOffset": 6096,
"byteLength": 6096,
"target": 34962
},
{
"buffer": 0,
"byteOffset": 12192,
"byteLength": 4064,
"target": 34962
},
{
"buffer": 0,
"byteOffset": 16256,
"byteLength": 732,
"target": 34963
},
{
"buffer": 0,
"byteOffset": 16988,
"byteLength": 1368,
"target": 34963
},
{
"buffer": 0,
"byteOffset": 18356,
"byteLength": 456,
"target": 34963
},
{
"buffer": 0,
"byteOffset": 18812,
"byteLength": 1308,
"target": 34963
}
],
"accessors": [
{
"bufferView": 0,
"byteOffset": 0,
"type": "VEC3",
"componentType": 5126,
"count": 508,
"max": [
1.4,
0.9,
1.11283529
],
"min": [
-1.4,
0,
-1.11283529
],
"normalized": false
},
{
"bufferView": 1,
"byteOffset": 0,
"type": "VEC3",
"componentType": 5126,
"count": 508,
"normalized": false
},
{
"bufferView": 2,
"byteOffset": 0,
"type": "VEC2",
"componentType": 5126,
"count": 508,
"normalized": false
},
{
"bufferView": 3,
"byteOffset": 0,
"type": "SCALAR",
"componentType": 5125,
"count": 183,
"normalized": false
},
{
"bufferView": 4,
"byteOffset": 0,
"type": "SCALAR",
"componentType": 5125,
"count": 342,
"normalized": false
},
{
"bufferView": 5,
"byteOffset": 0,
"type": "SCALAR",
"componentType": 5125,
"count": 114,
"normalized": false
},
{
"bufferView": 6,
"byteOffset": 0,
"type": "SCALAR",
"componentType": 5125,
"count": 327,
"normalized": false
}
],
"materials": [
{
"name": "metal",
"pbrMetallicRoughness": {
"baseColorFactor": [
0.843137264,
0.870588243,
0.9098039,
1
],
"metallicFactor": 1,
"roughnessFactor": 1
},
"doubleSided": false,
"alphaMode": "OPAQUE"
},
{
"name": "metalDark",
"pbrMetallicRoughness": {
"baseColorFactor": [
0.6750623,
0.7100219,
0.7735849,
1
],
"metallicFactor": 1,
"roughnessFactor": 1
},
"doubleSided": false,
"alphaMode": "OPAQUE"
},
{
"name": "dark",
"pbrMetallicRoughness": {
"baseColorFactor": [
0.274509817,
0.298039228,
0.34117648,
1
],
"metallicFactor": 1,
"roughnessFactor": 1
},
"doubleSided": false,
"alphaMode": "OPAQUE"
},
{
"name": "metalRed",
"pbrMetallicRoughness": {
"baseColorFactor": [
1,
0.628524244,
0.2028302,
1
],
"metallicFactor": 1,
"roughnessFactor": 1
},
"doubleSided": false,
"alphaMode": "OPAQUE"
}
],
"meshes": [
{
"name": "Mesh craft_speederD",
"primitives": [
{
"mode": 4,
"indices": 3,
"attributes": {
"POSITION": 0,
"NORMAL": 1,
"TEXCOORD_0": 2
},
"material": 0
},
{
"mode": 4,
"indices": 4,
"attributes": {
"POSITION": 0,
"NORMAL": 1,
"TEXCOORD_0": 2
},
"material": 1
},
{
"mode": 4,
"indices": 5,
"attributes": {
"POSITION": 0,
"NORMAL": 1,
"TEXCOORD_0": 2
},
"material": 2
},
{
"mode": 4,
"indices": 6,
"attributes": {
"POSITION": 0,
"NORMAL": 1,
"TEXCOORD_0": 2
},
"material": 3
}
]
}
],
"nodes": [
{
"children": [
1
],
"name": "tmpParent",
"translation": [
0,
0,
0
],
"rotation": [
0,
0,
0,
1
],
"scale": [
1,
1,
1
]
},
{
"name": "craft_speederD",
"translation": [
0,
0,
0
],
"rotation": [
0,
0,
0,
1
],
"scale": [
1,
1,
1
],
"mesh": 0
}
],
"scenes": [
{
"nodes": [
1
]
}
],
"scene": 0
}

Binary file not shown.

View file

@ -1,37 +1,29 @@
(
resources: {
"scene::ResourceA": (
score: 2,
score: 1,
),
},
entities: {
4294967296: (
components: {
"bevy_transform::components::transform::Transform": (
translation: (
x: 0.0,
y: 0.0,
z: 0.0
),
rotation: (
x: 0.0,
y: 0.0,
z: 0.0,
w: 1.0,
),
scale: (
x: 1.0,
y: 1.0,
z: 1.0
),
"bevy_core::name::Name": (
hash: 17588334858059901562,
name: "joe",
),
"scene::ComponentB": (
value: "hello",
"bevy_transform::components::global_transform::GlobalTransform": ((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0)),
"bevy_transform::components::transform::Transform": (
translation: (0.0, 0.0, 0.0),
rotation: (0.0, 0.0, 0.0, 1.0),
scale: (1.0, 1.0, 1.0),
),
"scene::ComponentA": (
x: 1.0,
y: 2.0,
),
"scene::ComponentB": (
value: "hello",
),
},
),
4294967297: (
@ -42,5 +34,5 @@
),
},
),
}
)
},
)

View file

@ -1,20 +0,0 @@
// This shader draws a circle with a given input color
#import bevy_ui::ui_vertex_output::UiVertexOutput
struct CustomUiMaterial {
@location(0) color: vec4<f32>
}
@group(1) @binding(0)
var<uniform> input: CustomUiMaterial;
@fragment
fn fragment(in: UiVertexOutput) -> @location(0) vec4<f32> {
// the UVs are now adjusted around the middle of the rect.
let uv = in.uv * 2.0 - 1.0;
// circle alpha, the higher the power the harsher the falloff.
let alpha = 1.0 - pow(sqrt(dot(uv, uv)), 100.0);
return vec4<f32>(input.color.rgb, alpha);
}

View file

@ -7,14 +7,16 @@ layout(location = 2) in vec2 Vertex_Uv;
layout(location = 0) out vec2 v_Uv;
layout(set = 0, binding = 0) uniform CameraViewProj {
mat4 ViewProj;
mat4 View;
mat4 InverseView;
mat4 Projection;
vec3 WorldPosition;
float width;
float height;
};
mat4 clip_from_world;
// Other attributes exist that can be described here.
// See full definition in: crates/bevy_render/src/view/view.wgsl
// Attributes added here must be in the same order as they are defined
// in view.wgsl, and they must be contiguous starting from the top to
// ensure they have the same layout.
//
// Needing to maintain this mapping yourself is one of the harder parts of using
// GLSL with Bevy. WGSL provides a much better user experience!
} camera_view;
struct Mesh {
mat3x4 Model;
@ -41,7 +43,7 @@ mat4 affine_to_square(mat3x4 affine) {
void main() {
v_Uv = Vertex_Uv;
gl_Position = ViewProj
gl_Position = camera_view.clip_from_world
* affine_to_square(Meshes[gl_InstanceIndex].Model)
* vec4(Vertex_Position, 1.0);
}

View file

@ -0,0 +1,36 @@
// `custom_phase_item.wgsl`
//
// This shader goes with the `custom_phase_item` example. It demonstrates how to
// enqueue custom rendering logic in a `RenderPhase`.
// The GPU-side vertex structure.
struct Vertex {
// The world-space position of the vertex.
@location(0) position: vec3<f32>,
// The color of the vertex.
@location(1) color: vec3<f32>,
};
// Information passed from the vertex shader to the fragment shader.
struct VertexOutput {
// The clip-space position of the vertex.
@builtin(position) clip_position: vec4<f32>,
// The color of the vertex.
@location(0) color: vec3<f32>,
};
// The vertex shader entry point.
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
// Use an orthographic projection.
var vertex_output: VertexOutput;
vertex_output.clip_position = vec4(vertex.position.xyz, 1.0);
vertex_output.color = vertex.color;
return vertex_output;
}
// The fragment shader entry point.
@fragment
fn fragment(vertex_output: VertexOutput) -> @location(0) vec4<f32> {
return vec4(vertex_output.color, 1.0);
}

View file

@ -0,0 +1,29 @@
// This shader draws a circle with a given input color
#import bevy_ui::ui_vertex_output::UiVertexOutput
@group(1) @binding(0) var<uniform> color: vec4<f32>;
@group(1) @binding(1) var<uniform> slider: f32;
@group(1) @binding(2) var material_color_texture: texture_2d<f32>;
@group(1) @binding(3) var material_color_sampler: sampler;
@group(1) @binding(4) var<uniform> border_color: vec4<f32>;
@fragment
fn fragment(in: UiVertexOutput) -> @location(0) vec4<f32> {
let r = in.uv - 0.5;
let b = vec2(
select(in.border_widths.x, in.border_widths.y, r.x < 0.),
select(in.border_widths.z, in.border_widths.w, r.y < 0.)
);
if any(0.5 - b < abs(r)) {
return border_color;
}
if in.uv.x < slider {
let output_color = textureSample(material_color_texture, material_color_sampler, in.uv) * color;
return output_color;
} else {
return vec4(0.0);
}
}

View file

@ -3,10 +3,13 @@
// This is the data that lives in the gpu only buffer
@group(0) @binding(0) var<storage, read_write> data: array<u32>;
@group(0) @binding(1) var texture: texture_storage_2d<r32uint, write>;
@compute @workgroup_size(1)
fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
// We use the global_id to index the array to make sure we don't
// access data used in another workgroup
data[global_id.x] += 1u;
// Write the same data to the texture
textureStore(texture, vec2<i32>(i32(global_id.x), 0), vec4<u32>(data[global_id.x], 0, 0, 0));
}

View file

@ -0,0 +1,48 @@
//! Very simple shader used to demonstrate how to get the world position and pass data
//! between the vertex and fragment shader. Also shows the custom vertex layout.
// First we import everything we need from bevy_pbr
// A 2d shader would be very similar but import from bevy_sprite instead
#import bevy_pbr::{
mesh_functions,
view_transformations::position_world_to_clip
}
struct Vertex {
// This is needed if you are using batching and/or gpu preprocessing
// It's a built in so you don't need to define it in the vertex layout
@builtin(instance_index) instance_index: u32,
// Like we defined for the vertex layout
// position is at location 0
@location(0) position: vec3<f32>,
// and color at location 1
@location(1) color: vec4<f32>,
};
// This is the output of the vertex shader and we also use it as the input for the fragment shader
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) world_position: vec4<f32>,
@location(1) color: vec3<f32>,
};
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
var out: VertexOutput;
// This is how bevy computes the world position
// The vertex.instance_index is very important. Especially if you are using batching and gpu preprocessing
var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index);
out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0));
out.clip_position = position_world_to_clip(out.world_position.xyz);
// We just use the raw vertex color
out.color = vertex.color.rgb;
return out;
}
@fragment
fn fragment(in: VertexOutput) -> @location(0) vec4<f32> {
// output the color directly
return vec4(in.color, 1.0);
}

View file

@ -0,0 +1,38 @@
#import bevy_pbr::{
mesh_functions,
view_transformations::position_world_to_clip
}
@group(2) @binding(0) var<storage, read> colors: array<vec4<f32>, 5>;
struct Vertex {
@builtin(instance_index) instance_index: u32,
@location(0) position: vec3<f32>,
};
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) world_position: vec4<f32>,
@location(1) color: vec4<f32>,
};
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
var out: VertexOutput;
var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index);
out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0));
out.clip_position = position_world_to_clip(out.world_position.xyz);
// We have 5 colors in the storage buffer, but potentially many instances of the mesh, so
// we use the instance index to select a color from the storage buffer.
out.color = colors[vertex.instance_index % 5];
return out;
}
@fragment
fn fragment(
mesh: VertexOutput,
) -> @location(0) vec4<f32> {
return mesh.color;
}

Binary file not shown.


Binary file not shown.


BIN
assets/volumes/bunny.ktx2 Normal file

Binary file not shown.

Binary file not shown.

View file

@ -6,17 +6,23 @@ publish = false
license = "MIT OR Apache-2.0"
[dev-dependencies]
glam = "0.27"
glam = "0.29"
rand = "0.8"
rand_chacha = "0.3"
criterion = { version = "0.3", features = ["html_reports"] }
bevy_app = { path = "../crates/bevy_app" }
bevy_ecs = { path = "../crates/bevy_ecs", features = ["multi_threaded"] }
bevy_reflect = { path = "../crates/bevy_reflect" }
bevy_hierarchy = { path = "../crates/bevy_hierarchy" }
bevy_math = { path = "../crates/bevy_math" }
bevy_picking = { path = "../crates/bevy_picking", features = ["bevy_mesh"] }
bevy_reflect = { path = "../crates/bevy_reflect", features = ["functions"] }
bevy_render = { path = "../crates/bevy_render" }
bevy_tasks = { path = "../crates/bevy_tasks" }
bevy_utils = { path = "../crates/bevy_utils" }
bevy_math = { path = "../crates/bevy_math" }
bevy_render = { path = "../crates/bevy_render" }
# make bevy_render compile on linux. x11 vs wayland does not matter here as the benches do not actually use a window
[target.'cfg(target_os = "linux")'.dev-dependencies]
bevy_winit = { path = "../crates/bevy_winit", features = ["x11"] }
[profile.release]
opt-level = 3
@ -32,6 +38,16 @@ name = "ecs"
path = "benches/bevy_ecs/benches.rs"
harness = false
[[bench]]
name = "ray_mesh_intersection"
path = "benches/bevy_picking/ray_mesh_intersection.rs"
harness = false
[[bench]]
name = "reflect_function"
path = "benches/bevy_reflect/function.rs"
harness = false
[[bench]]
name = "reflect_list"
path = "benches/bevy_reflect/list.rs"

View file

@ -2,7 +2,10 @@ use criterion::criterion_main;
mod components;
mod events;
mod fragmentation;
mod iteration;
mod observers;
mod param;
mod scheduling;
mod world;
@ -10,6 +13,9 @@ criterion_main!(
components::components_benches,
events::event_benches,
iteration::iterations_benches,
fragmentation::fragmentation_benches,
observers::observer_benches,
scheduling::scheduling_benches,
world::world_benches,
param::param_benches,
);

View file

@ -1,7 +1,8 @@
use bevy_ecs::{
component::Component,
entity::Entity,
prelude::{Added, Changed},
prelude::{Added, Changed, EntityWorldMut, QueryState},
query::QueryFilter,
world::World,
};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
@ -14,15 +15,28 @@ criterion_group!(
all_changed_detection,
few_changed_detection,
none_changed_detection,
multiple_archetype_none_changed_detection
);
criterion_main!(benches);
macro_rules! modify {
($components:ident;$($index:tt),*) => {
$(
$components.$index.map(|mut v| {
v.0+=1.
});
)*
};
}
#[derive(Component, Default)]
#[component(storage = "Table")]
struct Table(f32);
#[derive(Component, Default)]
#[component(storage = "SparseSet")]
struct Sparse(f32);
#[derive(Component, Default)]
#[component(storage = "Table")]
struct Data<const X: u16>(f32);
trait BenchModify {
fn bench_modify(&mut self) -> f32;
@ -41,7 +55,7 @@ impl BenchModify for Sparse {
}
}
const RANGE_ENTITIES_TO_BENCH_COUNT: std::ops::Range<u32> = 5..7;
const ENTITIES_TO_BENCH_COUNT: &[u32] = &[5000, 50000];
type BenchGroup<'a> = criterion::BenchmarkGroup<'a, criterion::measurement::WallTime>;
@ -55,6 +69,11 @@ fn setup<T: Component + Default>(entity_count: u32) -> World {
black_box(world)
}
// create a cached query in setup to avoid extra costs in each iter
fn generic_filter_query<F: QueryFilter>(world: &mut World) -> QueryState<Entity, F> {
world.query_filtered::<Entity, F>()
}
fn generic_bench<P: Copy>(
bench_group: &mut BenchGroup,
mut benches: Vec<Box<dyn FnMut(&mut BenchGroup, P)>>,
@ -67,13 +86,16 @@ fn generic_bench<P: Copy>(
fn all_added_detection_generic<T: Component + Default>(group: &mut BenchGroup, entity_count: u32) {
group.bench_function(
format!("{}_entities_{}", entity_count, std::any::type_name::<T>()),
format!("{}_entities_{}", entity_count, core::any::type_name::<T>()),
|bencher| {
bencher.iter_batched(
|| setup::<T>(entity_count),
|mut world| {
bencher.iter_batched_ref(
|| {
let mut world = setup::<T>(entity_count);
let query = generic_filter_query::<Added<T>>(&mut world);
(world, query)
},
|(ref mut world, ref mut query)| {
let mut count = 0;
let mut query = world.query_filtered::<Entity, Added<T>>();
for entity in query.iter(&world) {
black_box(entity);
count += 1;
@ -88,9 +110,9 @@ fn all_added_detection_generic<T: Component + Default>(group: &mut BenchGroup, e
fn all_added_detection(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("all_added_detection");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
for entity_count in RANGE_ENTITIES_TO_BENCH_COUNT.map(|i| i * 10_000) {
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for &entity_count in ENTITIES_TO_BENCH_COUNT {
generic_bench(
&mut group,
vec![
@ -107,9 +129,9 @@ fn all_changed_detection_generic<T: Component + Default + BenchModify>(
entity_count: u32,
) {
group.bench_function(
format!("{}_entities_{}", entity_count, std::any::type_name::<T>()),
format!("{}_entities_{}", entity_count, core::any::type_name::<T>()),
|bencher| {
bencher.iter_batched(
bencher.iter_batched_ref(
|| {
let mut world = setup::<T>(entity_count);
world.clear_trackers();
@ -117,11 +139,11 @@ fn all_changed_detection_generic<T: Component + Default + BenchModify>(
for mut component in query.iter_mut(&mut world) {
black_box(component.bench_modify());
}
world
let query = generic_filter_query::<Changed<T>>(&mut world);
(world, query)
},
|mut world| {
|(ref mut world, ref mut query)| {
let mut count = 0;
let mut query = world.query_filtered::<Entity, Changed<T>>();
for entity in query.iter(&world) {
black_box(entity);
count += 1;
@ -136,9 +158,9 @@ fn all_changed_detection_generic<T: Component + Default + BenchModify>(
fn all_changed_detection(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("all_changed_detection");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
for entity_count in RANGE_ENTITIES_TO_BENCH_COUNT.map(|i| i * 10_000) {
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for &entity_count in ENTITIES_TO_BENCH_COUNT {
generic_bench(
&mut group,
vec![
@ -157,9 +179,9 @@ fn few_changed_detection_generic<T: Component + Default + BenchModify>(
let ratio_to_modify = 0.1;
let amount_to_modify = (entity_count as f32 * ratio_to_modify) as usize;
group.bench_function(
format!("{}_entities_{}", entity_count, std::any::type_name::<T>()),
format!("{}_entities_{}", entity_count, core::any::type_name::<T>()),
|bencher| {
bencher.iter_batched(
bencher.iter_batched_ref(
|| {
let mut world = setup::<T>(entity_count);
world.clear_trackers();
@ -170,10 +192,10 @@ fn few_changed_detection_generic<T: Component + Default + BenchModify>(
for component in to_modify[0..amount_to_modify].iter_mut() {
black_box(component.bench_modify());
}
world
let query = generic_filter_query::<Changed<T>>(&mut world);
(world, query)
},
|mut world| {
let mut query = world.query_filtered::<Entity, Changed<T>>();
|(ref mut world, ref mut query)| {
for entity in query.iter(&world) {
black_box(entity);
}
@ -186,9 +208,9 @@ fn few_changed_detection_generic<T: Component + Default + BenchModify>(
fn few_changed_detection(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("few_changed_detection");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
for entity_count in RANGE_ENTITIES_TO_BENCH_COUNT.map(|i| i * 10_000) {
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for &entity_count in ENTITIES_TO_BENCH_COUNT {
generic_bench(
&mut group,
vec![
@ -205,17 +227,17 @@ fn none_changed_detection_generic<T: Component + Default>(
entity_count: u32,
) {
group.bench_function(
format!("{}_entities_{}", entity_count, std::any::type_name::<T>()),
format!("{}_entities_{}", entity_count, core::any::type_name::<T>()),
|bencher| {
bencher.iter_batched(
bencher.iter_batched_ref(
|| {
let mut world = setup::<T>(entity_count);
world.clear_trackers();
world
let query = generic_filter_query::<Changed<T>>(&mut world);
(world, query)
},
|mut world| {
|(ref mut world, ref mut query)| {
let mut count = 0;
let mut query = world.query_filtered::<Entity, Changed<T>>();
for entity in query.iter(&world) {
black_box(entity);
count += 1;
@ -230,9 +252,9 @@ fn none_changed_detection_generic<T: Component + Default>(
fn none_changed_detection(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("none_changed_detection");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
for entity_count in RANGE_ENTITIES_TO_BENCH_COUNT.map(|i| i * 10_000) {
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for &entity_count in ENTITIES_TO_BENCH_COUNT {
generic_bench(
&mut group,
vec![
@ -243,3 +265,111 @@ fn none_changed_detection(criterion: &mut Criterion) {
);
}
}
fn insert_if_bit_enabled<const B: u16>(entity: &mut EntityWorldMut, i: u16) {
if i & 1 << B != 0 {
entity.insert(Data::<B>(1.0));
}
}
fn add_archetypes_entities<T: Component + Default>(
world: &mut World,
archetype_count: u16,
entity_count: u32,
) {
for i in 0..archetype_count {
for _j in 0..entity_count {
let mut e = world.spawn(T::default());
insert_if_bit_enabled::<0>(&mut e, i);
insert_if_bit_enabled::<1>(&mut e, i);
insert_if_bit_enabled::<2>(&mut e, i);
insert_if_bit_enabled::<3>(&mut e, i);
insert_if_bit_enabled::<4>(&mut e, i);
insert_if_bit_enabled::<5>(&mut e, i);
insert_if_bit_enabled::<6>(&mut e, i);
insert_if_bit_enabled::<7>(&mut e, i);
insert_if_bit_enabled::<8>(&mut e, i);
insert_if_bit_enabled::<9>(&mut e, i);
insert_if_bit_enabled::<10>(&mut e, i);
insert_if_bit_enabled::<11>(&mut e, i);
insert_if_bit_enabled::<12>(&mut e, i);
insert_if_bit_enabled::<13>(&mut e, i);
insert_if_bit_enabled::<14>(&mut e, i);
insert_if_bit_enabled::<15>(&mut e, i);
}
}
}
fn multiple_archetype_none_changed_detection_generic<T: Component + Default + BenchModify>(
group: &mut BenchGroup,
archetype_count: u16,
entity_count: u32,
) {
group.bench_function(
format!(
"{}_archetypes_{}_entities_{}",
archetype_count,
entity_count,
core::any::type_name::<T>()
),
|bencher| {
bencher.iter_batched_ref(
|| {
let mut world = World::new();
add_archetypes_entities::<T>(&mut world, archetype_count, entity_count);
world.clear_trackers();
let mut query = world.query::<(
Option<&mut Data<0>>,
Option<&mut Data<1>>,
Option<&mut Data<2>>,
Option<&mut Data<3>>,
Option<&mut Data<4>>,
Option<&mut Data<5>>,
Option<&mut Data<6>>,
Option<&mut Data<7>>,
Option<&mut Data<8>>,
Option<&mut Data<9>>,
Option<&mut Data<10>>,
Option<&mut Data<11>>,
Option<&mut Data<12>>,
Option<&mut Data<13>>,
Option<&mut Data<14>>,
)>();
for components in query.iter_mut(&mut world) {
// change Data<X> while keeping T unchanged
modify!(components;0,1,2,3,4,5,6,7,8,9,10,11,12,13,14);
}
let query = generic_filter_query::<Changed<T>>(&mut world);
(world, query)
},
|(ref mut world, ref mut query)| {
let mut count = 0;
for entity in query.iter(&world) {
black_box(entity);
count += 1;
}
assert_eq!(0, count);
},
criterion::BatchSize::LargeInput,
)
},
);
}
fn multiple_archetype_none_changed_detection(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("multiple_archetypes_none_changed_detection");
group.warm_up_time(core::time::Duration::from_millis(800));
group.measurement_time(core::time::Duration::from_secs(8));
for archetype_count in [5, 20, 100] {
for entity_count in [10, 100, 1000, 10000] {
multiple_archetype_none_changed_detection_generic::<Table>(
&mut group,
archetype_count,
entity_count,
);
multiple_archetype_none_changed_detection_generic::<Sparse>(
&mut group,
archetype_count,
entity_count,
);
}
}
}

View file

@ -0,0 +1,111 @@
#![allow(dead_code)]
use bevy_ecs::prelude::*;
use glam::*;
#[derive(Component, Copy, Clone)]
struct A<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct B<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct C<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct D<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct E<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct F<const N: usize>(Mat4);
#[derive(Component, Copy, Clone)]
struct Z<const N: usize>;
pub struct Benchmark(World, Vec<Entity>);
impl Benchmark {
pub fn new() -> Self {
let mut world = World::default();
let mut entities = Vec::with_capacity(10_000);
for _ in 0..10_000 {
entities.push(
world
.spawn((
(
A::<1>(Mat4::from_scale(Vec3::ONE)),
B::<1>(Mat4::from_scale(Vec3::ONE)),
C::<1>(Mat4::from_scale(Vec3::ONE)),
D::<1>(Mat4::from_scale(Vec3::ONE)),
E::<1>(Mat4::from_scale(Vec3::ONE)),
A::<2>(Mat4::from_scale(Vec3::ONE)),
B::<2>(Mat4::from_scale(Vec3::ONE)),
C::<2>(Mat4::from_scale(Vec3::ONE)),
D::<2>(Mat4::from_scale(Vec3::ONE)),
E::<2>(Mat4::from_scale(Vec3::ONE)),
),
(
A::<3>(Mat4::from_scale(Vec3::ONE)),
B::<3>(Mat4::from_scale(Vec3::ONE)),
C::<3>(Mat4::from_scale(Vec3::ONE)),
D::<3>(Mat4::from_scale(Vec3::ONE)),
E::<3>(Mat4::from_scale(Vec3::ONE)),
A::<4>(Mat4::from_scale(Vec3::ONE)),
B::<4>(Mat4::from_scale(Vec3::ONE)),
C::<4>(Mat4::from_scale(Vec3::ONE)),
D::<4>(Mat4::from_scale(Vec3::ONE)),
E::<4>(Mat4::from_scale(Vec3::ONE)),
),
(
A::<5>(Mat4::from_scale(Vec3::ONE)),
B::<5>(Mat4::from_scale(Vec3::ONE)),
C::<5>(Mat4::from_scale(Vec3::ONE)),
D::<5>(Mat4::from_scale(Vec3::ONE)),
E::<5>(Mat4::from_scale(Vec3::ONE)),
A::<6>(Mat4::from_scale(Vec3::ONE)),
B::<6>(Mat4::from_scale(Vec3::ONE)),
C::<6>(Mat4::from_scale(Vec3::ONE)),
D::<6>(Mat4::from_scale(Vec3::ONE)),
E::<6>(Mat4::from_scale(Vec3::ONE)),
),
(
A::<7>(Mat4::from_scale(Vec3::ONE)),
B::<7>(Mat4::from_scale(Vec3::ONE)),
C::<7>(Mat4::from_scale(Vec3::ONE)),
D::<7>(Mat4::from_scale(Vec3::ONE)),
E::<7>(Mat4::from_scale(Vec3::ONE)),
Z::<1>,
Z::<2>,
Z::<3>,
Z::<4>,
Z::<5>,
Z::<6>,
Z::<7>,
),
))
.id(),
);
}
Self(world, entities)
}
pub fn run(&mut self) {
for entity in &self.1 {
self.0.entity_mut(*entity).insert((
F::<1>(Mat4::from_scale(Vec3::ONE)),
F::<2>(Mat4::from_scale(Vec3::ONE)),
F::<3>(Mat4::from_scale(Vec3::ONE)),
F::<4>(Mat4::from_scale(Vec3::ONE)),
F::<5>(Mat4::from_scale(Vec3::ONE)),
F::<6>(Mat4::from_scale(Vec3::ONE)),
F::<7>(Mat4::from_scale(Vec3::ONE)),
));
}
for entity in &self.1 {
self.0
.entity_mut(*entity)
.remove::<(F<1>, F<2>, F<3>, F<4>, F<5>, F<6>, F<7>)>();
self.0
.entity_mut(*entity)
.remove::<(Z<1>, Z<2>, Z<3>, Z<4>, Z<5>, Z<6>, Z<7>)>();
}
}
}

View file

@ -4,6 +4,7 @@ mod add_remove_big_sparse_set;
mod add_remove_big_table;
mod add_remove_sparse_set;
mod add_remove_table;
mod add_remove_very_big_table;
mod archetype_updates;
mod insert_simple;
mod insert_simple_unbatched;
@ -14,6 +15,7 @@ criterion_group!(
components_benches,
add_remove,
add_remove_big,
add_remove_very_big,
insert_simple,
no_archetypes,
added_archetypes,
@ -21,8 +23,8 @@ criterion_group!(
fn add_remove(c: &mut Criterion) {
let mut group = c.benchmark_group("add_remove");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("table", |b| {
let mut bench = add_remove_table::Benchmark::new();
b.iter(move || bench.run());
@ -36,8 +38,8 @@ fn add_remove(c: &mut Criterion) {
fn add_remove_big(c: &mut Criterion) {
let mut group = c.benchmark_group("add_remove_big");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("table", |b| {
let mut bench = add_remove_big_table::Benchmark::new();
b.iter(move || bench.run());
@ -49,10 +51,21 @@ fn add_remove_big(c: &mut Criterion) {
group.finish();
}
fn add_remove_very_big(c: &mut Criterion) {
let mut group = c.benchmark_group("add_remove_very_big");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("table", |b| {
let mut bench = add_remove_very_big_table::Benchmark::new();
b.iter(move || bench.run());
});
group.finish();
}
fn insert_simple(c: &mut Criterion) {
let mut group = c.benchmark_group("insert_simple");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("base", |b| {
let mut bench = insert_simple::Benchmark::new();
b.iter(move || bench.run());

View file

@ -17,9 +17,9 @@ impl<const SIZE: usize> Benchmark<SIZE> {
}
pub fn run(&mut self) {
let mut reader = self.0.get_reader();
let mut reader = self.0.get_cursor();
for evt in reader.read(&self.0) {
std::hint::black_box(evt);
core::hint::black_box(evt);
}
}
}

View file

@ -7,8 +7,8 @@ criterion_group!(event_benches, send, iter);
fn send(c: &mut Criterion) {
let mut group = c.benchmark_group("events_send");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for count in [100, 1000, 10000, 50000] {
group.bench_function(format!("size_4_events_{}", count), |b| {
let mut bench = send::Benchmark::<4>::new(count);
@ -32,8 +32,8 @@ fn send(c: &mut Criterion) {
fn iter(c: &mut Criterion) {
let mut group = c.benchmark_group("events_iter");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for count in [100, 1000, 10000, 50000] {
group.bench_function(format!("size_4_events_{}", count), |b| {
let mut bench = iter::Benchmark::<4>::new(count);

View file

@ -32,7 +32,7 @@ impl<const SIZE: usize> Benchmark<SIZE> {
pub fn run(&mut self) {
for _ in 0..self.count {
self.events
.send(std::hint::black_box(BenchEvent([0u8; SIZE])));
.send(core::hint::black_box(BenchEvent([0u8; SIZE])));
}
self.events.update();
}

View file

@ -0,0 +1,99 @@
use bevy_ecs::prelude::*;
use bevy_ecs::system::SystemState;
use criterion::*;
use glam::*;
use core::hint::black_box;
criterion_group!(fragmentation_benches, iter_frag_empty);
#[derive(Component, Default)]
struct Table<const X: usize = 0>(usize);
#[derive(Component, Default)]
#[component(storage = "SparseSet")]
struct Sparse<const X: usize = 0>(usize);
fn flip_coin() -> bool {
rand::random::<bool>()
}
fn iter_frag_empty(c: &mut Criterion) {
let mut group = c.benchmark_group("iter_fragmented(4096)_empty");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("foreach_table", |b| {
let mut world = World::new();
spawn_empty_frag_archetype::<Table>(&mut world);
let mut q: SystemState<Query<(Entity, &Table)>> =
SystemState::<Query<(Entity, &Table<0>)>>::new(&mut world);
let query = q.get(&world);
b.iter(move || {
let mut res = 0;
query.iter().for_each(|(e, t)| {
res += e.to_bits();
black_box(t);
});
});
});
group.bench_function("foreach_sparse", |b| {
let mut world = World::new();
spawn_empty_frag_archetype::<Sparse>(&mut world);
let mut q: SystemState<Query<(Entity, &Sparse)>> =
SystemState::<Query<(Entity, &Sparse<0>)>>::new(&mut world);
let query = q.get(&world);
b.iter(move || {
let mut res = 0;
query.iter().for_each(|(e, t)| {
res += e.to_bits();
black_box(t);
});
});
});
group.finish();
fn spawn_empty_frag_archetype<T: Component + Default>(world: &mut World) {
for i in 0..65536 {
let mut e = world.spawn_empty();
if flip_coin() {
e.insert(Table::<1>(0));
}
if flip_coin() {
e.insert(Table::<2>(0));
}
if flip_coin() {
e.insert(Table::<3>(0));
}
if flip_coin() {
e.insert(Table::<4>(0));
}
if flip_coin() {
e.insert(Table::<5>(0));
}
if flip_coin() {
e.insert(Table::<6>(0));
}
if flip_coin() {
e.insert(Table::<7>(0));
}
if flip_coin() {
e.insert(Table::<8>(0));
}
if flip_coin() {
e.insert(Table::<9>(0));
}
if flip_coin() {
e.insert(Table::<10>(0));
}
if flip_coin() {
e.insert(Table::<11>(0));
}
if flip_coin() {
e.insert(Table::<12>(0));
}
e.insert(T::default());
if i != 0 {
e.despawn();
}
}
}
}

View file

@ -17,8 +17,8 @@ pub fn heavy_compute(c: &mut Criterion) {
struct Transform(Mat4);
let mut group = c.benchmark_group("heavy_compute");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("base", |b| {
ComputeTaskPool::get_or_init(TaskPool::default);

View file

@ -20,7 +20,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Position(Vec3::X),
Rotation(Vec3::X),

View file

@ -20,7 +20,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Position(Vec3::X),
Rotation(Vec3::X),

View file

@ -0,0 +1,43 @@
use bevy_ecs::prelude::*;
use rand::{prelude::SliceRandom, SeedableRng};
use rand_chacha::ChaCha8Rng;
#[derive(Component, Copy, Clone)]
struct TableData(f32);
#[derive(Component, Copy, Clone)]
#[component(storage = "SparseSet")]
struct SparseData(f32);
fn deterministic_rand() -> ChaCha8Rng {
ChaCha8Rng::seed_from_u64(42)
}
pub struct Benchmark<'w>(World, QueryState<(&'w mut TableData, &'w SparseData)>);
impl<'w> Benchmark<'w> {
pub fn new() -> Self {
let mut world = World::new();
let mut v = vec![];
for _ in 0..10000 {
world.spawn((TableData(0.0), SparseData(0.0)));
v.push(world.spawn(TableData(0.)).id());
}
// By shuffling, randomize the archetype iteration order so that it deviates significantly from the table order. This maximizes the loss of cache locality during archetype-based iteration.
v.shuffle(&mut deterministic_rand());
for e in v.into_iter() {
world.entity_mut(e).despawn();
}
let query = world.query::<(&mut TableData, &SparseData)>();
Self(world, query)
}
#[inline(never)]
pub fn run(&mut self) {
self.1
.iter_mut(&mut self.0)
.for_each(|(mut v1, v2)| v1.0 += v2.0)
}
}

View file

@ -22,7 +22,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Position(Vec3::X),
Rotation(Vec3::X),

View file

@ -34,7 +34,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Rotation(Vec3::X),
Position::<0>(Vec3::X),

View file

@ -36,7 +36,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Rotation(Vec3::X),
Position::<0>(Vec3::X),

View file

@ -22,7 +22,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Position(Vec3::X),
Rotation(Vec3::X),

View file

@ -20,7 +20,7 @@ impl Benchmark {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Position(Vec3::X),
Rotation(Vec3::X),

View file

@ -34,7 +34,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Rotation(Vec3::X),
Position::<0>(Vec3::X),

View file

@ -36,7 +36,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Rotation(Vec3::X),
Position::<0>(Vec3::X),

View file

@ -11,6 +11,7 @@ mod iter_frag_wide;
mod iter_frag_wide_sparse;
mod iter_simple;
mod iter_simple_foreach;
mod iter_simple_foreach_hybrid;
mod iter_simple_foreach_sparse_set;
mod iter_simple_foreach_wide;
mod iter_simple_foreach_wide_sparse_set;
@ -19,6 +20,7 @@ mod iter_simple_system;
mod iter_simple_wide;
mod iter_simple_wide_sparse_set;
mod par_iter_simple;
mod par_iter_simple_foreach_hybrid;
use heavy_compute::*;
@ -33,8 +35,8 @@ criterion_group!(
fn iter_simple(c: &mut Criterion) {
let mut group = c.benchmark_group("iter_simple");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("base", |b| {
let mut bench = iter_simple::Benchmark::new();
b.iter(move || bench.run());
@ -71,13 +73,17 @@ fn iter_simple(c: &mut Criterion) {
let mut bench = iter_simple_foreach_wide_sparse_set::Benchmark::new();
b.iter(move || bench.run());
});
group.bench_function("foreach_hybrid", |b| {
let mut bench = iter_simple_foreach_hybrid::Benchmark::new();
b.iter(move || bench.run());
});
group.finish();
}
fn iter_frag(c: &mut Criterion) {
let mut group = c.benchmark_group("iter_fragmented");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("base", |b| {
let mut bench = iter_frag::Benchmark::new();
b.iter(move || bench.run());
@ -99,8 +105,8 @@ fn iter_frag(c: &mut Criterion) {
fn iter_frag_sparse(c: &mut Criterion) {
let mut group = c.benchmark_group("iter_fragmented_sparse");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("base", |b| {
let mut bench = iter_frag_sparse::Benchmark::new();
b.iter(move || bench.run());
@ -122,12 +128,16 @@ fn iter_frag_sparse(c: &mut Criterion) {
fn par_iter_simple(c: &mut Criterion) {
let mut group = c.benchmark_group("par_iter_simple");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for f in [0, 10, 100, 1000] {
group.bench_function(format!("with_{}_fragment", f), |b| {
let mut bench = par_iter_simple::Benchmark::new(f);
b.iter(move || bench.run());
});
}
group.bench_function(format!("hybrid"), |b| {
let mut bench = par_iter_simple_foreach_hybrid::Benchmark::new();
b.iter(move || bench.run());
});
}

View file

@ -31,7 +31,7 @@ impl<'w> Benchmark<'w> {
let mut world = World::new();
let iter = world.spawn_batch(
std::iter::repeat((
core::iter::repeat((
Transform(Mat4::from_scale(Vec3::ONE)),
Position(Vec3::X),
Rotation(Vec3::X),

View file

@ -0,0 +1,45 @@
use bevy_ecs::prelude::*;
use bevy_tasks::{ComputeTaskPool, TaskPool};
use rand::{prelude::SliceRandom, SeedableRng};
use rand_chacha::ChaCha8Rng;
#[derive(Component, Copy, Clone)]
struct TableData(f32);
#[derive(Component, Copy, Clone)]
#[component(storage = "SparseSet")]
struct SparseData(f32);
fn deterministic_rand() -> ChaCha8Rng {
ChaCha8Rng::seed_from_u64(42)
}
pub struct Benchmark<'w>(World, QueryState<(&'w mut TableData, &'w SparseData)>);
impl<'w> Benchmark<'w> {
pub fn new() -> Self {
let mut world = World::new();
ComputeTaskPool::get_or_init(TaskPool::default);
let mut v = vec![];
for _ in 0..100000 {
world.spawn((TableData(0.0), SparseData(0.0)));
v.push(world.spawn(TableData(0.)).id());
}
// By shuffling, randomize the archetype iteration order so it deviates significantly from the table order. This maximizes the loss of cache locality during archetype-based iteration.
v.shuffle(&mut deterministic_rand());
for e in v.into_iter() {
world.entity_mut(e).despawn();
}
let query = world.query::<(&mut TableData, &SparseData)>();
Self(world, query)
}
#[inline(never)]
pub fn run(&mut self) {
self.1
.par_iter_mut(&mut self.0)
.for_each(|(mut v1, v2)| v1.0 += v2.0)
}
}

View file

@ -0,0 +1,8 @@
use criterion::criterion_group;
mod propagation;
mod simple;
use propagation::*;
use simple::*;
criterion_group!(observer_benches, event_propagation, observe_simple);

View file

@ -0,0 +1,124 @@
use bevy_ecs::{
component::Component, entity::Entity, event::Event, observer::Trigger, world::World,
};
use bevy_hierarchy::{BuildChildren, Parent};
use criterion::{black_box, Criterion};
use rand::SeedableRng;
use rand::{seq::IteratorRandom, Rng};
use rand_chacha::ChaCha8Rng;
const DENSITY: usize = 20; // percent of nodes with listeners
const ENTITY_DEPTH: usize = 64;
const ENTITY_WIDTH: usize = 200;
const N_EVENTS: usize = 500;
fn deterministic_rand() -> ChaCha8Rng {
ChaCha8Rng::seed_from_u64(42)
}
pub fn event_propagation(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("event_propagation");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("single_event_type", |bencher| {
let mut world = World::new();
let (roots, leaves, nodes) = spawn_listener_hierarchy(&mut world);
add_listeners_to_hierarchy::<DENSITY, 1>(&roots, &leaves, &nodes, &mut world);
bencher.iter(|| {
send_events::<1, N_EVENTS>(&mut world, &leaves);
});
});
group.bench_function("single_event_type_no_listeners", |bencher| {
let mut world = World::new();
let (roots, leaves, nodes) = spawn_listener_hierarchy(&mut world);
add_listeners_to_hierarchy::<DENSITY, 1>(&roots, &leaves, &nodes, &mut world);
bencher.iter(|| {
// no listeners to observe TestEvent<9>
send_events::<9, N_EVENTS>(&mut world, &leaves);
});
});
group.bench_function("four_event_types", |bencher| {
let mut world = World::new();
let (roots, leaves, nodes) = spawn_listener_hierarchy(&mut world);
const FRAC_N_EVENTS_4: usize = N_EVENTS / 4;
const FRAC_DENSITY_4: usize = DENSITY / 4;
add_listeners_to_hierarchy::<FRAC_DENSITY_4, 1>(&roots, &leaves, &nodes, &mut world);
add_listeners_to_hierarchy::<FRAC_DENSITY_4, 2>(&roots, &leaves, &nodes, &mut world);
add_listeners_to_hierarchy::<FRAC_DENSITY_4, 3>(&roots, &leaves, &nodes, &mut world);
add_listeners_to_hierarchy::<FRAC_DENSITY_4, 4>(&roots, &leaves, &nodes, &mut world);
bencher.iter(|| {
send_events::<1, FRAC_N_EVENTS_4>(&mut world, &leaves);
send_events::<2, FRAC_N_EVENTS_4>(&mut world, &leaves);
send_events::<3, FRAC_N_EVENTS_4>(&mut world, &leaves);
send_events::<4, FRAC_N_EVENTS_4>(&mut world, &leaves);
});
});
group.finish();
}
#[derive(Clone, Component)]
struct TestEvent<const N: usize> {}
impl<const N: usize> Event for TestEvent<N> {
type Traversal = &'static Parent;
const AUTO_PROPAGATE: bool = true;
}
fn send_events<const N: usize, const N_EVENTS: usize>(world: &mut World, leaves: &Vec<Entity>) {
let target = leaves.iter().choose(&mut rand::thread_rng()).unwrap();
(0..N_EVENTS).for_each(|_| {
world.trigger_targets(TestEvent::<N> {}, *target);
});
}
fn spawn_listener_hierarchy(world: &mut World) -> (Vec<Entity>, Vec<Entity>, Vec<Entity>) {
let mut roots = vec![];
let mut leaves = vec![];
let mut nodes = vec![];
for _ in 0..ENTITY_WIDTH {
let mut parent = world.spawn_empty().id();
roots.push(parent);
for _ in 0..ENTITY_DEPTH {
let child = world.spawn_empty().id();
nodes.push(child);
world.entity_mut(parent).add_child(child);
parent = child;
}
nodes.pop();
leaves.push(parent);
}
(roots, leaves, nodes)
}
fn add_listeners_to_hierarchy<const DENSITY: usize, const N: usize>(
roots: &Vec<Entity>,
leaves: &Vec<Entity>,
nodes: &Vec<Entity>,
world: &mut World,
) {
for e in roots.iter() {
world.entity_mut(*e).observe(empty_listener::<N>);
}
for e in leaves.iter() {
world.entity_mut(*e).observe(empty_listener::<N>);
}
let mut rng = deterministic_rand();
for e in nodes.iter() {
if rng.gen_bool(DENSITY as f64 / 100.0) {
world.entity_mut(*e).observe(empty_listener::<N>);
}
}
}
fn empty_listener<const N: usize>(trigger: Trigger<TestEvent<N>>) {
black_box(trigger);
}

View file

@ -0,0 +1,49 @@
use bevy_ecs::{entity::Entity, event::Event, observer::Trigger, world::World};
use criterion::{black_box, Criterion};
use rand::{prelude::SliceRandom, SeedableRng};
use rand_chacha::ChaCha8Rng;
fn deterministic_rand() -> ChaCha8Rng {
ChaCha8Rng::seed_from_u64(42)
}
#[derive(Clone, Event)]
struct EventBase;
pub fn observe_simple(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("observe");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("trigger_simple", |bencher| {
let mut world = World::new();
world.add_observer(empty_listener_base);
bencher.iter(|| {
for _ in 0..10000 {
world.trigger(EventBase)
}
});
});
group.bench_function("trigger_targets_simple/10000_entity", |bencher| {
let mut world = World::new();
let mut entities = vec![];
for _ in 0..10000 {
entities.push(world.spawn_empty().observe(empty_listener_base).id());
}
entities.shuffle(&mut deterministic_rand());
bencher.iter(|| {
send_base_event(&mut world, &entities);
});
});
group.finish();
}
fn empty_listener_base(trigger: Trigger<EventBase>) {
black_box(trigger);
}
fn send_base_event(world: &mut World, entities: &Vec<Entity>) {
world.trigger_targets(EventBase, entities);
}

View file

@ -0,0 +1,31 @@
use bevy_ecs::prelude::*;
use criterion::Criterion;
pub fn combinator_system(criterion: &mut Criterion) {
let mut world = World::new();
let mut group = criterion.benchmark_group("param/combinator_system");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
let mut schedule = Schedule::default();
schedule.add_systems(
(|| {})
.pipe(|| {})
.pipe(|| {})
.pipe(|| {})
.pipe(|| {})
.pipe(|| {})
.pipe(|| {})
.pipe(|| {}),
);
// run once to initialize systems
schedule.run(&mut world);
group.bench_function("8_piped_systems", |bencher| {
bencher.iter(|| {
schedule.run(&mut world);
});
});
group.finish();
}

View file

@ -0,0 +1,49 @@
use bevy_ecs::{
prelude::*,
system::{DynParamBuilder, DynSystemParam, ParamBuilder},
};
use criterion::Criterion;
pub fn dyn_param(criterion: &mut Criterion) {
let mut world = World::new();
let mut group = criterion.benchmark_group("param/combinator_system");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
#[derive(Resource)]
struct R;
let mut schedule = Schedule::default();
let system = (
DynParamBuilder::new::<Res<R>>(ParamBuilder),
DynParamBuilder::new::<Res<R>>(ParamBuilder),
DynParamBuilder::new::<Res<R>>(ParamBuilder),
DynParamBuilder::new::<Res<R>>(ParamBuilder),
DynParamBuilder::new::<Res<R>>(ParamBuilder),
DynParamBuilder::new::<Res<R>>(ParamBuilder),
DynParamBuilder::new::<Res<R>>(ParamBuilder),
DynParamBuilder::new::<Res<R>>(ParamBuilder),
)
.build_state(&mut world)
.build_system(
|_: DynSystemParam,
_: DynSystemParam,
_: DynSystemParam,
_: DynSystemParam,
_: DynSystemParam,
_: DynSystemParam,
_: DynSystemParam,
_: DynSystemParam| {},
);
schedule.add_systems(system);
// run once to initialize systems
schedule.run(&mut world);
group.bench_function("8_dyn_params_system", |bencher| {
bencher.iter(|| {
schedule.run(&mut world);
});
});
group.finish();
}

View file

@ -0,0 +1,11 @@
use criterion::criterion_group;
mod combinator_system;
mod dyn_param;
mod param_set;
use combinator_system::*;
use dyn_param::*;
use param_set::*;
criterion_group!(param_benches, combinator_system, dyn_param, param_set);

View file

@ -0,0 +1,36 @@
use bevy_ecs::prelude::*;
use criterion::Criterion;
pub fn param_set(criterion: &mut Criterion) {
let mut world = World::new();
let mut group = criterion.benchmark_group("param/combinator_system");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
#[derive(Resource)]
struct R;
let mut schedule = Schedule::default();
schedule.add_systems(
|_: ParamSet<(
ResMut<R>,
ResMut<R>,
ResMut<R>,
ResMut<R>,
ResMut<R>,
ResMut<R>,
ResMut<R>,
ResMut<R>,
)>| {},
);
// run once to initialize systems
schedule.run(&mut world);
group.bench_function("8_variant_param_set_system", |bencher| {
bencher.iter(|| {
schedule.run(&mut world);
});
});
group.finish();
}

View file

@ -14,8 +14,8 @@ fn no() -> bool {
pub fn run_condition_yes(criterion: &mut Criterion) {
let mut world = World::new();
let mut group = criterion.benchmark_group("run_condition/yes");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(3));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
fn empty() {}
for amount in 0..21 {
let mut schedule = Schedule::default();
@ -37,8 +37,8 @@ pub fn run_condition_yes(criterion: &mut Criterion) {
pub fn run_condition_no(criterion: &mut Criterion) {
let mut world = World::new();
let mut group = criterion.benchmark_group("run_condition/no");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(3));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
fn empty() {}
for amount in 0..21 {
let mut schedule = Schedule::default();
@ -64,11 +64,11 @@ pub fn run_condition_yes_with_query(criterion: &mut Criterion) {
let mut world = World::new();
world.spawn(TestBool(true));
let mut group = criterion.benchmark_group("run_condition/yes_using_query");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(3));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
fn empty() {}
fn yes_with_query(query: Query<&TestBool>) -> bool {
query.single().0
fn yes_with_query(query: Single<&TestBool>) -> bool {
query.0
}
for amount in 0..21 {
let mut schedule = Schedule::default();
@ -93,8 +93,8 @@ pub fn run_condition_yes_with_resource(criterion: &mut Criterion) {
let mut world = World::new();
world.insert_resource(TestBool(true));
let mut group = criterion.benchmark_group("run_condition/yes_using_resource");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(3));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
fn empty() {}
fn yes_with_resource(res: Res<TestBool>) -> bool {
res.0

View file

@ -17,8 +17,8 @@ const ENTITY_BUNCH: usize = 5000;
pub fn empty_systems(criterion: &mut Criterion) {
let mut world = World::new();
let mut group = criterion.benchmark_group("empty_systems");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(3));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
fn empty() {}
for amount in 0..5 {
let mut schedule = Schedule::default();
@ -50,23 +50,23 @@ pub fn empty_systems(criterion: &mut Criterion) {
pub fn busy_systems(criterion: &mut Criterion) {
fn ab(mut q: Query<(&mut A, &mut B)>) {
q.iter_mut().for_each(|(mut a, mut b)| {
std::mem::swap(&mut a.0, &mut b.0);
core::mem::swap(&mut a.0, &mut b.0);
});
}
fn cd(mut q: Query<(&mut C, &mut D)>) {
q.iter_mut().for_each(|(mut c, mut d)| {
std::mem::swap(&mut c.0, &mut d.0);
core::mem::swap(&mut c.0, &mut d.0);
});
}
fn ce(mut q: Query<(&mut C, &mut E)>) {
q.iter_mut().for_each(|(mut c, mut e)| {
std::mem::swap(&mut c.0, &mut e.0);
core::mem::swap(&mut c.0, &mut e.0);
});
}
let mut world = World::new();
let mut group = criterion.benchmark_group("busy_systems");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(3));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
for entity_bunches in 1..6 {
world.spawn_batch((0..4 * ENTITY_BUNCH).map(|_| (A(0.0), B(0.0))));
world.spawn_batch((0..4 * ENTITY_BUNCH).map(|_| (A(0.0), B(0.0), C(0.0))));
@ -99,26 +99,26 @@ pub fn busy_systems(criterion: &mut Criterion) {
pub fn contrived(criterion: &mut Criterion) {
fn s_0(mut q_0: Query<(&mut A, &mut B)>) {
q_0.iter_mut().for_each(|(mut c_0, mut c_1)| {
std::mem::swap(&mut c_0.0, &mut c_1.0);
core::mem::swap(&mut c_0.0, &mut c_1.0);
});
}
fn s_1(mut q_0: Query<(&mut A, &mut C)>, mut q_1: Query<(&mut B, &mut D)>) {
q_0.iter_mut().for_each(|(mut c_0, mut c_1)| {
std::mem::swap(&mut c_0.0, &mut c_1.0);
core::mem::swap(&mut c_0.0, &mut c_1.0);
});
q_1.iter_mut().for_each(|(mut c_0, mut c_1)| {
std::mem::swap(&mut c_0.0, &mut c_1.0);
core::mem::swap(&mut c_0.0, &mut c_1.0);
});
}
fn s_2(mut q_0: Query<(&mut C, &mut D)>) {
q_0.iter_mut().for_each(|(mut c_0, mut c_1)| {
std::mem::swap(&mut c_0.0, &mut c_1.0);
core::mem::swap(&mut c_0.0, &mut c_1.0);
});
}
let mut world = World::new();
let mut group = criterion.benchmark_group("contrived");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(3));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(3));
for entity_bunches in 1..6 {
world.spawn_batch((0..ENTITY_BUNCH).map(|_| (A(0.0), B(0.0), C(0.0), D(0.0))));
world.spawn_batch((0..ENTITY_BUNCH).map(|_| (A(0.0), B(0.0))));

View file

@ -16,25 +16,25 @@ pub fn schedule(c: &mut Criterion) {
fn ab(mut query: Query<(&mut A, &mut B)>) {
query.iter_mut().for_each(|(mut a, mut b)| {
std::mem::swap(&mut a.0, &mut b.0);
core::mem::swap(&mut a.0, &mut b.0);
});
}
fn cd(mut query: Query<(&mut C, &mut D)>) {
query.iter_mut().for_each(|(mut c, mut d)| {
std::mem::swap(&mut c.0, &mut d.0);
core::mem::swap(&mut c.0, &mut d.0);
});
}
fn ce(mut query: Query<(&mut C, &mut E)>) {
query.iter_mut().for_each(|(mut c, mut e)| {
std::mem::swap(&mut c.0, &mut e.0);
core::mem::swap(&mut c.0, &mut e.0);
});
}
let mut group = c.benchmark_group("schedule");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("base", |b| {
let mut world = World::default();
@ -68,8 +68,8 @@ pub fn build_schedule(criterion: &mut Criterion) {
struct DummySet;
let mut group = criterion.benchmark_group("build_schedule");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(15));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(15));
// Method: generate a set of `graph_size` systems which have a One True Ordering.
// Add system to the schedule with full constraints. Hopefully this should be maximally

View file

@ -1,6 +1,5 @@
use bevy_ecs::{
component::Component,
entity::Entity,
system::Commands,
world::{Command, CommandQueue, World},
};
@ -15,8 +14,8 @@ struct C;
pub fn empty_commands(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("empty_commands");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
group.bench_function("0_entities", |bencher| {
let mut world = World::default();
@ -32,8 +31,8 @@ pub fn empty_commands(criterion: &mut Criterion) {
pub fn spawn_commands(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("spawn_commands");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in (1..5).map(|i| i * 2 * 1000) {
group.bench_function(format!("{}_entities", entity_count), |bencher| {
@ -44,18 +43,10 @@ pub fn spawn_commands(criterion: &mut Criterion) {
let mut commands = Commands::new(&mut command_queue, &world);
for i in 0..entity_count {
let mut entity = commands.spawn_empty();
if black_box(i % 2 == 0) {
entity.insert(A);
}
if black_box(i % 3 == 0) {
entity.insert(B);
}
if black_box(i % 4 == 0) {
entity.insert(C);
}
entity
.insert_if(A, || black_box(i % 2 == 0))
.insert_if(B, || black_box(i % 3 == 0))
.insert_if(C, || black_box(i % 4 == 0));
if black_box(i % 5 == 0) {
entity.despawn();
@ -77,8 +68,8 @@ struct Vec3([f32; 3]);
pub fn insert_commands(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("insert_commands");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
let entity_count = 10_000;
group.bench_function("insert", |bencher| {
@ -99,7 +90,7 @@ pub fn insert_commands(criterion: &mut Criterion) {
command_queue.apply(&mut world);
});
});
group.bench_function("insert_batch", |bencher| {
group.bench_function("insert_or_spawn_batch", |bencher| {
let mut world = World::default();
let mut command_queue = CommandQueue::default();
let mut entities = Vec::new();
@ -117,6 +108,24 @@ pub fn insert_commands(criterion: &mut Criterion) {
command_queue.apply(&mut world);
});
});
group.bench_function("insert_batch", |bencher| {
let mut world = World::default();
let mut command_queue = CommandQueue::default();
let mut entities = Vec::new();
for _ in 0..entity_count {
entities.push(world.spawn_empty().id());
}
bencher.iter(|| {
let mut commands = Commands::new(&mut command_queue, &world);
let mut values = Vec::with_capacity(entity_count);
for entity in &entities {
values.push((*entity, (Matrix::default(), Vec3::default())));
}
commands.insert_batch(values);
command_queue.apply(&mut world);
});
});
group.finish();
}
@ -140,8 +149,8 @@ impl Command for FakeCommandB {
pub fn fake_commands(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("fake_commands");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for command_count in (1..5).map(|i| i * 2 * 1000) {
group.bench_function(format!("{}_commands", command_count), |bencher| {
@ -152,9 +161,9 @@ pub fn fake_commands(criterion: &mut Criterion) {
let mut commands = Commands::new(&mut command_queue, &world);
for i in 0..command_count {
if black_box(i % 2 == 0) {
commands.add(FakeCommandA);
commands.queue(FakeCommandA);
} else {
commands.add(FakeCommandB(0));
commands.queue(FakeCommandB(0));
}
}
command_queue.apply(&mut world);
@ -184,10 +193,9 @@ impl Default for LargeStruct {
}
pub fn sized_commands_impl<T: Default + Command>(criterion: &mut Criterion) {
let mut group =
criterion.benchmark_group(format!("sized_commands_{}_bytes", std::mem::size_of::<T>()));
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
let mut group = criterion.benchmark_group(format!("sized_commands_{}_bytes", size_of::<T>()));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for command_count in (1..5).map(|i| i * 2 * 1000) {
group.bench_function(format!("{}_commands", command_count), |bencher| {
@ -197,7 +205,7 @@ pub fn sized_commands_impl<T: Default + Command>(criterion: &mut Criterion) {
bencher.iter(|| {
let mut commands = Commands::new(&mut command_queue, &world);
for _ in 0..command_count {
commands.add(T::default());
commands.queue(T::default());
}
command_queue.apply(&mut world);
});
@ -218,41 +226,3 @@ pub fn medium_sized_commands(criterion: &mut Criterion) {
pub fn large_sized_commands(criterion: &mut Criterion) {
sized_commands_impl::<SizedCommand<LargeStruct>>(criterion);
}
pub fn get_or_spawn(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("get_or_spawn");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.bench_function("individual", |bencher| {
let mut world = World::default();
let mut command_queue = CommandQueue::default();
bencher.iter(|| {
let mut commands = Commands::new(&mut command_queue, &world);
for i in 0..10_000 {
commands
.get_or_spawn(Entity::from_raw(i))
.insert((Matrix::default(), Vec3::default()));
}
command_queue.apply(&mut world);
});
});
group.bench_function("batched", |bencher| {
let mut world = World::default();
let mut command_queue = CommandQueue::default();
bencher.iter(|| {
let mut commands = Commands::new(&mut command_queue, &world);
let mut values = Vec::with_capacity(10_000);
for i in 0..10_000 {
values.push((Entity::from_raw(i), (Matrix::default(), Vec3::default())));
}
commands.insert_or_spawn_batch(values);
command_queue.apply(&mut world);
});
});
group.finish();
}

View file

@ -0,0 +1,32 @@
use bevy_ecs::prelude::*;
use criterion::Criterion;
use glam::*;
#[derive(Component)]
struct A(Mat4);
#[derive(Component)]
struct B(Vec4);
pub fn world_despawn(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("despawn_world");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in (0..5).map(|i| 10_u32.pow(i)) {
let mut world = World::default();
for _ in 0..entity_count {
world.spawn((A(Mat4::default()), B(Vec4::default())));
}
let ents = world.iter_entities().map(|e| e.id()).collect::<Vec<_>>();
group.bench_function(format!("{}_entities", entity_count), |bencher| {
bencher.iter(|| {
ents.iter().for_each(|e| {
world.despawn(*e);
});
});
});
}
group.finish();
}

View file

@ -0,0 +1,39 @@
use bevy_ecs::prelude::*;
use bevy_hierarchy::despawn_with_children_recursive;
use bevy_hierarchy::BuildChildren;
use bevy_hierarchy::ChildBuild;
use criterion::Criterion;
use glam::*;
#[derive(Component)]
struct A(Mat4);
#[derive(Component)]
struct B(Vec4);
pub fn world_despawn_recursive(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("despawn_world_recursive");
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in (0..5).map(|i| 10_u32.pow(i)) {
let mut world = World::default();
for _ in 0..entity_count {
world
.spawn((A(Mat4::default()), B(Vec4::default())))
.with_children(|parent| {
parent.spawn((A(Mat4::default()), B(Vec4::default())));
});
}
let ents = world.iter_entities().map(|e| e.id()).collect::<Vec<_>>();
group.bench_function(format!("{}_entities", entity_count), |bencher| {
bencher.iter(|| {
ents.iter().for_each(|e| {
despawn_with_children_recursive(&mut world, *e, true);
});
});
});
}
group.finish();
}

View file

@ -33,7 +33,7 @@ pub fn entity_set_build_and_lookup(c: &mut Criterion) {
// Get some random-but-consistent entities to use for all the benches below.
let mut rng = ChaCha8Rng::seed_from_u64(size as u64);
let entities =
Vec::from_iter(std::iter::repeat_with(|| make_entity(&mut rng, size)).take(size));
Vec::from_iter(core::iter::repeat_with(|| make_entity(&mut rng, size)).take(size));
group.throughput(Throughput::Elements(size as u64));
group.bench_function(BenchmarkId::new("entity_set_build", size), |bencher| {

View file

@ -3,6 +3,12 @@ use criterion::criterion_group;
mod commands;
use commands::*;
mod despawn;
use despawn::*;
mod despawn_recursive;
use despawn_recursive::*;
mod spawn;
use spawn::*;
@ -21,13 +27,14 @@ criterion_group!(
zero_sized_commands,
medium_sized_commands,
large_sized_commands,
get_or_spawn,
world_entity,
world_get,
world_query_get,
world_query_iter,
world_query_for_each,
world_spawn,
world_despawn,
world_despawn_recursive,
query_get,
query_get_many::<2>,
query_get_many::<5>,

View file

@ -9,8 +9,8 @@ struct B(Vec4);
pub fn world_spawn(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("spawn_world");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in (0..5).map(|i| 10_u32.pow(i)) {
group.bench_function(format!("{}_entities", entity_count), |bencher| {

View file

@ -22,7 +22,7 @@ struct WideTable<const X: usize>(f32);
#[component(storage = "SparseSet")]
struct WideSparse<const X: usize>(f32);
const RANGE: std::ops::Range<u32> = 5..6;
const RANGE: core::ops::Range<u32> = 5..6;
fn deterministic_rand() -> ChaCha8Rng {
ChaCha8Rng::seed_from_u64(42)
@ -42,8 +42,8 @@ fn setup_wide<T: Bundle + Default>(entity_count: u32) -> World {
pub fn world_entity(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("world_entity");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in RANGE.map(|i| i * 10_000) {
group.bench_function(format!("{}_entities", entity_count), |bencher| {
@ -63,8 +63,8 @@ pub fn world_entity(criterion: &mut Criterion) {
pub fn world_get(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("world_get");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in RANGE.map(|i| i * 10_000) {
group.bench_function(format!("{}_entities_table", entity_count), |bencher| {
@ -94,8 +94,8 @@ pub fn world_get(criterion: &mut Criterion) {
pub fn world_query_get(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("world_query_get");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in RANGE.map(|i| i * 10_000) {
group.bench_function(format!("{}_entities_table", entity_count), |bencher| {
@ -180,8 +180,8 @@ pub fn world_query_get(criterion: &mut Criterion) {
pub fn world_query_iter(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("world_query_iter");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in RANGE.map(|i| i * 10_000) {
group.bench_function(format!("{}_entities_table", entity_count), |bencher| {
@ -219,8 +219,8 @@ pub fn world_query_iter(criterion: &mut Criterion) {
pub fn world_query_for_each(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("world_query_for_each");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in RANGE.map(|i| i * 10_000) {
group.bench_function(format!("{}_entities_table", entity_count), |bencher| {
@ -258,8 +258,8 @@ pub fn world_query_for_each(criterion: &mut Criterion) {
pub fn query_get(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("query_get");
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(4));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(4));
for entity_count in RANGE.map(|i| i * 10_000) {
group.bench_function(format!("{}_entities_table", entity_count), |bencher| {
@ -307,8 +307,8 @@ pub fn query_get(criterion: &mut Criterion) {
pub fn query_get_many<const N: usize>(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group(&format!("query_get_many_{N}"));
group.warm_up_time(std::time::Duration::from_millis(500));
group.measurement_time(std::time::Duration::from_secs(2 * N as u64));
group.warm_up_time(core::time::Duration::from_millis(500));
group.measurement_time(core::time::Duration::from_secs(2 * N as u64));
for entity_count in RANGE.map(|i| i * 10_000) {
group.bench_function(format!("{}_calls_table", entity_count), |bencher| {

View file

@ -20,7 +20,8 @@ fn cubic_2d(c: &mut Criterion) {
vec2(1.0, 0.0),
vec2(1.0, 1.0),
]])
.to_curve();
.to_curve()
.expect("Unable to build a curve from this data");
c.bench_function("cubic_position_Vec2", |b| {
b.iter(|| black_box(bezier.position(black_box(0.5))));
});
@ -33,7 +34,8 @@ fn cubic(c: &mut Criterion) {
vec3a(1.0, 0.0, 0.0),
vec3a(1.0, 1.0, 1.0),
]])
.to_curve();
.to_curve()
.expect("Unable to build a curve from this data");
c.bench_function("cubic_position_Vec3A", |b| {
b.iter(|| black_box(bezier.position(black_box(0.5))));
});
@ -46,7 +48,8 @@ fn cubic_vec3(c: &mut Criterion) {
vec3(1.0, 0.0, 0.0),
vec3(1.0, 1.0, 1.0),
]])
.to_curve();
.to_curve()
.expect("Unable to build a curve from this data");
c.bench_function("cubic_position_Vec3", |b| {
b.iter(|| black_box(bezier.position(black_box(0.5))));
});
@ -59,7 +62,8 @@ fn build_pos_cubic(c: &mut Criterion) {
vec3a(1.0, 0.0, 0.0),
vec3a(1.0, 1.0, 1.0),
]])
.to_curve();
.to_curve()
.expect("Unable to build a curve from this data");
c.bench_function("build_pos_cubic_100_points", |b| {
b.iter(|| black_box(bezier.iter_positions(black_box(100)).collect::<Vec<_>>()));
});
@ -72,7 +76,8 @@ fn build_accel_cubic(c: &mut Criterion) {
vec3a(1.0, 0.0, 0.0),
vec3a(1.0, 1.0, 1.0),
]])
.to_curve();
.to_curve()
.expect("Unable to build a curve from this data");
c.bench_function("build_accel_cubic_100_points", |b| {
b.iter(|| black_box(bezier.iter_positions(black_box(100)).collect::<Vec<_>>()));
});

View file

@ -0,0 +1,120 @@
use bevy_math::{Dir3, Mat4, Ray3d, Vec3};
use bevy_picking::mesh_picking::ray_cast;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
fn ptoxznorm(p: u32, size: u32) -> (f32, f32) {
let ij = (p / (size), p % (size));
(ij.0 as f32 / size as f32, ij.1 as f32 / size as f32)
}
struct SimpleMesh {
positions: Vec<[f32; 3]>,
normals: Vec<[f32; 3]>,
indices: Vec<u32>,
}
fn mesh_creation(vertices_per_side: u32) -> SimpleMesh {
let mut positions = Vec::new();
let mut normals = Vec::new();
for p in 0..vertices_per_side.pow(2) {
let xz = ptoxznorm(p, vertices_per_side);
positions.push([xz.0 - 0.5, 0.0, xz.1 - 0.5]);
normals.push([0.0, 1.0, 0.0]);
}
let mut indices = vec![];
for p in 0..vertices_per_side.pow(2) {
if p % (vertices_per_side) != vertices_per_side - 1
&& p / (vertices_per_side) != vertices_per_side - 1
{
indices.extend_from_slice(&[p, p + 1, p + vertices_per_side]);
indices.extend_from_slice(&[p + vertices_per_side, p + 1, p + vertices_per_side + 1]);
}
}
SimpleMesh {
positions,
normals,
indices,
}
}
fn ray_mesh_intersection(c: &mut Criterion) {
let mut group = c.benchmark_group("ray_mesh_intersection");
group.warm_up_time(std::time::Duration::from_millis(500));
for vertices_per_side in [10_u32, 100, 1000] {
group.bench_function(format!("{}_vertices", vertices_per_side.pow(2)), |b| {
let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Dir3::NEG_Y);
let mesh_to_world = Mat4::IDENTITY;
let mesh = mesh_creation(vertices_per_side);
b.iter(|| {
black_box(ray_cast::ray_mesh_intersection(
ray,
&mesh_to_world,
&mesh.positions,
Some(&mesh.normals),
Some(&mesh.indices),
ray_cast::Backfaces::Cull,
));
});
});
}
}
fn ray_mesh_intersection_no_cull(c: &mut Criterion) {
let mut group = c.benchmark_group("ray_mesh_intersection_no_cull");
group.warm_up_time(std::time::Duration::from_millis(500));
for vertices_per_side in [10_u32, 100, 1000] {
group.bench_function(format!("{}_vertices", vertices_per_side.pow(2)), |b| {
let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Dir3::NEG_Y);
let mesh_to_world = Mat4::IDENTITY;
let mesh = mesh_creation(vertices_per_side);
b.iter(|| {
black_box(ray_cast::ray_mesh_intersection(
ray,
&mesh_to_world,
&mesh.positions,
Some(&mesh.normals),
Some(&mesh.indices),
ray_cast::Backfaces::Include,
));
});
});
}
}
fn ray_mesh_intersection_no_intersection(c: &mut Criterion) {
let mut group = c.benchmark_group("ray_mesh_intersection_no_intersection");
group.warm_up_time(std::time::Duration::from_millis(500));
for vertices_per_side in [10_u32, 100, 1000] {
group.bench_function(format!("{}_vertices", (vertices_per_side).pow(2)), |b| {
let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Dir3::X);
let mesh_to_world = Mat4::IDENTITY;
let mesh = mesh_creation(vertices_per_side);
b.iter(|| {
black_box(ray_cast::ray_mesh_intersection(
ray,
&mesh_to_world,
&mesh.positions,
Some(&mesh.normals),
Some(&mesh.indices),
ray_cast::Backfaces::Cull,
));
});
});
}
}
criterion_group!(
benches,
ray_mesh_intersection,
ray_mesh_intersection_no_cull,
ray_mesh_intersection_no_intersection
);
criterion_main!(benches);

View file

@ -0,0 +1,87 @@
use bevy_reflect::func::{ArgList, IntoFunction, IntoFunctionMut, TypedFunction};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
criterion_group!(benches, typed, into, call, clone);
criterion_main!(benches);
fn add(a: i32, b: i32) -> i32 {
a + b
}
fn typed(c: &mut Criterion) {
c.benchmark_group("typed")
.bench_function("function", |b| {
b.iter(|| add.get_function_info());
})
.bench_function("closure", |b| {
let capture = 25;
let closure = |a: i32| a + capture;
b.iter(|| closure.get_function_info());
})
.bench_function("closure_mut", |b| {
let mut capture = 25;
let closure = |a: i32| capture += a;
b.iter(|| closure.get_function_info());
});
}
fn into(c: &mut Criterion) {
c.benchmark_group("into")
.bench_function("function", |b| {
b.iter(|| add.into_function());
})
.bench_function("closure", |b| {
let capture = 25;
let closure = |a: i32| a + capture;
b.iter(|| closure.into_function());
})
.bench_function("closure_mut", |b| {
let mut _capture = 25;
let closure = move |a: i32| _capture += a;
b.iter(|| closure.into_function_mut());
});
}
fn call(c: &mut Criterion) {
c.benchmark_group("call")
.bench_function("trait_object", |b| {
b.iter_batched(
|| Box::new(add) as Box<dyn Fn(i32, i32) -> i32>,
|func| func(75, 25),
BatchSize::SmallInput,
);
})
.bench_function("function", |b| {
let add = add.into_function();
b.iter_batched(
|| ArgList::new().push_owned(75_i32).push_owned(25_i32),
|args| add.call(args),
BatchSize::SmallInput,
);
})
.bench_function("closure", |b| {
let capture = 25;
let add = (|a: i32| a + capture).into_function();
b.iter_batched(
|| ArgList::new().push_owned(75_i32),
|args| add.call(args),
BatchSize::SmallInput,
);
})
.bench_function("closure_mut", |b| {
let mut capture = 25;
let mut add = (|a: i32| capture += a).into_function_mut();
b.iter_batched(
|| ArgList::new().push_owned(75_i32),
|args| add.call(args),
BatchSize::SmallInput,
);
});
}
fn clone(c: &mut Criterion) {
c.benchmark_group("clone").bench_function("function", |b| {
let add = add.into_function();
b.iter(|| add.clone());
});
}

View file

@ -1,4 +1,4 @@
use std::{iter, time::Duration};
use core::{iter, time::Duration};
use bevy_reflect::{DynamicList, List};
use criterion::{

View file

@ -1,4 +1,4 @@
use std::{fmt::Write, iter, time::Duration};
use core::{fmt::Write, iter, time::Duration};
use bevy_reflect::{DynamicMap, Map};
use bevy_utils::HashMap;

View file

@ -1,4 +1,4 @@
use std::{fmt::Write, str, time::Duration};
use core::{fmt::Write, str, time::Duration};
use bevy_reflect::ParsedPath;
use criterion::{

View file

@ -1,6 +1,6 @@
use std::time::Duration;
use core::time::Duration;
use bevy_reflect::{DynamicStruct, GetField, Reflect, Struct};
use bevy_reflect::{DynamicStruct, GetField, PartialReflect, Reflect, Struct};
use criterion::{
black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput,
};
@ -62,7 +62,7 @@ fn concrete_struct_apply(criterion: &mut Criterion) {
// Use functions that produce trait objects of varying concrete types as the
// input to the benchmark.
let inputs: &[fn() -> (Box<dyn Struct>, Box<dyn Reflect>)] = &[
let inputs: &[fn() -> (Box<dyn Struct>, Box<dyn PartialReflect>)] = &[
|| (Box::new(Struct16::default()), Box::new(Struct16::default())),
|| (Box::new(Struct32::default()), Box::new(Struct32::default())),
|| (Box::new(Struct64::default()), Box::new(Struct64::default())),
@ -240,7 +240,7 @@ fn dynamic_struct_apply(criterion: &mut Criterion) {
group.warm_up_time(WARM_UP_TIME);
group.measurement_time(MEASUREMENT_TIME);
let patches: &[(fn() -> Box<dyn Reflect>, usize)] = &[
let patches: &[(fn() -> Box<dyn PartialReflect>, usize)] = &[
(|| Box::new(Struct16::default()), 16),
(|| Box::new(Struct32::default()), 32),
(|| Box::new(Struct64::default()), 64),

View file

@ -4,12 +4,9 @@ use bevy_render::mesh::TorusMeshBuilder;
fn torus(c: &mut Criterion) {
c.bench_function("build_torus", |b| {
b.iter(|| black_box(TorusMeshBuilder::new(black_box(0.5),black_box(1.0))));
b.iter(|| black_box(TorusMeshBuilder::new(black_box(0.5), black_box(1.0))));
});
}
criterion_group!(
benches,
torus,
);
criterion_group!(benches, torus,);
criterion_main!(benches);

View file

@ -1,22 +1,22 @@
use bevy_tasks::{ParallelIterator, TaskPoolBuilder};
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
struct ParChunks<'a, T>(std::slice::Chunks<'a, T>);
impl<'a, T> ParallelIterator<std::slice::Iter<'a, T>> for ParChunks<'a, T>
struct ParChunks<'a, T>(core::slice::Chunks<'a, T>);
impl<'a, T> ParallelIterator<core::slice::Iter<'a, T>> for ParChunks<'a, T>
where
T: 'a + Send + Sync,
{
fn next_batch(&mut self) -> Option<std::slice::Iter<'a, T>> {
fn next_batch(&mut self) -> Option<core::slice::Iter<'a, T>> {
self.0.next().map(|s| s.iter())
}
}
struct ParChunksMut<'a, T>(std::slice::ChunksMut<'a, T>);
impl<'a, T> ParallelIterator<std::slice::IterMut<'a, T>> for ParChunksMut<'a, T>
struct ParChunksMut<'a, T>(core::slice::ChunksMut<'a, T>);
impl<'a, T> ParallelIterator<core::slice::IterMut<'a, T>> for ParChunksMut<'a, T>
where
T: 'a + Send + Sync,
{
fn next_batch(&mut self) -> Option<std::slice::IterMut<'a, T>> {
fn next_batch(&mut self) -> Option<core::slice::IterMut<'a, T>> {
self.0.next().map(|s| s.iter_mut())
}
}

View file

@ -1,7 +1,7 @@
doc-valid-idents = [
"GilRs",
"glTF",
"MacOS",
"macOS",
"NVidia",
"OpenXR",
"sRGB",
@ -10,3 +10,35 @@ doc-valid-idents = [
"WebGPU",
"..",
]
check-private-items = true
disallowed-methods = [
{ path = "f32::powi", reason = "use bevy_math::ops::FloatPow::squared, bevy_math::ops::FloatPow::cubed, or bevy_math::ops::powf instead for libm determinism" },
{ path = "f32::log", reason = "use bevy_math::ops::ln, bevy_math::ops::log2, or bevy_math::ops::log10 instead for libm determinism" },
{ path = "f32::abs_sub", reason = "deprecated and deeply confusing method" },
{ path = "f32::powf", reason = "use bevy_math::ops::powf instead for libm determinism" },
{ path = "f32::exp", reason = "use bevy_math::ops::exp instead for libm determinism" },
{ path = "f32::exp2", reason = "use bevy_math::ops::exp2 instead for libm determinism" },
{ path = "f32::ln", reason = "use bevy_math::ops::ln instead for libm determinism" },
{ path = "f32::log2", reason = "use bevy_math::ops::log2 instead for libm determinism" },
{ path = "f32::log10", reason = "use bevy_math::ops::log10 instead for libm determinism" },
{ path = "f32::cbrt", reason = "use bevy_math::ops::cbrt instead for libm determinism" },
{ path = "f32::hypot", reason = "use bevy_math::ops::hypot instead for libm determinism" },
{ path = "f32::sin", reason = "use bevy_math::ops::sin instead for libm determinism" },
{ path = "f32::cos", reason = "use bevy_math::ops::cos instead for libm determinism" },
{ path = "f32::tan", reason = "use bevy_math::ops::tan instead for libm determinism" },
{ path = "f32::asin", reason = "use bevy_math::ops::asin instead for libm determinism" },
{ path = "f32::acos", reason = "use bevy_math::ops::acos instead for libm determinism" },
{ path = "f32::atan", reason = "use bevy_math::ops::atan instead for libm determinism" },
{ path = "f32::atan2", reason = "use bevy_math::ops::atan2 instead for libm determinism" },
{ path = "f32::sin_cos", reason = "use bevy_math::ops::sin_cos instead for libm determinism" },
{ path = "f32::exp_m1", reason = "use bevy_math::ops::exp_m1 instead for libm determinism" },
{ path = "f32::ln_1p", reason = "use bevy_math::ops::ln_1p instead for libm determinism" },
{ path = "f32::sinh", reason = "use bevy_math::ops::sinh instead for libm determinism" },
{ path = "f32::cosh", reason = "use bevy_math::ops::cosh instead for libm determinism" },
{ path = "f32::tanh", reason = "use bevy_math::ops::tanh instead for libm determinism" },
{ path = "f32::asinh", reason = "use bevy_math::ops::asinh instead for libm determinism" },
{ path = "f32::acosh", reason = "use bevy_math::ops::acosh instead for libm determinism" },
{ path = "f32::atanh", reason = "use bevy_math::ops::atanh instead for libm determinism" },
]
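For illustration, a minimal sketch (a hypothetical helper, not part of this commit) of the substitution this lint configuration pushes toward, using the `bevy_math::ops` functions named in the entries above:

use bevy_math::ops;

// Hypothetical example: compute a wrapped angle with the libm-backed
// `bevy_math::ops` functions instead of the disallowed `f32` inherent
// methods, so the result stays deterministic across platforms.
fn wrapped_angle(theta: f32) -> f32 {
    ops::atan2(ops::sin(theta), ops::cos(theta))
}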

View file

@ -1,6 +1,6 @@
[package]
name = "bevy_a11y"
version = "0.14.0-dev"
version = "0.15.0-dev"
edition = "2021"
description = "Provides accessibility support for Bevy Engine"
homepage = "https://bevyengine.org"
@ -10,15 +10,16 @@ keywords = ["bevy", "accessibility", "a11y"]
[dependencies]
# bevy
bevy_app = { path = "../bevy_app", version = "0.14.0-dev" }
bevy_derive = { path = "../bevy_derive", version = "0.14.0-dev" }
bevy_ecs = { path = "../bevy_ecs", version = "0.14.0-dev" }
bevy_app = { path = "../bevy_app", version = "0.15.0-dev" }
bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" }
bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" }
bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev" }
accesskit = "0.15"
accesskit = "0.16"
[lints]
workspace = true
[package.metadata.docs.rs]
rustdoc-args = ["-Zunstable-options", "--cfg", "docsrs"]
rustdoc-args = ["-Zunstable-options", "--generate-link-to-definition"]
all-features = true

Some files were not shown because too many files have changed in this diff.