Camera-driven UI (#10559)

# Objective

Add support for presenting each UI tree on a specific window and
viewport, while making as few breaking changes as possible.

This PR is meant to resolve the following issues at once, since they're
all related.

- Fixes #5622 
- Fixes #5570 
- Fixes #5621 

Adopted #5892 , but started over since the current codebase diverged
significantly from the original PR branch. Also, I made a decision to
propagate the component to children instead of recursively iterating over
nodes in search of the root.


## Solution

Add a new optional component that can be inserted into UI root nodes and
is propagated to children to specify which camera it should render onto.
This is then used to get the render target and the viewport for that UI
tree. Since this component is optional, the default behavior should be
to render onto the single camera (if only one exists) and warn of
ambiguity if multiple cameras exist. This reduces the complexity for
users with just one camera, while giving control in contexts where it
matters.

## Changelog

- Adds `TargetCamera(Entity)` component to specify which camera a node
tree should be rendered into. If only one camera exists, this component is
optional.
- Adds an example of rendering UI to a texture and using it as a
material in a 3D world.
- Fixes recalculation of physical viewport size when target scale factor
changes. This can happen when the window is moved between displays with
different DPI.
- Changes examples to demonstrate assigning UI to different viewports
and windows and make interactions in an offset viewport testable.
- Removes `UiCameraConfig`. UI visibility can now be controlled via a
combination of explicit `TargetCamera` and `Visibility` on the root
nodes.

---------

Co-authored-by: davier <bricedavier@gmail.com>
Co-authored-by: Alice Cecile <alice.i.cecile@gmail.com>
Co-authored-by: Alice Cecile <alice.i.cecil@gmail.com>
This commit is contained in:
Roman Salnikov 2024-01-16 01:39:10 +01:00 committed by GitHub
parent ee9a1503ed
commit eb9db21113
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 874 additions and 298 deletions

View file

@ -2249,6 +2249,17 @@ description = "Showcases the RelativeCursorPosition component"
category = "UI (User Interface)"
wasm = true
[[example]]
name = "render_ui_to_texture"
path = "examples/ui/render_ui_to_texture.rs"
doc-scrape-examples = true
[package.metadata.example.render_ui_to_texture]
name = "Render UI to Texture"
description = "An example of rendering UI as a part of a 3D world"
category = "UI (User Interface)"
wasm = true
[[example]]
name = "size_constraints"
path = "examples/ui/size_constraints.rs"

View file

@ -28,6 +28,7 @@ use bevy_transform::components::GlobalTransform;
use bevy_utils::{HashMap, HashSet};
use bevy_window::{
NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized,
WindowScaleFactorChanged,
};
use std::{borrow::Cow, ops::Range};
use wgpu::{BlendState, LoadOp, TextureFormat};
@ -79,7 +80,7 @@ pub struct RenderTargetInfo {
pub struct ComputedCameraValues {
projection_matrix: Mat4,
target_info: Option<RenderTargetInfo>,
// position and size of the `Viewport`
// size of the `Viewport`
old_viewport_size: Option<UVec2>,
}
@ -229,6 +230,11 @@ impl Camera {
self.computed.target_info.as_ref().map(|t| t.physical_size)
}
#[inline]
pub fn target_scaling_factor(&self) -> Option<f32> {
self.computed.target_info.as_ref().map(|t| t.scale_factor)
}
/// The projection matrix computed using this camera's [`CameraProjection`].
#[inline]
pub fn projection_matrix(&self) -> Mat4 {
@ -575,9 +581,9 @@ impl NormalizedRenderTarget {
/// System in charge of updating a [`Camera`] when its window or projection changes.
///
/// The system detects window creation and resize events to update the camera projection if
/// needed. It also queries any [`CameraProjection`] component associated with the same entity
/// as the [`Camera`] one, to automatically update the camera projection matrix.
/// The system detects window creation, resize, and scale factor change events to update the camera
/// projection if needed. It also queries any [`CameraProjection`] component associated with the same
/// entity as the [`Camera`] one, to automatically update the camera projection matrix.
///
/// The system function is generic over the camera projection type, and only instances of
/// [`OrthographicProjection`] and [`PerspectiveProjection`] are automatically added to
@ -595,6 +601,7 @@ impl NormalizedRenderTarget {
pub fn camera_system<T: CameraProjection + Component>(
mut window_resized_events: EventReader<WindowResized>,
mut window_created_events: EventReader<WindowCreated>,
mut window_scale_factor_changed_events: EventReader<WindowScaleFactorChanged>,
mut image_asset_events: EventReader<AssetEvent<Image>>,
primary_window: Query<Entity, With<PrimaryWindow>>,
windows: Query<(Entity, &Window)>,
@ -607,6 +614,11 @@ pub fn camera_system<T: CameraProjection + Component>(
let mut changed_window_ids = HashSet::new();
changed_window_ids.extend(window_created_events.read().map(|event| event.window));
changed_window_ids.extend(window_resized_events.read().map(|event| event.window));
let scale_factor_changed_window_ids: HashSet<_> = window_scale_factor_changed_events
.read()
.map(|event| event.window)
.collect();
changed_window_ids.extend(scale_factor_changed_window_ids.clone());
let changed_image_handles: HashSet<&AssetId<Image>> = image_asset_events
.read()
@ -617,7 +629,7 @@ pub fn camera_system<T: CameraProjection + Component>(
.collect();
for (mut camera, mut camera_projection) in &mut cameras {
let viewport_size = camera
let mut viewport_size = camera
.viewport
.as_ref()
.map(|viewport| viewport.physical_size);
@ -628,11 +640,36 @@ pub fn camera_system<T: CameraProjection + Component>(
|| camera_projection.is_changed()
|| camera.computed.old_viewport_size != viewport_size
{
camera.computed.target_info = normalized_target.get_render_target_info(
let new_computed_target_info = normalized_target.get_render_target_info(
&windows,
&images,
&manual_texture_views,
);
// Check for the scale factor changing, and resize the viewport if needed.
// This can happen when the window is moved between monitors with different DPIs.
// Without this, the viewport will take a smaller portion of the window moved to
// a higher DPI monitor.
if normalized_target.is_changed(&scale_factor_changed_window_ids, &HashSet::new()) {
if let (Some(new_scale_factor), Some(old_scale_factor)) = (
new_computed_target_info
.as_ref()
.map(|info| info.scale_factor),
camera
.computed
.target_info
.as_ref()
.map(|info| info.scale_factor),
) {
let resize_factor = new_scale_factor / old_scale_factor;
if let Some(ref mut viewport) = camera.viewport {
let resize = |vec: UVec2| (vec.as_vec2() * resize_factor).as_uvec2();
viewport.physical_position = resize(viewport.physical_position);
viewport.physical_size = resize(viewport.physical_size);
viewport_size = Some(viewport.physical_size);
}
}
}
camera.computed.target_info = new_computed_target_info;
if let Some(size) = camera.logical_viewport_size() {
camera_projection.update(size.x, size.y);
camera.computed.projection_matrix = camera_projection.get_projection_matrix();

View file

@ -1,30 +0,0 @@
//! Configuration for cameras related to UI.
use bevy_ecs::component::Component;
use bevy_ecs::prelude::With;
use bevy_ecs::reflect::ReflectComponent;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::camera::Camera;
use bevy_render::extract_component::ExtractComponent;
/// Configuration for cameras related to UI.
///
/// When a [`Camera`] doesn't have the [`UiCameraConfig`] component,
/// it will display the UI by default.
///
#[derive(Component, Clone, ExtractComponent, Reflect)]
#[extract_component_filter(With<Camera>)]
#[reflect(Component, Default)]
pub struct UiCameraConfig {
/// Whether to output UI to this camera view.
///
/// When a [`Camera`] doesn't have the [`UiCameraConfig`] component,
/// it will display the UI by default.
pub show_ui: bool,
}
impl Default for UiCameraConfig {
fn default() -> Self {
Self { show_ui: true }
}
}

View file

@ -1,4 +1,4 @@
use crate::{camera_config::UiCameraConfig, CalculatedClip, Node, UiScale, UiStack};
use crate::{CalculatedClip, DefaultUiCamera, Node, TargetCamera, UiScale, UiStack};
use bevy_ecs::{
change_detection::DetectChangesMut,
entity::Entity,
@ -13,7 +13,7 @@ use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{camera::NormalizedRenderTarget, prelude::Camera, view::ViewVisibility};
use bevy_transform::components::GlobalTransform;
use bevy_utils::smallvec::SmallVec;
use bevy_utils::{smallvec::SmallVec, HashMap};
use bevy_window::{PrimaryWindow, Window};
#[cfg(feature = "serialize")]
@ -133,6 +133,7 @@ pub struct NodeQuery {
focus_policy: Option<&'static FocusPolicy>,
calculated_clip: Option<&'static CalculatedClip>,
view_visibility: Option<&'static ViewVisibility>,
target_camera: Option<&'static TargetCamera>,
}
/// The system that sets Interaction for all UI elements based on the mouse cursor activity
@ -141,14 +142,15 @@ pub struct NodeQuery {
#[allow(clippy::too_many_arguments)]
pub fn ui_focus_system(
mut state: Local<State>,
camera: Query<(&Camera, Option<&UiCameraConfig>)>,
camera_query: Query<(Entity, &Camera)>,
default_ui_camera: DefaultUiCamera,
primary_window: Query<Entity, With<PrimaryWindow>>,
windows: Query<&Window>,
mouse_button_input: Res<ButtonInput<MouseButton>>,
touches_input: Res<Touches>,
ui_scale: Res<UiScale>,
ui_stack: Res<UiStack>,
mut node_query: Query<NodeQuery>,
primary_window: Query<Entity, With<PrimaryWindow>>,
) {
let primary_window = primary_window.iter().next();
@ -174,31 +176,31 @@ pub fn ui_focus_system(
let mouse_clicked =
mouse_button_input.just_pressed(MouseButton::Left) || touches_input.any_just_pressed();
let is_ui_disabled =
|camera_ui| matches!(camera_ui, Some(&UiCameraConfig { show_ui: false, .. }));
let cursor_position = camera
let camera_cursor_positions: HashMap<Entity, Vec2> = camera_query
.iter()
.filter(|(_, camera_ui)| !is_ui_disabled(*camera_ui))
.filter_map(|(camera, _)| {
if let Some(NormalizedRenderTarget::Window(window_ref)) =
.filter_map(|(entity, camera)| {
// Interactions are only supported for cameras rendering to a window.
let Some(NormalizedRenderTarget::Window(window_ref)) =
camera.target.normalize(primary_window)
{
Some(window_ref)
} else {
None
}
})
.find_map(|window_ref| {
else {
return None;
};
let viewport_position = camera
.logical_viewport_rect()
.map(|rect| rect.min)
.unwrap_or_default();
windows
.get(window_ref.entity())
.ok()
.and_then(|window| window.cursor_position())
.or_else(|| touches_input.first_pressed_position())
.map(|cursor_position| (entity, cursor_position - viewport_position))
})
.or_else(|| touches_input.first_pressed_position())
// The cursor position returned by `Window` only takes into account the window scale factor and not `UiScale`.
// To convert the cursor position to logical UI viewport coordinates we have to divide it by `UiScale`.
.map(|cursor_position| cursor_position / ui_scale.0);
.map(|(entity, cursor_position)| (entity, cursor_position / ui_scale.0))
.collect();
// prepare an iterator that contains all the nodes that have the cursor in their rect,
// from the top node to the bottom one. this will also reset the interaction to `None`
@ -209,61 +211,69 @@ pub fn ui_focus_system(
// reverse the iterator to traverse the tree from closest nodes to furthest
.rev()
.filter_map(|entity| {
if let Ok(node) = node_query.get_mut(*entity) {
// Nodes that are not rendered should not be interactable
if let Some(view_visibility) = node.view_visibility {
if !view_visibility.get() {
// Reset their interaction to None to avoid strange stuck state
if let Some(mut interaction) = node.interaction {
// We cannot simply set the interaction to None, as that will trigger change detection repeatedly
interaction.set_if_neq(Interaction::None);
}
let Ok(node) = node_query.get_mut(*entity) else {
return None;
};
return None;
}
let Some(view_visibility) = node.view_visibility else {
return None;
};
// Nodes that are not rendered should not be interactable
if !view_visibility.get() {
// Reset their interaction to None to avoid strange stuck state
if let Some(mut interaction) = node.interaction {
// We cannot simply set the interaction to None, as that will trigger change detection repeatedly
interaction.set_if_neq(Interaction::None);
}
return None;
}
let Some(camera_entity) = node
.target_camera
.map(TargetCamera::entity)
.or(default_ui_camera.get())
else {
return None;
};
let node_rect = node.node.logical_rect(node.global_transform);
let node_rect = node.node.logical_rect(node.global_transform);
// Intersect with the calculated clip rect to find the bounds of the visible region of the node
let visible_rect = node
.calculated_clip
.map(|clip| node_rect.intersect(clip.clip))
.unwrap_or(node_rect);
// Intersect with the calculated clip rect to find the bounds of the visible region of the node
let visible_rect = node
.calculated_clip
.map(|clip| node_rect.intersect(clip.clip))
.unwrap_or(node_rect);
// The mouse position relative to the node
// (0., 0.) is the top-left corner, (1., 1.) is the bottom-right corner
// Coordinates are relative to the entire node, not just the visible region.
let relative_cursor_position = cursor_position
.map(|cursor_position| (cursor_position - node_rect.min) / node_rect.size());
// The mouse position relative to the node
// (0., 0.) is the top-left corner, (1., 1.) is the bottom-right corner
// Coordinates are relative to the entire node, not just the visible region.
let relative_cursor_position = camera_cursor_positions
.get(&camera_entity)
.map(|cursor_position| (*cursor_position - node_rect.min) / node_rect.size());
// If the current cursor position is within the bounds of the node's visible area, consider it for
// clicking
let relative_cursor_position_component = RelativeCursorPosition {
normalized_visible_node_rect: visible_rect.normalize(node_rect),
normalized: relative_cursor_position,
};
// If the current cursor position is within the bounds of the node's visible area, consider it for
// clicking
let relative_cursor_position_component = RelativeCursorPosition {
normalized_visible_node_rect: visible_rect.normalize(node_rect),
normalized: relative_cursor_position,
};
let contains_cursor = relative_cursor_position_component.mouse_over();
let contains_cursor = relative_cursor_position_component.mouse_over();
// Save the relative cursor position to the correct component
if let Some(mut node_relative_cursor_position_component) =
node.relative_cursor_position
{
*node_relative_cursor_position_component = relative_cursor_position_component;
}
// Save the relative cursor position to the correct component
if let Some(mut node_relative_cursor_position_component) = node.relative_cursor_position
{
*node_relative_cursor_position_component = relative_cursor_position_component;
}
if contains_cursor {
Some(*entity)
} else {
if let Some(mut interaction) = node.interaction {
if *interaction == Interaction::Hovered || (cursor_position.is_none()) {
interaction.set_if_neq(Interaction::None);
}
}
None
}
if contains_cursor {
Some(*entity)
} else {
if let Some(mut interaction) = node.interaction {
if *interaction == Interaction::Hovered || (relative_cursor_position.is_none())
{
interaction.set_if_neq(Interaction::None);
}
}
None
}
})

View file

@ -12,7 +12,7 @@ pub fn print_ui_layout_tree(ui_surface: &UiSurface) {
.iter()
.map(|(entity, node)| (*node, *entity))
.collect();
for (&entity, roots) in &ui_surface.window_roots {
for (&entity, roots) in &ui_surface.camera_roots {
let mut out = String::new();
for root in roots {
print_node(
@ -25,7 +25,7 @@ pub fn print_ui_layout_tree(ui_surface: &UiSurface) {
&mut out,
);
}
bevy_log::info!("Layout tree for window entity: {entity:?}\n{out}");
bevy_log::info!("Layout tree for camera entity: {entity:?}\n{out}");
}
}

View file

@ -1,7 +1,7 @@
mod convert;
pub mod debug;
use crate::{ContentSize, Node, Outline, Style, UiScale};
use crate::{ContentSize, DefaultUiCamera, Node, Outline, Style, TargetCamera, UiScale};
use bevy_ecs::{
change_detection::{DetectChanges, DetectChangesMut},
entity::Entity,
@ -13,10 +13,11 @@ use bevy_ecs::{
};
use bevy_hierarchy::{Children, Parent};
use bevy_log::warn;
use bevy_math::Vec2;
use bevy_math::{UVec2, Vec2};
use bevy_render::camera::{Camera, NormalizedRenderTarget};
use bevy_transform::components::Transform;
use bevy_utils::{default, EntityHashMap};
use bevy_window::{PrimaryWindow, Window, WindowResolution, WindowScaleFactorChanged};
use bevy_utils::{default, EntityHashMap, HashMap, HashSet};
use bevy_window::{PrimaryWindow, Window, WindowScaleFactorChanged};
use std::fmt;
use taffy::Taffy;
use thiserror::Error;
@ -51,7 +52,7 @@ struct RootNodePair {
#[derive(Resource)]
pub struct UiSurface {
entity_to_taffy: EntityHashMap<Entity, taffy::node::Node>,
window_roots: EntityHashMap<Entity, Vec<RootNodePair>>,
camera_roots: EntityHashMap<Entity, Vec<RootNodePair>>,
taffy: Taffy,
}
@ -66,7 +67,7 @@ impl fmt::Debug for UiSurface {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("UiSurface")
.field("entity_to_taffy", &self.entity_to_taffy)
.field("window_nodes", &self.window_roots)
.field("camera_roots", &self.camera_roots)
.finish()
}
}
@ -77,7 +78,7 @@ impl Default for UiSurface {
taffy.disable_rounding();
Self {
entity_to_taffy: Default::default(),
window_roots: Default::default(),
camera_roots: Default::default(),
taffy,
}
}
@ -142,9 +143,9 @@ without UI components as a child of an entity with UI components, results may be
}
/// Set the ui node entities without a [`Parent`] as children to the root node in the taffy layout.
pub fn set_window_children(
pub fn set_camera_children(
&mut self,
window_id: Entity,
camera_id: Entity,
children: impl Iterator<Item = Entity>,
) {
let viewport_style = taffy::style::Style {
@ -160,7 +161,7 @@ without UI components as a child of an entity with UI components, results may be
..default()
};
let existing_roots = self.window_roots.entry(window_id).or_default();
let existing_roots = self.camera_roots.entry(camera_id).or_default();
let mut new_roots = Vec::new();
for entity in children {
let node = *self.entity_to_taffy.get(&entity).unwrap();
@ -185,18 +186,20 @@ without UI components as a child of an entity with UI components, results may be
}
}
self.window_roots.insert(window_id, new_roots);
self.camera_roots.insert(camera_id, new_roots);
}
/// Compute the layout for each window entity's corresponding root node in the layout.
pub fn compute_window_layout(&mut self, window: Entity, window_resolution: &WindowResolution) {
let available_space = taffy::geometry::Size {
width: taffy::style::AvailableSpace::Definite(window_resolution.physical_width() as f32),
height: taffy::style::AvailableSpace::Definite(
window_resolution.physical_height() as f32
),
pub fn compute_camera_layout(&mut self, camera: Entity, render_target_resolution: UVec2) {
let Some(camera_root_nodes) = self.camera_roots.get(&camera) else {
return;
};
for root_nodes in self.window_roots.entry(window).or_default() {
let available_space = taffy::geometry::Size {
width: taffy::style::AvailableSpace::Definite(render_target_resolution.x as f32),
height: taffy::style::AvailableSpace::Definite(render_target_resolution.y as f32),
};
for root_nodes in camera_root_nodes {
self.taffy
.compute_layout(root_nodes.implicit_viewport_node, available_space)
.unwrap();
@ -241,64 +244,109 @@ pub enum LayoutError {
#[allow(clippy::too_many_arguments)]
pub fn ui_layout_system(
primary_window: Query<(Entity, &Window), With<PrimaryWindow>>,
windows: Query<(Entity, &Window)>,
cameras: Query<(Entity, &Camera)>,
default_ui_camera: DefaultUiCamera,
ui_scale: Res<UiScale>,
mut scale_factor_events: EventReader<WindowScaleFactorChanged>,
mut resize_events: EventReader<bevy_window::WindowResized>,
mut ui_surface: ResMut<UiSurface>,
root_node_query: Query<Entity, (With<Node>, Without<Parent>)>,
style_query: Query<(Entity, Ref<Style>), With<Node>>,
root_node_query: Query<(Entity, Option<&TargetCamera>), (With<Node>, Without<Parent>)>,
style_query: Query<(Entity, Ref<Style>, Option<&TargetCamera>), With<Node>>,
mut measure_query: Query<(Entity, &mut ContentSize)>,
children_query: Query<(Entity, Ref<Children>), With<Node>>,
just_children_query: Query<&Children>,
mut removed_children: RemovedComponents<Children>,
mut removed_content_sizes: RemovedComponents<ContentSize>,
mut node_transform_query: Query<(&mut Node, &mut Transform)>,
mut removed_nodes: RemovedComponents<Node>,
mut node_transform_query: Query<(&mut Node, &mut Transform)>,
) {
// assume one window for time being...
// TODO: Support window-independent scaling: https://github.com/bevyengine/bevy/issues/5621
let (primary_window_entity, logical_to_physical_factor, physical_size) =
if let Ok((entity, primary_window)) = primary_window.get_single() {
(
entity,
primary_window.resolution.scale_factor(),
Vec2::new(
primary_window.resolution.physical_width() as f32,
primary_window.resolution.physical_height() as f32,
),
)
} else {
return;
};
struct CameraLayoutInfo {
size: UVec2,
resized: bool,
scale_factor: f32,
root_nodes: Vec<Entity>,
}
let resized = resize_events
.read()
.any(|resized_window| resized_window.window == primary_window_entity);
let camera_with_default = |target_camera: Option<&TargetCamera>| {
target_camera
.map(TargetCamera::entity)
.or(default_ui_camera.get())
};
let scale_factor = logical_to_physical_factor * ui_scale.0;
let layout_context = LayoutContext::new(scale_factor, physical_size);
if !scale_factor_events.is_empty() || ui_scale.is_changed() || resized {
scale_factor_events.clear();
// update all nodes
for (entity, style) in style_query.iter() {
ui_surface.upsert_node(entity, &style, &layout_context);
let resized_windows: HashSet<Entity> = resize_events.read().map(|event| event.window).collect();
let calculate_camera_layout_info = |camera: &Camera| {
let size = camera.physical_viewport_size().unwrap_or(UVec2::ZERO);
let scale_factor = camera.target_scaling_factor().unwrap_or(1.0);
let camera_target = camera
.target
.normalize(primary_window.get_single().map(|(e, _)| e).ok());
let resized = matches!(camera_target,
Some(NormalizedRenderTarget::Window(window_ref)) if resized_windows.contains(&window_ref.entity())
);
CameraLayoutInfo {
size,
resized,
scale_factor: scale_factor * ui_scale.0,
root_nodes: Vec::new(),
}
} else {
for (entity, style) in style_query.iter() {
if style.is_changed() {
};
// Precalculate the layout info for each camera, so we have fast access to it for each node
let mut camera_layout_info: HashMap<Entity, CameraLayoutInfo> = HashMap::new();
for (entity, target_camera) in &root_node_query {
match camera_with_default(target_camera) {
Some(camera_entity) => {
let Ok((_, camera)) = cameras.get(camera_entity) else {
warn!(
"TargetCamera is pointing to a camera {:?} which doesn't exist",
camera_entity
);
continue;
};
let layout_info = camera_layout_info
.entry(camera_entity)
.or_insert_with(|| calculate_camera_layout_info(camera));
layout_info.root_nodes.push(entity);
}
None => {
if cameras.is_empty() {
warn!("No camera found to render UI to. To fix this, add at least one camera to the scene.");
} else {
warn!(
"Multiple cameras found, causing UI target ambiguity. \
To fix this, add an explicit `TargetCamera` component to the root UI node {:?}",
entity
);
}
continue;
}
}
}
// Resize all nodes
for (entity, style, target_camera) in style_query.iter() {
if let Some(camera) =
camera_with_default(target_camera).and_then(|c| camera_layout_info.get(&c))
{
if camera.resized
|| !scale_factor_events.is_empty()
|| ui_scale.is_changed()
|| style.is_changed()
{
let layout_context = LayoutContext::new(
camera.scale_factor,
[camera.size.x as f32, camera.size.y as f32].into(),
);
ui_surface.upsert_node(entity, &style, &layout_context);
}
}
}
scale_factor_events.clear();
// When a `ContentSize` component is removed from an entity, we need to remove the measure from the corresponding taffy node.
for entity in removed_content_sizes.read() {
ui_surface.try_remove_measure(entity);
}
for (entity, mut content_size) in &mut measure_query {
if let Some(measure_func) = content_size.measure_func.take() {
ui_surface.update_measure(entity, measure_func);
@ -308,8 +356,10 @@ pub fn ui_layout_system(
// clean up removed nodes
ui_surface.remove_entities(removed_nodes.read());
// update window children (for now assuming all Nodes live in the primary window)
ui_surface.set_window_children(primary_window_entity, root_node_query.iter());
// update camera children
for (camera_id, CameraLayoutInfo { root_nodes, .. }) in &camera_layout_info {
ui_surface.set_camera_children(*camera_id, root_nodes.iter().cloned());
}
// update and remove children
for entity in removed_children.read() {
@ -321,12 +371,22 @@ pub fn ui_layout_system(
}
}
// compute layouts
for (entity, window) in windows.iter() {
ui_surface.compute_window_layout(entity, &window.resolution);
}
for (camera_id, camera) in &camera_layout_info {
let inverse_target_scale_factor = camera.scale_factor.recip();
let inverse_target_scale_factor = 1. / scale_factor;
ui_surface.compute_camera_layout(*camera_id, camera.size);
for root in &camera.root_nodes {
update_uinode_geometry_recursive(
*root,
&ui_surface,
&mut node_transform_query,
&just_children_query,
inverse_target_scale_factor,
Vec2::ZERO,
Vec2::ZERO,
);
}
}
fn update_uinode_geometry_recursive(
entity: Entity,
@ -375,18 +435,6 @@ pub fn ui_layout_system(
}
}
}
for entity in root_node_query.iter() {
update_uinode_geometry_recursive(
entity,
&ui_surface,
&mut node_transform_query,
&just_children_query,
inverse_target_scale_factor,
Vec2::ZERO,
Vec2::ZERO,
);
}
}
/// Resolve and update the widths of Node outlines
@ -450,10 +498,16 @@ mod tests {
use crate::layout::round_layout_coords;
use crate::prelude::*;
use crate::ui_layout_system;
use crate::update::update_target_camera_system;
use crate::ContentSize;
use crate::UiSurface;
use bevy_asset::AssetEvent;
use bevy_asset::Assets;
use bevy_core_pipeline::core_2d::Camera2dBundle;
use bevy_ecs::entity::Entity;
use bevy_ecs::event::Events;
use bevy_ecs::schedule::apply_deferred;
use bevy_ecs::schedule::IntoSystemConfigs;
use bevy_ecs::schedule::Schedule;
use bevy_ecs::world::World;
use bevy_hierarchy::despawn_with_children_recursive;
@ -461,10 +515,14 @@ mod tests {
use bevy_hierarchy::Children;
use bevy_math::vec2;
use bevy_math::Vec2;
use bevy_render::camera::ManualTextureViews;
use bevy_render::camera::OrthographicProjection;
use bevy_render::texture::Image;
use bevy_utils::prelude::default;
use bevy_utils::HashMap;
use bevy_window::PrimaryWindow;
use bevy_window::Window;
use bevy_window::WindowCreated;
use bevy_window::WindowResized;
use bevy_window::WindowResolution;
use bevy_window::WindowScaleFactorChanged;
@ -485,18 +543,33 @@ mod tests {
world.init_resource::<UiSurface>();
world.init_resource::<Events<WindowScaleFactorChanged>>();
world.init_resource::<Events<WindowResized>>();
// Required for the camera system
world.init_resource::<Events<WindowCreated>>();
world.init_resource::<Events<AssetEvent<Image>>>();
world.init_resource::<Assets<Image>>();
world.init_resource::<ManualTextureViews>();
// spawn a dummy primary window
// spawn a dummy primary window and camera
world.spawn((
Window {
resolution: WindowResolution::new(WINDOW_WIDTH, WINDOW_HEIGHT),
..Default::default()
..default()
},
PrimaryWindow,
));
world.spawn(Camera2dBundle::default());
let mut ui_schedule = Schedule::default();
ui_schedule.add_systems(ui_layout_system);
ui_schedule.add_systems(
(
// UI is driven by calculated camera target info, so we need to run the camera system first
bevy_render::camera::camera_system::<OrthographicProjection>,
update_target_camera_system,
apply_deferred,
ui_layout_system,
)
.chain(),
);
(world, ui_schedule)
}

View file

@ -3,7 +3,6 @@
//! Spawn UI elements with [`node_bundles::ButtonBundle`], [`node_bundles::ImageBundle`], [`node_bundles::TextBundle`] and [`node_bundles::NodeBundle`]
//! This UI is laid out with the Flexbox and CSS Grid layout models (see <https://cssreference.io/flexbox/>)
pub mod camera_config;
pub mod measurement;
pub mod node_bundles;
pub mod ui_material;
@ -36,22 +35,21 @@ use widget::UiImageSize;
pub mod prelude {
#[doc(hidden)]
pub use crate::{
camera_config::*, geometry::*, node_bundles::*, ui_material::*, ui_node::*, widget::Button,
widget::Label, Interaction, UiMaterialPlugin, UiScale,
geometry::*, node_bundles::*, ui_material::*, ui_node::*, widget::Button, widget::Label,
Interaction, UiMaterialPlugin, UiScale,
};
}
use crate::prelude::UiCameraConfig;
#[cfg(feature = "bevy_text")]
use crate::widget::TextFlags;
use bevy_app::prelude::*;
use bevy_ecs::prelude::*;
use bevy_input::InputSystem;
use bevy_render::{extract_component::ExtractComponentPlugin, RenderApp};
use bevy_render::RenderApp;
use bevy_transform::TransformSystem;
use stack::ui_stack_system;
pub use stack::UiStack;
use update::update_clipping_system;
use update::{update_clipping_system, update_target_camera_system};
/// The basic plugin for Bevy UI
#[derive(Default)]
@ -85,8 +83,7 @@ impl Default for UiScale {
impl Plugin for UiPlugin {
fn build(&self, app: &mut App) {
app.add_plugins(ExtractComponentPlugin::<UiCameraConfig>::default())
.init_resource::<UiSurface>()
app.init_resource::<UiSurface>()
.init_resource::<UiScale>()
.init_resource::<UiStack>()
.register_type::<AlignContent>()
@ -116,7 +113,7 @@ impl Plugin for UiPlugin {
.register_type::<RelativeCursorPosition>()
.register_type::<RepeatedGridTrack>()
.register_type::<Style>()
.register_type::<UiCameraConfig>()
.register_type::<TargetCamera>()
.register_type::<UiImage>()
.register_type::<UiImageSize>()
.register_type::<UiRect>()
@ -182,13 +179,18 @@ impl Plugin for UiPlugin {
system
});
app.add_systems(
PostUpdate,
widget::update_atlas_content_size_system.before(UiSystem::Layout),
);
app.add_systems(
PostUpdate,
(
(
widget::update_atlas_content_size_system,
update_target_camera_system,
)
.before(UiSystem::Layout),
apply_deferred
.after(update_target_camera_system)
.before(UiSystem::Layout),
ui_layout_system
.in_set(UiSystem::Layout)
.before(TransformSystem::TransformPropagate),

View file

@ -4,19 +4,19 @@ mod ui_material_pipeline;
use bevy_core_pipeline::{core_2d::Camera2d, core_3d::Camera3d};
use bevy_hierarchy::Parent;
use bevy_render::render_phase::PhaseItem;
use bevy_render::view::ViewVisibility;
use bevy_render::{render_resource::BindGroupEntries, ExtractSchedule, Render};
use bevy_window::{PrimaryWindow, Window};
use bevy_render::{
render_phase::PhaseItem, render_resource::BindGroupEntries, view::ViewVisibility,
ExtractSchedule, Render,
};
pub use pipeline::*;
pub use render_pass::*;
pub use ui_material_pipeline::*;
use crate::Outline;
use crate::{
prelude::UiCameraConfig, BackgroundColor, BorderColor, CalculatedClip, ContentSize, Node,
Style, UiImage, UiScale, UiTextureAtlasImage, Val,
BackgroundColor, BorderColor, CalculatedClip, ContentSize, Node, Style, UiImage, UiScale,
UiTextureAtlasImage, Val,
};
use crate::{DefaultUiCamera, Outline, TargetCamera};
use bevy_app::prelude::*;
use bevy_asset::{load_internal_asset, AssetEvent, AssetId, Assets, Handle};
@ -163,6 +163,10 @@ pub struct ExtractedUiNode {
pub clip: Option<Rect>,
pub flip_x: bool,
pub flip_y: bool,
// Camera to render this UI node to. By the time it is extracted,
// it is defaulted to a single camera if only one exists.
// Nodes with ambiguous camera will be ignored.
pub camera_entity: Entity,
}
#[derive(Resource, Default)]
@ -174,6 +178,7 @@ pub fn extract_atlas_uinodes(
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
images: Extract<Res<Assets<Image>>>,
texture_atlases: Extract<Res<Assets<TextureAtlas>>>,
default_ui_camera: Extract<DefaultUiCamera>,
uinode_query: Extract<
Query<
(
@ -185,6 +190,7 @@ pub fn extract_atlas_uinodes(
Option<&CalculatedClip>,
&Handle<TextureAtlas>,
&UiTextureAtlasImage,
Option<&TargetCamera>,
),
Without<UiImage>,
>,
@ -199,8 +205,13 @@ pub fn extract_atlas_uinodes(
clip,
texture_atlas_handle,
atlas_image,
camera,
) in uinode_query.iter()
{
let Some(camera_entity) = camera.map(TargetCamera::entity).or(default_ui_camera.get())
else {
continue;
};
// Skip invisible and completely transparent nodes
if !view_visibility.get() || color.0.is_fully_transparent() {
continue;
@ -250,6 +261,7 @@ pub fn extract_atlas_uinodes(
atlas_size: Some(atlas_size),
flip_x: atlas_image.flip_x,
flip_y: atlas_image.flip_y,
camera_entity,
},
);
}
@ -270,7 +282,8 @@ pub(crate) fn resolve_border_thickness(value: Val, parent_width: f32, viewport_s
pub fn extract_uinode_borders(
mut commands: Commands,
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
windows: Extract<Query<&Window, With<PrimaryWindow>>>,
camera_query: Extract<Query<(Entity, &Camera)>>,
default_ui_camera: Extract<DefaultUiCamera>,
ui_scale: Extract<Res<UiScale>>,
uinode_query: Extract<
Query<
@ -282,6 +295,7 @@ pub fn extract_uinode_borders(
Option<&Parent>,
&ViewVisibility,
Option<&CalculatedClip>,
Option<&TargetCamera>,
),
Without<ContentSize>,
>,
@ -290,17 +304,13 @@ pub fn extract_uinode_borders(
) {
let image = AssetId::<Image>::default();
let ui_logical_viewport_size = windows
.get_single()
.map(|window| Vec2::new(window.resolution.width(), window.resolution.height()))
.unwrap_or(Vec2::ZERO)
// The logical window resolution returned by `Window` only takes into account the window scale factor and not `UiScale`,
// so we have to divide by `UiScale` to get the size of the UI viewport.
/ ui_scale.0;
for (node, global_transform, style, border_color, parent, view_visibility, clip) in
uinode_query.iter()
for (node, global_transform, style, border_color, parent, view_visibility, clip, camera) in
&uinode_query
{
let Some(camera_entity) = camera.map(TargetCamera::entity).or(default_ui_camera.get())
else {
continue;
};
// Skip invisible borders
if !view_visibility.get()
|| border_color.0.is_fully_transparent()
@ -310,6 +320,15 @@ pub fn extract_uinode_borders(
continue;
}
let ui_logical_viewport_size = camera_query
.get(camera_entity)
.ok()
.and_then(|(_, c)| c.logical_viewport_size())
.unwrap_or(Vec2::ZERO)
// The logical viewport size returned by `Camera` only takes into account the target scale factor and not `UiScale`,
// so we have to divide by `UiScale` to get the size of the UI viewport.
/ ui_scale.0;
// Both vertical and horizontal percentage border values are calculated based on the width of the parent node
// <https://developer.mozilla.org/en-US/docs/Web/CSS/border-width>
let parent_width = parent
@ -374,6 +393,7 @@ pub fn extract_uinode_borders(
clip: clip.map(|clip| clip.clip),
flip_x: false,
flip_y: false,
camera_entity,
},
);
}
@ -384,6 +404,7 @@ pub fn extract_uinode_borders(
pub fn extract_uinode_outlines(
mut commands: Commands,
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
default_ui_camera: Extract<DefaultUiCamera>,
uinode_query: Extract<
Query<(
&Node,
@ -391,11 +412,16 @@ pub fn extract_uinode_outlines(
&Outline,
&ViewVisibility,
Option<&CalculatedClip>,
Option<&TargetCamera>,
)>,
>,
) {
let image = AssetId::<Image>::default();
for (node, global_transform, outline, view_visibility, maybe_clip) in uinode_query.iter() {
for (node, global_transform, outline, view_visibility, maybe_clip, camera) in &uinode_query {
let Some(camera_entity) = camera.map(TargetCamera::entity).or(default_ui_camera.get())
else {
continue;
};
// Skip invisible outlines
if !view_visibility.get()
|| outline.color.is_fully_transparent()
@ -458,6 +484,7 @@ pub fn extract_uinode_outlines(
clip: maybe_clip.map(|clip| clip.clip),
flip_x: false,
flip_y: false,
camera_entity,
},
);
}
@ -468,6 +495,7 @@ pub fn extract_uinode_outlines(
pub fn extract_uinodes(
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
images: Extract<Res<Assets<Image>>>,
default_ui_camera: Extract<DefaultUiCamera>,
uinode_query: Extract<
Query<
(
@ -478,14 +506,19 @@ pub fn extract_uinodes(
Option<&UiImage>,
&ViewVisibility,
Option<&CalculatedClip>,
Option<&TargetCamera>,
),
Without<UiTextureAtlasImage>,
>,
>,
) {
for (entity, uinode, transform, color, maybe_image, view_visibility, clip) in
for (entity, uinode, transform, color, maybe_image, view_visibility, clip, camera) in
uinode_query.iter()
{
let Some(camera_entity) = camera.map(TargetCamera::entity).or(default_ui_camera.get())
else {
continue;
};
// Skip invisible and completely transparent nodes
if !view_visibility.get() || color.0.is_fully_transparent() {
continue;
@ -516,6 +549,7 @@ pub fn extract_uinodes(
atlas_size: None,
flip_x,
flip_y,
camera_entity,
},
);
}
@ -538,14 +572,10 @@ pub struct DefaultCameraView(pub Entity);
pub fn extract_default_ui_camera_view<T: Component>(
mut commands: Commands,
ui_scale: Extract<Res<UiScale>>,
query: Extract<Query<(Entity, &Camera, Option<&UiCameraConfig>), With<T>>>,
query: Extract<Query<(Entity, &Camera), With<T>>>,
) {
let scale = ui_scale.0.recip();
for (entity, camera, camera_ui) in &query {
// ignore cameras with disabled ui
if matches!(camera_ui, Some(&UiCameraConfig { show_ui: false, .. })) {
continue;
}
for (entity, camera) in &query {
// ignore inactive cameras
if !camera.is_active {
continue;
@ -603,8 +633,9 @@ pub fn extract_default_ui_camera_view<T: Component>(
pub fn extract_text_uinodes(
mut commands: Commands,
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
camera_query: Extract<Query<(Entity, &Camera)>>,
default_ui_camera: Extract<DefaultUiCamera>,
texture_atlases: Extract<Res<Assets<TextureAtlas>>>,
windows: Extract<Query<&Window, With<PrimaryWindow>>>,
ui_scale: Extract<Res<UiScale>>,
uinode_query: Extract<
Query<(
@ -614,27 +645,30 @@ pub fn extract_text_uinodes(
&TextLayoutInfo,
&ViewVisibility,
Option<&CalculatedClip>,
Option<&TargetCamera>,
)>,
>,
) {
// TODO: Support window-independent UI scale: https://github.com/bevyengine/bevy/issues/5621
let scale_factor = windows
.get_single()
.map(|window| window.scale_factor())
.unwrap_or(1.)
* ui_scale.0;
let inverse_scale_factor = scale_factor.recip();
for (uinode, global_transform, text, text_layout_info, view_visibility, clip) in
for (uinode, global_transform, text, text_layout_info, view_visibility, clip, camera) in
uinode_query.iter()
{
let Some(camera_entity) = camera.map(TargetCamera::entity).or(default_ui_camera.get())
else {
continue;
};
// Skip if not visible or if size is set to zero (e.g. when a parent is set to `Display::None`)
if !view_visibility.get() || uinode.size().x == 0. || uinode.size().y == 0. {
continue;
}
let scale_factor = camera_query
.get(camera_entity)
.ok()
.and_then(|(_, c)| c.target_scaling_factor())
.unwrap_or(1.0)
* ui_scale.0;
let inverse_scale_factor = scale_factor.recip();
// Align the text to the nearest physical pixel:
// * Translate by minus the text node's half-size
// (The transform translates to the center of the node but the text coordinates are relative to the node's top left corner)
@ -679,6 +713,7 @@ pub fn extract_text_uinodes(
clip: clip.map(|clip| clip.clip),
flip_x: false,
flip_y: false,
camera_entity,
},
);
}
@ -722,6 +757,7 @@ pub(crate) const QUAD_INDICES: [usize; 6] = [0, 2, 3, 0, 1, 2];
pub struct UiBatch {
pub range: Range<u32>,
pub image: AssetId<Image>,
pub camera: Entity,
}
const TEXTURED_QUAD: u32 = 0;
@ -737,30 +773,29 @@ pub fn queue_uinodes(
draw_functions: Res<DrawFunctions<TransparentUi>>,
) {
let draw_function = draw_functions.read().id::<DrawUi>();
for (view, mut transparent_phase) in &mut views {
for (entity, extracted_uinode) in extracted_uinodes.uinodes.iter() {
let Ok((view, mut transparent_phase)) = views.get_mut(extracted_uinode.camera_entity)
else {
continue;
};
let pipeline = pipelines.specialize(
&pipeline_cache,
&ui_pipeline,
UiPipelineKey { hdr: view.hdr },
);
transparent_phase
.items
.reserve(extracted_uinodes.uinodes.len());
for (entity, extracted_uinode) in extracted_uinodes.uinodes.iter() {
transparent_phase.add(TransparentUi {
draw_function,
pipeline,
entity: *entity,
sort_key: (
FloatOrd(extracted_uinode.stack_index as f32),
entity.index(),
),
// batch_range will be calculated in prepare_uinodes
batch_range: 0..0,
dynamic_offset: None,
});
}
transparent_phase.add(TransparentUi {
draw_function,
pipeline,
entity: *entity,
sort_key: (
FloatOrd(extracted_uinode.stack_index as f32),
entity.index(),
),
// batch_range will be calculated in prepare_uinodes
batch_range: 0..0,
dynamic_offset: None,
});
}
}
@ -823,6 +858,8 @@ pub fn prepare_uinodes(
|| (batch_image_handle != AssetId::default()
&& extracted_uinode.image != AssetId::default()
&& batch_image_handle != extracted_uinode.image)
|| existing_batch.as_ref().map(|(_, b)| b.camera)
!= Some(extracted_uinode.camera_entity)
{
if let Some(gpu_image) = gpu_images.get(extracted_uinode.image) {
batch_item_index = item_index;
@ -831,6 +868,7 @@ pub fn prepare_uinodes(
let new_batch = UiBatch {
range: index..index,
image: extracted_uinode.image,
camera: extracted_uinode.camera_entity,
};
batches.push((item.entity, new_batch));

View file

@ -1,12 +1,13 @@
use std::ops::Range;
use super::{UiBatch, UiImageBindGroups, UiMeta};
use crate::{prelude::UiCameraConfig, DefaultCameraView};
use crate::DefaultCameraView;
use bevy_ecs::{
prelude::*,
system::{lifetimeless::*, SystemParamItem},
};
use bevy_render::{
camera::ExtractedCamera,
render_graph::*,
render_phase::*,
render_resource::{CachedRenderPipelineId, RenderPassDescriptor},
@ -20,7 +21,7 @@ pub struct UiPassNode {
(
&'static RenderPhase<TransparentUi>,
&'static ViewTarget,
Option<&'static UiCameraConfig>,
&'static ExtractedCamera,
),
With<ExtractedView>,
>,
@ -50,7 +51,7 @@ impl Node for UiPassNode {
) -> Result<(), NodeRunError> {
let input_view_entity = graph.view_entity();
let Ok((transparent_phase, target, camera_ui)) =
let Ok((transparent_phase, target, camera)) =
self.ui_view_query.get_manual(world, input_view_entity)
else {
return Ok(());
@ -58,10 +59,6 @@ impl Node for UiPassNode {
if transparent_phase.items.is_empty() {
return Ok(());
}
// Don't render UI for cameras where it is explicitly disabled
if matches!(camera_ui, Some(&UiCameraConfig { show_ui: false })) {
return Ok(());
}
// use the "default" view entity if it is defined
let view_entity = if let Ok(default_view) = self
@ -79,7 +76,9 @@ impl Node for UiPassNode {
timestamp_writes: None,
occlusion_query_set: None,
});
if let Some(viewport) = camera.viewport.as_ref() {
render_pass.set_camera_viewport(viewport);
}
transparent_phase.render(&mut render_pass, world, view_entity);
Ok(())

View file

@ -1,11 +1,16 @@
use crate::{UiRect, Val};
use bevy_asset::Handle;
use bevy_ecs::{prelude::Component, reflect::ReflectComponent};
use bevy_ecs::{prelude::*, system::SystemParam};
use bevy_math::{Rect, Vec2};
use bevy_reflect::prelude::*;
use bevy_render::{color::Color, texture::Image};
use bevy_render::{
camera::{Camera, RenderTarget},
color::Color,
texture::Image,
};
use bevy_transform::prelude::GlobalTransform;
use bevy_utils::smallvec::SmallVec;
use bevy_window::{PrimaryWindow, WindowRef};
use std::num::{NonZeroI16, NonZeroU16};
use thiserror::Error;
@ -1833,3 +1838,40 @@ mod tests {
assert_eq!(GridPlacement::end_span(-4, 12).get_start(), None);
}
}
/// Indicates that this root [`Node`] entity should be rendered to a specific camera.
/// UI then will be laid out respecting the camera's viewport and scale factor, and
/// rendered to this camera's [`bevy_render::camera::RenderTarget`].
///
/// Setting this component on a non-root node will have no effect. It will be overridden
/// by the root node's component.
///
/// Optional if there is only one camera in the world. Required otherwise.
#[derive(Component, Clone, Debug, Reflect, Eq, PartialEq)]
pub struct TargetCamera(pub Entity);
impl TargetCamera {
pub fn entity(&self) -> Entity {
self.0
}
}
#[derive(SystemParam)]
pub struct DefaultUiCamera<'w, 's> {
cameras: Query<'w, 's, (Entity, &'static Camera)>,
primary_window: Query<'w, 's, Entity, With<PrimaryWindow>>,
}
impl<'w, 's> DefaultUiCamera<'w, 's> {
pub fn get(&self) -> Option<Entity> {
self.cameras
.iter()
.filter(|(_, c)| match c.target {
RenderTarget::Window(WindowRef::Primary) => true,
RenderTarget::Window(WindowRef::Entity(w)) => self.primary_window.get(w).is_ok(),
_ => false,
})
.max_by_key(|(e, c)| (c.order, *e))
.map(|(e, _)| e)
}
}

View file

@ -1,16 +1,17 @@
//! This module contains systems that update the UI when something changes
use crate::{CalculatedClip, Display, OverflowAxis, Style};
use crate::{CalculatedClip, Display, OverflowAxis, Style, TargetCamera};
use super::Node;
use bevy_ecs::{
entity::Entity,
query::{With, Without},
query::{Changed, With, Without},
system::{Commands, Query},
};
use bevy_hierarchy::{Children, Parent};
use bevy_math::Rect;
use bevy_transform::components::GlobalTransform;
use bevy_utils::HashSet;
/// Updates clipping for all nodes
pub fn update_clipping_system(
@ -97,3 +98,84 @@ fn update_clipping(
}
}
}
pub fn update_target_camera_system(
mut commands: Commands,
changed_root_nodes_query: Query<
(Entity, Option<&TargetCamera>),
(With<Node>, Without<Parent>, Changed<TargetCamera>),
>,
changed_children_query: Query<(Entity, Option<&TargetCamera>), (With<Node>, Changed<Children>)>,
children_query: Query<&Children, With<Node>>,
node_query: Query<Option<&TargetCamera>, With<Node>>,
) {
// Track updated entities to prevent redundant updates, as `Commands` changes are deferred,
// and updates done for changed_children_query can overlap with itself or with root_node_query
let mut updated_entities = HashSet::new();
// Assuming that TargetCamera is manually set on the root node only,
// update root nodes first, since it implies the biggest change
for (root_node, target_camera) in &changed_root_nodes_query {
update_children_target_camera(
root_node,
target_camera,
&node_query,
&children_query,
&mut commands,
&mut updated_entities,
);
}
// If the root node TargetCamera was changed, then every child is updated
// by this point, and iteration will be skipped.
// Otherwise, update changed children
for (parent, target_camera) in &changed_children_query {
update_children_target_camera(
parent,
target_camera,
&node_query,
&children_query,
&mut commands,
&mut updated_entities,
);
}
}
fn update_children_target_camera(
entity: Entity,
camera_to_set: Option<&TargetCamera>,
node_query: &Query<Option<&TargetCamera>, With<Node>>,
children_query: &Query<&Children, With<Node>>,
commands: &mut Commands,
updated_entities: &mut HashSet<Entity>,
) {
let Ok(children) = children_query.get(entity) else {
return;
};
for &child in children {
// Skip if the child has already been updated or update is not needed
if updated_entities.contains(&child) || camera_to_set == node_query.get(child).unwrap() {
continue;
}
match camera_to_set {
Some(camera) => {
commands.entity(child).insert(camera.clone());
}
None => {
commands.entity(child).remove::<TargetCamera>();
}
}
updated_entities.insert(child);
update_children_target_camera(
child,
camera_to_set,
node_query,
children_query,
commands,
updated_entities,
);
}
}

View file

@ -10,7 +10,7 @@ fn main() {
App::new()
.add_plugins(DefaultPlugins)
.add_systems(Startup, setup)
.add_systems(Update, set_camera_viewports)
.add_systems(Update, (set_camera_viewports, button_system))
.run();
}
@ -51,29 +51,131 @@ fn setup(
});
// Left Camera
commands.spawn((
Camera3dBundle {
transform: Transform::from_xyz(0.0, 200.0, -100.0).looking_at(Vec3::ZERO, Vec3::Y),
..default()
},
LeftCamera,
));
// Right Camera
commands.spawn((
Camera3dBundle {
transform: Transform::from_xyz(100.0, 100., 150.0).looking_at(Vec3::ZERO, Vec3::Y),
camera: Camera {
// Renders the right camera after the left camera, which has a default priority of 0
order: 1,
// Don't clear on the second camera because the first camera already cleared the window
clear_color: ClearColorConfig::None,
let left_camera = commands
.spawn((
Camera3dBundle {
transform: Transform::from_xyz(0.0, 200.0, -100.0).looking_at(Vec3::ZERO, Vec3::Y),
..default()
},
..default()
},
RightCamera,
));
LeftCamera,
))
.id();
// Right Camera
let right_camera = commands
.spawn((
Camera3dBundle {
transform: Transform::from_xyz(100.0, 100., 150.0).looking_at(Vec3::ZERO, Vec3::Y),
camera: Camera {
// Renders the right camera after the left camera, which has a default priority of 0
order: 1,
// don't clear on the second camera because the first camera already cleared the window
clear_color: ClearColorConfig::None,
..default()
},
..default()
},
RightCamera,
))
.id();
// Set up UI
commands
.spawn((
TargetCamera(left_camera),
NodeBundle {
style: Style {
width: Val::Percent(100.),
height: Val::Percent(100.),
..default()
},
..default()
},
))
.with_children(|parent| {
parent.spawn(TextBundle::from_section(
"Left",
TextStyle {
font_size: 20.,
..default()
},
));
buttons_panel(parent);
});
commands
.spawn((
TargetCamera(right_camera),
NodeBundle {
style: Style {
width: Val::Percent(100.),
height: Val::Percent(100.),
..default()
},
..default()
},
))
.with_children(|parent| {
parent.spawn(TextBundle::from_section(
"Right",
TextStyle {
font_size: 20.,
..default()
},
));
buttons_panel(parent);
});
fn buttons_panel(parent: &mut ChildBuilder) {
parent
.spawn(NodeBundle {
style: Style {
position_type: PositionType::Absolute,
width: Val::Percent(100.),
height: Val::Percent(100.),
display: Display::Flex,
flex_direction: FlexDirection::Row,
justify_content: JustifyContent::SpaceBetween,
align_items: AlignItems::Center,
padding: UiRect::all(Val::Px(20.)),
..default()
},
..default()
})
.with_children(|parent| {
rotate_button(parent, "<", Direction::Left);
rotate_button(parent, ">", Direction::Right);
});
}
fn rotate_button(parent: &mut ChildBuilder, caption: &str, direction: Direction) {
parent
.spawn((
RotateCamera(direction),
ButtonBundle {
style: Style {
width: Val::Px(40.),
height: Val::Px(40.),
border: UiRect::all(Val::Px(2.)),
justify_content: JustifyContent::Center,
align_items: AlignItems::Center,
..default()
},
border_color: Color::WHITE.into(),
background_color: Color::DARK_GRAY.into(),
..default()
},
))
.with_children(|parent| {
parent.spawn(TextBundle::from_section(
caption,
TextStyle {
font_size: 20.,
..default()
},
));
});
}
}
#[derive(Component)]
@ -82,6 +184,14 @@ struct LeftCamera;
#[derive(Component)]
struct RightCamera;
#[derive(Component)]
struct RotateCamera(Direction);
enum Direction {
Left,
Right,
}
fn set_camera_viewports(
windows: Query<&Window>,
mut resize_events: EventReader<WindowResized>,
@ -114,3 +224,26 @@ fn set_camera_viewports(
});
}
}
#[allow(clippy::type_complexity)]
fn button_system(
interaction_query: Query<
(&Interaction, &TargetCamera, &RotateCamera),
(Changed<Interaction>, With<Button>),
>,
mut camera_query: Query<&mut Transform, With<Camera>>,
) {
for (interaction, target_camera, RotateCamera(direction)) in &interaction_query {
if let Interaction::Pressed = *interaction {
// Since TargetCamera propagates to the children, we can use it to find
// which side of the screen the button is on.
if let Ok(mut camera_transform) = camera_query.get_mut(target_camera.entity()) {
let angle = match direction {
Direction::Left => -0.1,
Direction::Right => 0.1,
};
camera_transform.rotate_around(Vec3::ZERO, Quat::from_axis_angle(Vec3::Y, angle));
}
}
}
}

View file

@ -369,6 +369,7 @@ Example | Description
[Overflow](../examples/ui/overflow.rs) | Simple example demonstrating overflow behavior
[Overflow and Clipping Debug](../examples/ui/overflow_debug.rs) | An example to debug overflow and clipping behavior
[Relative Cursor Position](../examples/ui/relative_cursor_position.rs) | Showcases the RelativeCursorPosition component
[Render UI to Texture](../examples/ui/render_ui_to_texture.rs) | An example of rendering UI as a part of a 3D world
[Size Constraints](../examples/ui/size_constraints.rs) | Demonstrates how to use size constraints to control the size of a UI node.
[Text](../examples/ui/text.rs) | Illustrates creating and updating text
[Text Debug](../examples/ui/text_debug.rs) | An example for debugging text layout

View file

@ -1,6 +1,8 @@
//! Showcases the [`RelativeCursorPosition`] component, used to check the position of the cursor relative to a UI node.
use bevy::{prelude::*, ui::RelativeCursorPosition, winit::WinitSettings};
use bevy::{
prelude::*, render::camera::Viewport, ui::RelativeCursorPosition, winit::WinitSettings,
};
fn main() {
App::new()
@ -13,7 +15,18 @@ fn main() {
}
fn setup(mut commands: Commands, asset_server: Res<AssetServer>) {
commands.spawn(Camera2dBundle::default());
commands.spawn(Camera2dBundle {
camera: Camera {
// Cursor position will take the viewport offset into account
viewport: Some(Viewport {
physical_position: [200, 100].into(),
physical_size: [600, 600].into(),
..default()
}),
..default()
},
..default()
});
commands
.spawn(NodeBundle {

View file

@ -0,0 +1,145 @@
//! Shows how to render UI to a texture. Useful for displaying UI in 3D space.
use std::f32::consts::PI;
use bevy::{
prelude::*,
render::{
camera::RenderTarget,
render_resource::{
Extent3d, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages,
},
},
};
fn main() {
App::new()
.add_plugins(DefaultPlugins)
.add_systems(Startup, setup)
.add_systems(Update, rotator_system)
.run();
}
// Marks the cube, to which the UI texture is applied.
#[derive(Component)]
struct Cube;
fn setup(
mut commands: Commands,
mut meshes: ResMut<Assets<Mesh>>,
mut materials: ResMut<Assets<StandardMaterial>>,
mut images: ResMut<Assets<Image>>,
) {
let size = Extent3d {
width: 512,
height: 512,
..default()
};
// This is the texture that will be rendered to.
let mut image = Image {
texture_descriptor: TextureDescriptor {
label: None,
size,
dimension: TextureDimension::D2,
format: TextureFormat::Bgra8UnormSrgb,
mip_level_count: 1,
sample_count: 1,
usage: TextureUsages::TEXTURE_BINDING
| TextureUsages::COPY_DST
| TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
},
..default()
};
// fill image.data with zeroes
image.resize(size);
let image_handle = images.add(image);
// Light
commands.spawn(PointLightBundle {
transform: Transform::from_translation(Vec3::new(0.0, 0.0, 10.0)),
..default()
});
let texture_camera = commands
.spawn(Camera2dBundle {
camera: Camera {
// render before the "main pass" camera
order: -1,
target: RenderTarget::Image(image_handle.clone()),
..default()
},
..default()
})
.id();
commands
.spawn((
NodeBundle {
style: Style {
// Cover the whole image
width: Val::Percent(100.),
height: Val::Percent(100.),
flex_direction: FlexDirection::Column,
justify_content: JustifyContent::Center,
align_items: AlignItems::Center,
..default()
},
background_color: Color::GOLD.into(),
..default()
},
TargetCamera(texture_camera),
))
.with_children(|parent| {
parent.spawn(TextBundle::from_section(
"This is a cube",
TextStyle {
font_size: 40.0,
color: Color::BLACK,
..default()
},
));
});
let cube_size = 4.0;
let cube_handle = meshes.add(Mesh::from(shape::Box::new(cube_size, cube_size, cube_size)));
// This material has the texture that has been rendered.
let material_handle = materials.add(StandardMaterial {
base_color_texture: Some(image_handle),
reflectance: 0.02,
unlit: false,
..default()
});
// Cube with material containing the rendered UI texture.
commands.spawn((
PbrBundle {
mesh: cube_handle,
material: material_handle,
transform: Transform::from_xyz(0.0, 0.0, 1.5)
.with_rotation(Quat::from_rotation_x(-PI / 5.0)),
..default()
},
Cube,
));
// The main pass camera.
commands.spawn(Camera3dBundle {
transform: Transform::from_xyz(0.0, 0.0, 15.0).looking_at(Vec3::ZERO, Vec3::Y),
..default()
});
}
const ROTATION_SPEED: f32 = 0.5;
fn rotator_system(time: Res<Time>, mut query: Query<&mut Transform, With<Cube>>) {
for mut transform in &mut query {
transform.rotate_x(1.0 * time.delta_seconds() * ROTATION_SPEED);
transform.rotate_y(0.7 * time.delta_seconds() * ROTATION_SPEED);
}
}

View file

@ -22,12 +22,13 @@ fn setup_scene(mut commands: Commands, asset_server: Res<AssetServer>) {
transform: Transform::from_xyz(4.0, 5.0, 4.0),
..default()
});
// main camera, cameras default to the primary window
// so we don't need to specify that.
commands.spawn(Camera3dBundle {
transform: Transform::from_xyz(0.0, 0.0, 6.0).looking_at(Vec3::ZERO, Vec3::Y),
..default()
});
let first_window_camera = commands
.spawn(Camera3dBundle {
transform: Transform::from_xyz(0.0, 0.0, 6.0).looking_at(Vec3::ZERO, Vec3::Y),
..default()
})
.id();
// Spawn a second window
let second_window = commands
@ -37,13 +38,32 @@ fn setup_scene(mut commands: Commands, asset_server: Res<AssetServer>) {
})
.id();
// second window camera
commands.spawn(Camera3dBundle {
transform: Transform::from_xyz(6.0, 0.0, 0.0).looking_at(Vec3::ZERO, Vec3::Y),
camera: Camera {
target: RenderTarget::Window(WindowRef::Entity(second_window)),
let second_window_camera = commands
.spawn(Camera3dBundle {
transform: Transform::from_xyz(6.0, 0.0, 0.0).looking_at(Vec3::ZERO, Vec3::Y),
camera: Camera {
target: RenderTarget::Window(WindowRef::Entity(second_window)),
..default()
},
..default()
},
..default()
});
})
.id();
// Since we are using multiple cameras, we need to specify which camera UI should be rendered to
commands
.spawn((NodeBundle::default(), TargetCamera(first_window_camera)))
.with_children(|parent| {
parent.spawn(TextBundle::from_section(
"First window",
TextStyle::default(),
));
});
commands
.spawn((NodeBundle::default(), TargetCamera(second_window_camera)))
.with_children(|parent| {
parent.spawn(TextBundle::from_section(
"Second window",
TextStyle::default(),
));
});
}