Files
bevy/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs
T
andriyDev efc6464f9b Create types to store serializable data about the ECS schedule and provide tools for extracting this data. (#22520)
# Objective

- A step towards #10981.
- Allow us to extract data from an app about the schedules.

Here are also some **non-goals** for this PR. These are left as future
work:

- Extract every piece of data in the schedule. I've focused on a rough
set of information that should let us visualize the most important
parts.
- Provide utilities for interpreting the data. This PR is focused on
just extracting the data, interpreting it comes next.
- Any sort of dot graph tools.

## Solution

- Create ser/de compatible structs for representing schedule data.
- Create a function to get schedule data from an initialized `Schedule`.
- Create a plugin to automatically extract the schedule data for every
schedule in the `App`.
- Note this doesn't include other subapps, I'll also leave that to
another PR.
- Make `bevy_ecs` return edges that build passes added.
- Make `bevy_ecs` return the "build metadata" to the caller, and also
trigger as an event.

## Testing

- Added tests!
- Added an example - it outputs a ron file. I assume it's all valid.

---------

Co-authored-by: Alice Cecile <alice.i.cecile@gmail.com>
Co-authored-by: Kevin Chen <chen.kevin.f@gmail.com>
2026-04-06 23:06:15 +00:00

264 lines
11 KiB
Rust

use alloc::{borrow::ToOwned, boxed::Box, collections::BTreeSet, vec::Vec};
use bevy_platform::{collections::HashMap, hash::FixedHasher};
use indexmap::IndexSet;
use crate::{
schedule::{FlattenedDependencies, SystemKey, SystemSetKey},
system::{IntoSystem, System},
world::World,
};
use super::{
is_apply_deferred, ApplyDeferred, DiGraph, Direction, NodeId, ScheduleBuildError,
ScheduleBuildPass, ScheduleGraph,
};
/// A [`ScheduleBuildPass`] that inserts [`ApplyDeferred`] systems into the schedule graph
/// when there are [`Deferred`](crate::prelude::Deferred)
/// in one system and there are ordering dependencies on that system. [`Commands`](crate::system::Commands) is one
/// such deferred buffer.
///
/// This pass is typically automatically added to the schedule. You can disable this by setting
/// [`ScheduleBuildSettings::auto_insert_apply_deferred`](crate::schedule::ScheduleBuildSettings::auto_insert_apply_deferred)
/// to `false`. You may want to disable this if you only want to sync deferred params at the end of the schedule,
/// or want to manually insert all your sync points.
#[derive(Debug, Default)]
pub struct AutoInsertApplyDeferredPass {
    /// Dependency edges that will **not** automatically insert an instance of `ApplyDeferred` on the edge.
    no_sync_edges: BTreeSet<(NodeId, NodeId)>,
    /// Cache of automatically inserted `ApplyDeferred` systems, keyed by their
    /// "distance" (number of sync points between the node and the start of the
    /// graph), so nodes at the same distance share a single sync point.
    auto_sync_node_ids: HashMap<u32, SystemKey>,
}
/// If added to a dependency edge, the edge will not be considered for auto sync point insertions.
///
/// Used as the [`ScheduleBuildPass::EdgeOptions`] of [`AutoInsertApplyDeferredPass`];
/// it is a marker type carrying no data.
pub struct IgnoreDeferred;
impl AutoInsertApplyDeferredPass {
    /// Returns the `NodeId` of the cached auto sync point for `distance`,
    /// creating and caching a new one if none exists yet.
    fn get_sync_point(&mut self, graph: &mut ScheduleGraph, distance: u32) -> SystemKey {
        // NOTE(review): the entry API is avoided here because creating the
        // node requires `&mut self` while an entry would keep the map borrowed.
        match self.auto_sync_node_ids.get(&distance) {
            Some(&cached) => cached,
            None => {
                let created = self.add_auto_sync(graph);
                self.auto_sync_node_ids.insert(distance, created);
                created
            }
        }
    }

    /// Adds an [`ApplyDeferred`] system with no config to the graph and
    /// returns its key.
    fn add_auto_sync(&mut self, graph: &mut ScheduleGraph) -> SystemKey {
        let sync_system = Box::new(IntoSystem::into_system(ApplyDeferred));
        let key = graph.systems.insert(sync_system, Vec::new());
        // Auto sync points aren't under user control, so no one should know
        // or care about ambiguities involving them: suppress them all.
        graph.ambiguous_with_all.insert(NodeId::System(key));
        key
    }
}
impl ScheduleBuildPass for AutoInsertApplyDeferredPass {
    type EdgeOptions = IgnoreDeferred;

    // Record edges configured with `IgnoreDeferred` so `build` can skip
    // inserting sync points on them.
    fn add_dependency(&mut self, from: NodeId, to: NodeId, options: Option<&Self::EdgeOptions>) {
        if options.is_some() {
            self.no_sync_edges.insert((from, to));
        }
    }

    fn build(
        &mut self,
        _world: &mut World,
        graph: &mut ScheduleGraph,
        mut dependency_flattened: FlattenedDependencies<'_>,
    ) -> Result<(), ScheduleBuildError> {
        // Topologically sort the flattened dependency graph so each node is
        // visited only after all of its predecessors.
        let (topo, flat_dependency) = dependency_flattened
            .toposort_and_graph()
            .map_err(ScheduleBuildError::FlatDependencySort)?;
        // Take owned copies so `dependency_flattened` can be mutated later
        // (edges added/removed) while we still iterate the snapshot.
        let topo = topo.to_owned();
        let flat_dependency = flat_dependency.to_owned();

        // A set "has conditions" if it has run conditions itself, or if any
        // ancestor set in the hierarchy does (checked recursively).
        fn set_has_conditions(graph: &ScheduleGraph, set: SystemSetKey) -> bool {
            graph.system_sets.has_conditions(set)
                || graph
                    .hierarchy()
                    .graph()
                    .edges_directed(NodeId::Set(set), Direction::Incoming)
                    .any(|(parent, _)| {
                        parent
                            .as_set()
                            .is_some_and(|p| set_has_conditions(graph, p))
                    })
        }

        // A system "has conditions" if it has run conditions itself, or if any
        // set containing it has conditions.
        fn system_has_conditions(graph: &ScheduleGraph, key: SystemKey) -> bool {
            graph.systems.has_conditions(key)
                || graph
                    .hierarchy()
                    .graph()
                    .edges_directed(NodeId::System(key), Direction::Incoming)
                    .any(|(parent, _)| {
                        parent
                            .as_set()
                            .is_some_and(|p| set_has_conditions(graph, p))
                    })
        }

        // An explicit `ApplyDeferred` only counts as a reusable sync point if
        // it runs unconditionally; the (recursive) condition check is cached
        // per system.
        let mut system_has_conditions_cache = HashMap::<SystemKey, bool>::default();
        let mut is_valid_explicit_sync_point = |key: SystemKey| {
            is_apply_deferred(&graph.systems[key])
                && !*system_has_conditions_cache
                    .entry(key)
                    .or_insert_with(|| system_has_conditions(graph, key))
        };

        // Calculate the distance for each node.
        // The "distance" is the number of sync points between a node and the beginning of the graph.
        // Also store if a preceding edge would have added a sync point but was ignored to add it at
        // a later edge that is not ignored.
        let mut distances_and_pending_sync: HashMap<SystemKey, (u32, bool)> =
            HashMap::with_capacity_and_hasher(topo.len(), Default::default());

        // Keep track of any explicit sync nodes for a specific distance.
        let mut distance_to_explicit_sync_node: HashMap<u32, SystemKey> = HashMap::default();

        // Determine the distance for every node and collect the explicit sync points.
        for &key in topo.iter() {
            let (node_distance, mut node_needs_sync) = distances_and_pending_sync
                .get(&key)
                .copied()
                .unwrap_or_default();
            if is_valid_explicit_sync_point(key) {
                // The distance of this sync point does not change anymore as the iteration order
                // makes sure that this node is no unvisited target of another node.
                // Because of this, the sync point can be stored for this distance to be reused as
                // automatically added sync points later.
                distance_to_explicit_sync_node.insert(node_distance, key);

                // This node just did a sync, so the only reason to do another sync is if one was
                // explicitly scheduled afterwards.
                node_needs_sync = false;
            } else if !node_needs_sync {
                // No previous node has postponed sync points to add so check if the system itself
                // has deferred params that require a sync point to apply them.
                node_needs_sync = graph.systems[key].has_deferred();
            }

            // Propagate distances forward along all outgoing edges.
            for target in flat_dependency.neighbors_directed(key, Direction::Outgoing) {
                let (target_distance, target_pending_sync) =
                    distances_and_pending_sync.entry(target).or_default();
                let mut edge_needs_sync = node_needs_sync;
                if node_needs_sync
                    && !graph.systems[target].is_exclusive()
                    && self
                        .no_sync_edges
                        .contains(&(NodeId::System(key), NodeId::System(target)))
                {
                    // The node has deferred params to apply, but this edge is ignoring sync points.
                    // Mark the target as 'delaying' those commands to a future edge and the current
                    // edge as not needing a sync point.
                    *target_pending_sync = true;
                    edge_needs_sync = false;
                }
                let mut weight = 0;
                if edge_needs_sync || is_valid_explicit_sync_point(target) {
                    // The target distance grows if a sync point is added between it and the node.
                    // Also raise the distance if the target is a sync point itself so it then again
                    // raises the distance of following nodes as that is what the distance is about.
                    weight = 1;
                }
                // The target cannot have fewer sync points in front of it than the preceding node.
                *target_distance = (node_distance + weight).max(*target_distance);
            }
        }

        // Find any edges which have a different number of sync points between them and make sure
        // there is a sync point between them.
        for &key in topo.iter() {
            let (node_distance, _) = distances_and_pending_sync
                .get(&key)
                .copied()
                .unwrap_or_default();
            for target in flat_dependency.neighbors_directed(key, Direction::Outgoing) {
                let (target_distance, _) = distances_and_pending_sync
                    .get(&target)
                    .copied()
                    .unwrap_or_default();
                if node_distance == target_distance {
                    // These nodes are the same distance, so they don't need an edge between them.
                    continue;
                }
                if is_apply_deferred(&graph.systems[target]) {
                    // We don't need to insert a sync point since ApplyDeferred is a sync point
                    // already!
                    continue;
                }
                // Prefer a user-supplied explicit sync point at this distance;
                // otherwise create (or reuse) an automatic one.
                let sync_point = distance_to_explicit_sync_node
                    .get(&target_distance)
                    .copied()
                    .unwrap_or_else(|| self.get_sync_point(graph, target_distance));
                // Route the dependency through the sync point instead.
                dependency_flattened.add_edge(key, sync_point);
                dependency_flattened.add_edge(sync_point, target);

                // The edge without the sync point is now redundant.
                dependency_flattened.remove_edge(key, target);
            }
        }

        Ok(())
    }

    // When a set is collapsed into its member systems, propagate any no-sync
    // markers from set-level edges onto the resulting system-level edges.
    // Returns no extra edges of its own.
    fn collapse_set(
        &mut self,
        set: SystemSetKey,
        systems: &IndexSet<SystemKey, FixedHasher>,
        dependency_flattening: &DiGraph<NodeId>,
    ) -> impl Iterator<Item = (NodeId, NodeId)> {
        if systems.is_empty() {
            // collapse dependencies for empty sets: a predecessor-successor
            // pair only stays no-sync if BOTH edges through the set were no-sync.
            for a in dependency_flattening.neighbors_directed(NodeId::Set(set), Direction::Incoming)
            {
                for b in
                    dependency_flattening.neighbors_directed(NodeId::Set(set), Direction::Outgoing)
                {
                    if self.no_sync_edges.contains(&(a, NodeId::Set(set)))
                        && self.no_sync_edges.contains(&(NodeId::Set(set), b))
                    {
                        self.no_sync_edges.insert((a, b));
                    }
                }
            }
        } else {
            // A no-sync edge into the set becomes a no-sync edge into each member system.
            for a in dependency_flattening.neighbors_directed(NodeId::Set(set), Direction::Incoming)
            {
                for &sys in systems {
                    if self.no_sync_edges.contains(&(a, NodeId::Set(set))) {
                        self.no_sync_edges.insert((a, NodeId::System(sys)));
                    }
                }
            }
            // Likewise for no-sync edges out of the set.
            for b in dependency_flattening.neighbors_directed(NodeId::Set(set), Direction::Outgoing)
            {
                for &sys in systems {
                    if self.no_sync_edges.contains(&(NodeId::Set(set), b)) {
                        self.no_sync_edges.insert((NodeId::System(sys), b));
                    }
                }
            }
        }
        core::iter::empty()
    }
}