wip: algo for scheduler finished

Jonathan Kelley 2021-08-26 17:05:28 -04:00
parent 5f63eda294
commit 9ad5e49654
5 changed files with 463 additions and 507 deletions

View file

@@ -5,7 +5,7 @@ use crate::innerlude::*;
/// This iterator is useful when it's important to load the next real root onto the top of the stack for operations like
/// "InsertBefore".
pub struct RealChildIterator<'a> {
scopes: &'a Scheduler,
scopes: &'a ResourcePool,
// Heuristically, we should never bleed into 4 completely nested fragments/components
// Smallvec lets us stack-allocate our little stack machine so the vast majority of cases are sane
@@ -14,14 +14,14 @@ pub struct RealChildIterator<'a> {
}
impl<'a> RealChildIterator<'a> {
pub fn new(starter: &'a VNode<'a>, scopes: &'a Scheduler) -> Self {
pub fn new(starter: &'a VNode<'a>, scopes: &'a ResourcePool) -> Self {
Self {
scopes,
stack: smallvec::smallvec![(0, starter)],
}
}
pub fn new_from_slice(nodes: &'a [VNode<'a>], scopes: &'a Scheduler) -> Self {
pub fn new_from_slice(nodes: &'a [VNode<'a>], scopes: &'a ResourcePool) -> Self {
let mut stack = smallvec::smallvec![];
for node in nodes {
stack.push((0, node));

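The iterator above is a tiny stack machine. Here is a minimal, self-contained sketch of the same idea, with a hypothetical Node type standing in for the crate's VNode: a smallvec-backed stack of (child_index, node) pairs walks nested fragments and yields only the concrete elements, i.e. the "real" roots.

use smallvec::{smallvec, SmallVec};

enum Node {
    Element(&'static str),
    Fragment(Vec<Node>),
}

// Collect the names of the "real" roots under `start`, skipping fragments.
fn real_roots(start: &Node) -> Vec<&'static str> {
    // Stack-allocated for shallow trees; spills to the heap past 4 entries.
    let mut stack: SmallVec<[(usize, &Node); 4]> = smallvec![(0, start)];
    let mut out = Vec::new();
    while let Some((child_idx, node)) = stack.pop() {
        match node {
            Node::Element(name) => out.push(*name),
            Node::Fragment(children) => {
                if let Some(child) = children.get(child_idx) {
                    // Revisit this fragment at the next child, then descend.
                    stack.push((child_idx + 1, node));
                    stack.push((0, child));
                }
            }
        }
    }
    out
}

fn main() {
    let tree = Node::Fragment(vec![
        Node::Element("div"),
        Node::Fragment(vec![Node::Element("p")]),
    ]);
    assert_eq!(real_roots(&tree), ["div", "p"]);
}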
View file

@@ -106,48 +106,20 @@ use DomEdit::*;
/// Funnily enough, this stack machine's entire job is to create instructions for another stack machine to execute. It's
/// stack machines all the way down!
pub struct DiffMachine<'bump> {
vdom: SharedVdom<'bump>,
pub channel: EventChannel,
pub vdom: &'bump ResourcePool,
pub mutations: Mutations<'bump>,
pub stack: DiffStack<'bump>,
pub diffed: FxHashSet<ScopeId>,
pub seen_scopes: FxHashSet<ScopeId>,
}
pub struct SharedVdom<'bump> {
pub components: &'bump mut Slab<Scope>,
pub elements: &'bump mut Slab<()>,
pub channel: EventChannel,
}
impl<'bump> SharedVdom<'bump> {
fn get_scope_mut(&mut self, scope: ScopeId) -> Option<&'bump mut Scope> {
todo!()
}
fn get_scope(&mut self, scope: ScopeId) -> Option<&'bump Scope> {
todo!()
}
fn reserve_node(&mut self) -> ElementId {
todo!()
}
fn collect_garbage(&mut self, element: ElementId) {}
pub fn insert_scope_with_key(&mut self, f: impl FnOnce(ScopeId) -> Scope) -> ScopeId {
let entry = self.components.vacant_entry();
let id = ScopeId(entry.key());
entry.insert(f(id));
id
}
}
/// A "saved" form of a diff machine.
/// In the regular diff machine, the &'bump references are stack borrows, while the
/// bump lifetimes are heap borrows.
pub struct SavedDiffWork<'bump> {
pub channel: EventChannel,
pub mutations: Mutations<'bump>,
pub stack: DiffStack<'bump>,
pub diffed: FxHashSet<ScopeId>,
pub seen_scopes: FxHashSet<ScopeId>,
}
@@ -155,34 +127,38 @@ impl<'a> SavedDiffWork<'a> {
pub unsafe fn extend(self: SavedDiffWork<'a>) -> SavedDiffWork<'static> {
std::mem::transmute(self)
}
pub unsafe fn promote<'b>(self, vdom: SharedVdom<'b>) -> DiffMachine<'b> {
pub unsafe fn promote<'b>(self, vdom: &'b mut ResourcePool) -> DiffMachine<'b> {
let extended: SavedDiffWork<'b> = std::mem::transmute(self);
DiffMachine {
vdom,
channel: extended.channel,
mutations: extended.mutations,
stack: extended.stack,
diffed: extended.diffed,
seen_scopes: extended.seen_scopes,
}
}
}
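Taken together, save/extend/promote form the suspension mechanism: a paused diff is laundered to 'static so the scheduler can store it across frames, then re-borrowed against the pool when work resumes. A hedged sketch of the intended round-trip, assuming a DiffMachine named machine and a ResourcePool named pool (crate-internal; not compilable standalone):

// Suspend: erase the stack borrow so the saved work can live in a lane.
let saved: SavedDiffWork<'static> = unsafe { machine.save().extend() };
// ...next frame: re-attach the saved work to the resource pool.
let mut machine = unsafe { saved.promote(&mut pool) };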
impl<'bump> DiffMachine<'bump> {
pub(crate) fn new(edits: Mutations<'bump>, shared: SharedVdom<'bump>) -> Self {
pub(crate) fn new(
edits: Mutations<'bump>,
shared: &'bump mut ResourcePool,
channel: EventChannel,
) -> Self {
Self {
channel,
stack: DiffStack::new(),
mutations: edits,
vdom: shared,
diffed: FxHashSet::default(),
seen_scopes: FxHashSet::default(),
}
}
pub fn save(self) -> SavedDiffWork<'bump> {
SavedDiffWork {
channel: self.channel,
mutations: self.mutations,
stack: self.stack,
diffed: self.diffed,
seen_scopes: self.seen_scopes,
}
}
@@ -194,11 +170,10 @@ impl<'bump> DiffMachine<'bump> {
// }
//
pub async fn diff_scope(&mut self, id: ScopeId) {
pub async fn diff_scope(&'bump mut self, id: ScopeId) {
if let Some(component) = self.vdom.get_scope_mut(id) {
let (old, new) = (component.frames.wip_head(), component.frames.fin_head());
self.stack.push(DiffInstruction::DiffNode { new, old });
self.work().await;
}
}
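diff_scope seeds the stack from the scope's two frames: wip_head is the tree being written this pass, fin_head the one last finished. A toy sketch of that double buffer with a hypothetical Frames type (the crate's actual frame layout is an assumption here):

struct Frames<T> {
    buf: [T; 2],
    wip: usize, // index of the work-in-progress frame
}

impl<T> Frames<T> {
    fn wip_head(&self) -> &T { &self.buf[self.wip] }
    fn fin_head(&self) -> &T { &self.buf[1 - self.wip] }
    fn commit(&mut self) { self.wip = 1 - self.wip; } // swap after a diff lands
}

fn main() {
    let mut frames = Frames { buf: ["old", "new"], wip: 1 };
    assert_eq!((*frames.fin_head(), *frames.wip_head()), ("old", "new"));
    frames.commit();
    assert_eq!(*frames.fin_head(), "new"); // the finished frame flips
}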
@@ -368,7 +343,7 @@ impl<'bump> DiffMachine<'bump> {
let parent_idx = self.stack.current_scope().unwrap();
let shared = self.vdom.channel.clone();
let shared = self.channel.clone();
// Insert a new scope into our component list
let parent_scope = self.vdom.get_scope(parent_idx).unwrap();
let new_idx = self.vdom.insert_scope_with_key(|new_idx| {

View file

@@ -13,16 +13,12 @@ pub struct Mutations<'a> {
use DomEdit::*;
impl<'a> Mutations<'a> {
pub fn new() -> Self {
pub(crate) fn new() -> Self {
let edits = Vec::new();
let noderefs = Vec::new();
Self { edits, noderefs }
}
pub fn extend(&mut self, other: &mut Mutations) {
// self.edits.extend(other.edits);
}
// Navigation
pub(crate) fn push_root(&mut self, root: ElementId) {
let id = root.as_u64();

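Mutations is essentially an append-only edit log that the diff machine fills and the renderer replays. A stand-in sketch with a hypothetical Edit enum (the real type is DomEdit, whose variants aren't shown in this hunk):

#[derive(Debug, PartialEq)]
enum Edit {
    PushRoot { id: u64 },
}

#[derive(Default)]
struct EditLog {
    edits: Vec<Edit>,
}

impl EditLog {
    // Mirrors `push_root`: record which element to load onto the renderer's stack.
    fn push_root(&mut self, id: u64) {
        self.edits.push(Edit::PushRoot { id });
    }
}

fn main() {
    let mut log = EditLog::default();
    log.push_root(7);
    assert_eq!(log.edits, vec![Edit::PushRoot { id: 7 }]);
}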
View file

@@ -129,28 +129,7 @@ pub enum SchedulerMsg {
///
///
pub struct Scheduler {
/*
This *has* to be an UnsafeCell.
Each BumpFrame and Scope is located in this Slab - and we'll need mutable access to a scope while holding on to
its bumpframe contents immutably.
However, all of the interaction with this Slab is done in this module and the Diff module, so it should be fairly
simple to audit.
Wrapped in Rc so the "get_shared_context" closure can walk the tree (immutably!)
*/
pub components: Rc<UnsafeCell<Slab<Scope>>>,
/*
Yes, a slab of "nil". We use this for properly ordering ElementIDs - all we care about is the allocation strategy
that slab uses. The slab essentially just provides keys for ElementIDs that we can re-use in a Vec on the client.
This just happened to be the simplest and most efficient way to implement a deterministic keyed map with slot reuse.
In the future, we could actually store a pointer to the VNode instead of nil to provide O(1) lookup for VNodes...
*/
pub raw_elements: Slab<()>,
pub pool: ResourcePool,
pub heuristics: HeuristicsEngine,
@@ -175,7 +154,7 @@ pub struct Scheduler {
pub garbage_scopes: HashSet<ScopeId>,
pub lanes: [PriortySystem; 4],
pub lanes: [PriorityLane; 4],
}
impl Scheduler {
@@ -185,7 +164,12 @@ impl Scheduler {
Perhaps this should be configurable?
*/
let components = Rc::new(UnsafeCell::new(Slab::with_capacity(100)));
let raw_elements = Slab::with_capacity(2000);
let raw_elements = Rc::new(UnsafeCell::new(Slab::with_capacity(2000)));
let pool = ResourcePool {
components: components.clone(),
raw_elements,
};
let heuristics = HeuristicsEngine::new();
@@ -231,16 +215,15 @@ impl Scheduler {
};
Self {
pool,
channel,
receiver,
components,
async_tasks: FuturesUnordered::new(),
pending_garbage: FxHashSet::default(),
heuristics,
raw_elements,
// storage for our receiver to dump into
ui_events: VecDeque::new(),
@@ -255,14 +238,441 @@ impl Scheduler {
// a dedicated fiber for each priority
lanes: [
PriortySystem::new(),
PriortySystem::new(),
PriortySystem::new(),
PriortySystem::new(),
PriorityLane::new(),
PriorityLane::new(),
PriorityLane::new(),
PriorityLane::new(),
],
}
}
pub fn manually_poll_events(&mut self) {
while let Ok(Some(msg)) = self.receiver.try_next() {
self.handle_channel_msg(msg);
}
}
// Converts UI events into dirty scopes with various priorities
pub fn consume_pending_events(&mut self) -> Result<()> {
// while let Some(trigger) = self.ui_events.pop_back() {
// match &trigger.event {
// SyntheticEvent::ClipboardEvent(_)
// | SyntheticEvent::CompositionEvent(_)
// | SyntheticEvent::KeyboardEvent(_)
// | SyntheticEvent::FocusEvent(_)
// | SyntheticEvent::FormEvent(_)
// | SyntheticEvent::SelectionEvent(_)
// | SyntheticEvent::TouchEvent(_)
// | SyntheticEvent::UIEvent(_)
// | SyntheticEvent::WheelEvent(_)
// | SyntheticEvent::MediaEvent(_)
// | SyntheticEvent::AnimationEvent(_)
// | SyntheticEvent::TransitionEvent(_)
// | SyntheticEvent::ToggleEvent(_)
// | SyntheticEvent::MouseEvent(_)
// | SyntheticEvent::PointerEvent(_) => {
// if let Some(scope) = self.get_scope_mut(trigger.scope) {
// if let Some(element) = trigger.mounted_dom_id {
// scope.call_listener(trigger.event, element)?;
// // let receiver = self.immediate_receiver.clone();
// // let mut receiver = receiver.borrow_mut();
// // // Drain the immediates into the dirty scopes, setting the appropriate priorities
// // while let Ok(Some(dirty_scope)) = receiver.try_next() {
// // self.add_dirty_scope(dirty_scope, trigger.priority)
// // }
// }
// }
// }
// }
// }
Ok(())
}
// nothing to do, no events on channels, no work
pub fn has_any_work(&self) -> bool {
self.has_work() || self.has_pending_events() || self.has_pending_garbage()
}
pub fn has_pending_events(&self) -> bool {
self.ui_events.len() > 0
}
pub fn has_work(&self) -> bool {
todo!()
// self.high_priorty.has_work()
// || self.medium_priority.has_work()
// || self.low_priority.has_work()
}
pub fn has_pending_garbage(&self) -> bool {
!self.garbage_scopes.is_empty()
}
fn get_current_fiber<'a>(&'a mut self) -> &mut DiffMachine<'a> {
todo!()
// let fib = match self.current_priority {
// EventPriority::High => &mut self.high_priorty,
// EventPriority::Medium => &mut self.medium_priority,
// EventPriority::Low => &mut self.low_priority,
// };
// unsafe { std::mem::transmute(fib) }
}
fn shift_priorities(&mut self) {
self.current_priority = match (
self.lanes[0].has_work(),
self.lanes[1].has_work(),
self.lanes[2].has_work(),
self.lanes[3].has_work(),
) {
(true, _, _, _) => EventPriority::Immediate,
(false, true, _, _) => EventPriority::High,
(false, false, true, _) => EventPriority::Medium,
(false, false, false, _) => EventPriority::Low,
};
}
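shift_priorities is a "highest non-empty lane wins" rule. The same rule as a standalone sketch over plain booleans (hypothetical helper, not the crate's code):

// Lane 0 is Immediate, lane 3 is Low; the first lane with work wins,
// and an all-idle scheduler defaults to the Low lane.
fn pick_lane(has_work: [bool; 4]) -> usize {
    has_work.iter().position(|&w| w).unwrap_or(3)
}

fn main() {
    assert_eq!(pick_lane([false, true, false, false]), 1); // High
    assert_eq!(pick_lane([false, false, false, false]), 3); // idle -> Low
}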
fn load_current_lane(&mut self) -> &mut PriorityLane {
match self.current_priority {
EventPriority::Immediate => todo!(),
EventPriority::High => todo!(),
EventPriority::Medium => todo!(),
EventPriority::Low => todo!(),
}
}
fn save_work(&mut self, lane: SavedDiffWork) {
let saved: SavedDiffWork<'static> = unsafe { std::mem::transmute(lane) };
self.load_current_lane().saved_state = Some(saved);
}
fn load_work(&mut self) -> SavedDiffWork<'static> {
match self.current_priority {
EventPriority::Immediate => todo!(),
EventPriority::High => todo!(),
EventPriority::Medium => todo!(),
EventPriority::Low => todo!(),
}
}
/// The primary workhorse of the VirtualDOM.
///
/// Uses some fairly complex logic to schedule what work should be produced.
///
/// Returns a list of successful mutations.
///
///
pub async fn work_with_deadline<'a>(
&'a mut self,
mut deadline: Pin<Box<impl FusedFuture<Output = ()>>>,
) -> Vec<Mutations<'a>> {
/*
Strategy:
- When called, check for any UI events that might've been received since the last frame.
- Dump all UI events into a "pending discrete" queue and a "pending continuous" queue.
- If there are any pending discrete events, then elevate our priority level. If our priority level is already "high,"
then we need to finish the high priority work first. If the current work is "low" then analyze what scopes
will be invalidated by this new work. If this interferes with any in-flight medium or low work, then we need
to bump the other work out of the way, or choose to process it so we don't have any conflicts.
'static components have a leg up here since their work can be re-used among multiple scopes.
"High priority" is only for blocking! Should only be used on "clicks"
- If there are no pending discrete events, then check for continuous events. These can be completely batched
Open questions:
- what if we get two clicks from the component during the same slice?
- should we batch?
- react says no - they are continuous
- but if we received both - then we don't need to diff, do we? run as many as we can and then finally diff?
*/
let mut committed_mutations = Vec::<Mutations<'static>>::new();
// TODO:
// the scheduler uses a bunch of different receivers to mimic a "topic" queue system. The futures-channel implementation
// doesn't really have a concept of a "topic" queue, so there's a lot of noise in the hand-rolled scheduler. We should
// explore abstracting the scheduler into a topic-queue channel system - similar to Kafka or something similar.
loop {
// Internalize any pending work since the last time we ran
self.manually_poll_events();
// Wait for any new events if we have nothing to do
if !self.has_any_work() {
self.pool.clean_up_garbage();
let deadline_expired = self.wait_for_any_trigger(&mut deadline).await;
if deadline_expired {
return committed_mutations;
}
}
// Create work from the pending event queue
self.consume_pending_events().unwrap();
// Work through the current subtree, and commit the results when it finishes
// When the deadline expires, give back the work
self.shift_priorities();
let saved_state = self.load_work();
// We have to split away some parts of ourself - current lane is borrowed mutably
let mut shared = self.pool.clone();
let mut machine = unsafe { saved_state.promote(&mut shared) };
if machine.stack.is_empty() {
let shared = self.pool.clone();
self.current_lane().dirty_scopes.sort_by(|a, b| {
let h1 = shared.get_scope(*a).unwrap().height;
let h2 = shared.get_scope(*b).unwrap().height;
h1.cmp(&h2)
});
if let Some(scope) = self.current_lane().dirty_scopes.pop() {
let component = self.pool.get_scope(scope).unwrap();
let (old, new) = (component.frames.wip_head(), component.frames.fin_head());
machine.stack.push(DiffInstruction::DiffNode { new, old });
}
}
let completed = {
let fut = machine.work();
pin_mut!(fut);
use futures_util::future::{select, Either};
match select(fut, &mut deadline).await {
Either::Left((work, _other)) => true,
Either::Right((deadline, _other)) => false,
}
};
let machine: DiffMachine<'static> = unsafe { std::mem::transmute(machine) };
let mut saved = machine.save();
if completed {
for node in saved.seen_scopes.drain() {
self.current_lane().dirty_scopes.remove(&node);
}
let mut new_mutations = Mutations::new();
std::mem::swap(&mut new_mutations, &mut saved.mutations);
committed_mutations.push(new_mutations);
}
self.save_work(saved);
if !completed {
break;
}
}
committed_mutations
}
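The deadline race at the heart of that loop can be shown standalone: race the work future against a timer, commit if the work wins, save the machine if the timer does. A minimal sketch assuming the futures and futures-timer crates for the executor and timer (the crate's real deadline future is handed in by the renderer):

use std::time::Duration;
use futures_util::future::{select, Either};
use futures_util::pin_mut;

fn main() {
    futures::executor::block_on(async {
        let deadline = futures_timer::Delay::new(Duration::from_millis(4));
        pin_mut!(deadline);
        // Stand-in for `machine.work()`; resolves on its first poll.
        let work = async { /* diff until the stack is empty */ };
        pin_mut!(work);
        let completed = match select(work, deadline).await {
            Either::Left((_done, _)) => true,   // commit the mutations
            Either::Right(((), _)) => false,    // deadline hit: save the machine
        };
        assert!(completed); // the no-op work wins this race
    });
}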
// waits for a trigger, canceling early if the deadline is reached
// returns true if the deadline was reached
// does not return the trigger, but caches it in the scheduler
pub async fn wait_for_any_trigger(
&mut self,
deadline: &mut Pin<Box<impl FusedFuture<Output = ()>>>,
) -> bool {
use futures_util::future::{select, Either};
let event_fut = async {
match select(self.receiver.next(), self.async_tasks.next()).await {
Either::Left((msg, _other)) => {
self.handle_channel_msg(msg.unwrap());
}
Either::Right((task, _other)) => {
// do nothing, async task will likely generate a set of scheduler messages
}
}
};
pin_mut!(event_fut);
match select(event_fut, deadline).await {
Either::Left((msg, _other)) => false,
Either::Right((deadline, _)) => true,
}
}
pub fn current_lane(&mut self) -> &mut PriorityLane {
match self.current_priority {
EventPriority::Immediate => &mut self.lanes[0],
EventPriority::High => &mut self.lanes[1],
EventPriority::Medium => &mut self.lanes[2],
EventPriority::Low => &mut self.lanes[3],
}
}
pub fn handle_channel_msg(&mut self, msg: SchedulerMsg) {
match msg {
SchedulerMsg::Immediate(_) => todo!(),
SchedulerMsg::UiEvent(_) => todo!(),
//
SchedulerMsg::SubmitTask(_, _) => todo!(),
SchedulerMsg::ToggleTask(_) => todo!(),
SchedulerMsg::PauseTask(_) => todo!(),
SchedulerMsg::ResumeTask(_) => todo!(),
SchedulerMsg::DropTask(_) => todo!(),
}
}
fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
todo!()
// match priority {
// EventPriority::High => self.high_priorty.dirty_scopes.insert(scope),
// EventPriority::Medium => self.medium_priority.dirty_scopes.insert(scope),
// EventPriority::Low => self.low_priority.dirty_scopes.insert(scope),
// };
}
}
pub struct PriorityLane {
pub dirty_scopes: IndexSet<ScopeId>,
pub saved_state: Option<SavedDiffWork<'static>>,
pub in_progress: bool,
}
impl PriorityLane {
pub fn new() -> Self {
Self {
saved_state: None,
dirty_scopes: Default::default(),
in_progress: false,
}
}
fn has_work(&self) -> bool {
todo!()
}
fn work(&mut self) {
let scope = self.dirty_scopes.pop();
}
}
pub struct TaskHandle {
pub sender: UnboundedSender<SchedulerMsg>,
pub our_id: u64,
}
impl TaskHandle {
/// Toggles this coroutine off/on.
///
/// This method is not synchronous - your task will not stop immediately.
pub fn toggle(&self) {}
/// This method is not synchronous - your task will not stop immediately.
pub fn start(&self) {}
/// This method is not synchronous - your task will not stop immediately.
pub fn stop(&self) {}
/// This method is not synchronous - your task will not stop immediately.
pub fn restart(&self) {}
}
#[derive(serde::Serialize, serde::Deserialize, Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ScopeId(pub usize);
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ElementId(pub usize);
impl Display for ElementId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl ElementId {
pub fn as_u64(self) -> u64 {
self.0 as u64
}
}
/// Priority of Event Triggers.
///
/// Internally, Dioxus will abort work that's taking too long if new, more important, work arrives. Unlike React, Dioxus
/// won't be afraid to pause work or flush changes to the RealDOM. This is called "cooperative scheduling". Some Renderers
/// implement this form of scheduling internally, however Dioxus will perform its own scheduling as well.
///
/// The ultimate goal of the scheduler is to manage latency of changes, prioritizing "flashier" changes over "subtler" changes.
///
/// React has a 5-tier priority system. However, they break things into "Continuous" and "Discrete" priority. For now,
/// we keep it simple, and just use a 4-tier priority system.
///
/// - NoPriority = 0
/// - LowPriority = 1
/// - NormalPriority = 2
/// - UserBlocking = 3
/// - HighPriority = 4
/// - ImmediatePriority = 5
///
/// We still have a concept of discrete vs continuous though - discrete events won't be batched, but continuous events will.
/// This means that multiple "scroll" events will be processed in a single frame, but multiple "click" events will be
/// flushed before proceeding. Multiple discrete events are highly unlikely, though.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, PartialOrd, Ord)]
pub enum EventPriority {
/// Work that must be completed during the EventHandler phase
///
///
Immediate = 3,
/// "High Priority" work will not interrupt other high priority work, but will interrupt medium and low priority work.
///
/// This is typically reserved for things like user interaction.
///
/// React calls these "discrete" events, but with an extra category of "user-blocking" (Immediate).
High = 2,
/// "Medium priority" work is generated by page events not triggered by the user. These types of events are less important
/// than "High Priority" events and will take precedence over low priority events.
///
/// This is typically reserved for VirtualEvents that are not related to keyboard or mouse input.
///
/// React calls these "continuous" events (e.g. mouse move, mouse wheel, touch move, etc).
Medium = 1,
/// "Low Priority" work will always be pre-empted unless the work is significantly delayed, in which case it will be
/// advanced to the front of the work queue until completed.
///
/// The primary user of Low Priority work is the asynchronous work system (suspense).
///
/// This is considered "idle" work or "background" work.
Low = 0,
}
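Because the enum derives PartialOrd/Ord and the discriminants are written in rank order, lane comparisons reduce to integer comparisons. A quick check (crate-internal, since EventPriority lives in this module):

fn main() {
    // Derived ordering follows the explicit discriminants (Low = 0 ..= Immediate = 3).
    assert!(EventPriority::Immediate > EventPriority::High);
    assert!(EventPriority::High > EventPriority::Medium);
    assert!(EventPriority::Medium > EventPriority::Low);
}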
#[derive(Clone)]
pub struct ResourcePool {
/*
This *has* to be an UnsafeCell.
Each BumpFrame and Scope is located in this Slab - and we'll need mutable access to a scope while holding on to
its bumpframe contents immutably.
However, all of the interaction with this Slab is done in this module and the Diff module, so it should be fairly
simple to audit.
Wrapped in Rc so the "get_shared_context" closure can walk the tree (immutably!)
*/
pub components: Rc<UnsafeCell<Slab<Scope>>>,
/*
Yes, a slab of "nil". We use this for properly ordering ElementIDs - all we care about is the allocation strategy
that slab uses. The slab essentially just provides keys for ElementIDs that we can re-use in a Vec on the client.
This just happened to be the simplest and most efficient way to implement a deterministic keyed map with slot reuse.
In the future, we could actually store a pointer to the VNode instead of nil to provide O(1) lookup for VNodes...
*/
pub raw_elements: Rc<UnsafeCell<Slab<()>>>,
}
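Both fields lean on slab's key allocation. A runnable sketch of the two tricks described above, using the slab crate directly: key reuse from the "slab of nil", and the vacant-entry pattern behind insert_scope_with_key, where a value gets to learn its own key before it is constructed:

use slab::Slab;

fn main() {
    // ElementIds: a slab of nil - only the allocation strategy matters.
    let mut elements: Slab<()> = Slab::with_capacity(8);
    let a = elements.insert(());
    let b = elements.insert(());
    elements.remove(a);
    // Freed keys are reused, so the client's Vec of nodes stays dense.
    assert_eq!((a, b, elements.insert(())), (0, 1, 0));

    // The vacant-entry pattern: reserve the key first, then build the value
    // with that key in hand, as `insert_scope_with_key` does for Scopes.
    let mut scopes: Slab<String> = Slab::new();
    let entry = scopes.vacant_entry();
    let id = entry.key();
    entry.insert(format!("scope-{id}"));
    assert_eq!(scopes[id], "scope-0");
}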
impl ResourcePool {
/// This is unsafe because the caller needs to track which other scopes it's already using
pub fn get_scope(&self, idx: ScopeId) -> Option<&Scope> {
let inner = unsafe { &*self.components.get() };
@@ -373,430 +783,4 @@ impl Scheduler {
// // kill em
// }
}
pub fn manually_poll_events(&mut self) {
while let Ok(Some(msg)) = self.receiver.try_next() {
self.handle_channel_msg(msg);
}
}
// Converts UI events into dirty scopes with various priorities
pub fn consume_pending_events(&mut self) -> Result<()> {
// while let Some(trigger) = self.ui_events.pop_back() {
// match &trigger.event {
// SyntheticEvent::ClipboardEvent(_)
// | SyntheticEvent::CompositionEvent(_)
// | SyntheticEvent::KeyboardEvent(_)
// | SyntheticEvent::FocusEvent(_)
// | SyntheticEvent::FormEvent(_)
// | SyntheticEvent::SelectionEvent(_)
// | SyntheticEvent::TouchEvent(_)
// | SyntheticEvent::UIEvent(_)
// | SyntheticEvent::WheelEvent(_)
// | SyntheticEvent::MediaEvent(_)
// | SyntheticEvent::AnimationEvent(_)
// | SyntheticEvent::TransitionEvent(_)
// | SyntheticEvent::ToggleEvent(_)
// | SyntheticEvent::MouseEvent(_)
// | SyntheticEvent::PointerEvent(_) => {
// if let Some(scope) = self.get_scope_mut(trigger.scope) {
// if let Some(element) = trigger.mounted_dom_id {
// scope.call_listener(trigger.event, element)?;
// // let receiver = self.immediate_receiver.clone();
// // let mut receiver = receiver.borrow_mut();
// // // Drain the immediates into the dirty scopes, setting the appropriate priorities
// // while let Ok(Some(dirty_scope)) = receiver.try_next() {
// // self.add_dirty_scope(dirty_scope, trigger.priority)
// // }
// }
// }
// }
// }
// }
Ok(())
}
// nothing to do, no events on channels, no work
pub fn has_any_work(&self) -> bool {
self.has_work() || self.has_pending_events() || self.has_pending_garbage()
}
pub fn has_pending_events(&self) -> bool {
self.ui_events.len() > 0
}
pub fn has_work(&self) -> bool {
todo!()
// self.high_priorty.has_work()
// || self.medium_priority.has_work()
// || self.low_priority.has_work()
}
pub fn has_pending_garbage(&self) -> bool {
!self.garbage_scopes.is_empty()
}
fn get_current_fiber<'a>(&'a mut self) -> &mut DiffMachine<'a> {
todo!()
// let fib = match self.current_priority {
// EventPriority::High => &mut self.high_priorty,
// EventPriority::Medium => &mut self.medium_priority,
// EventPriority::Low => &mut self.low_priority,
// };
// unsafe { std::mem::transmute(fib) }
}
/// The primary workhorse of the VirtualDOM.
///
/// Uses some fairly complex logic to schedule what work should be produced.
///
/// Returns a list of successful mutations.
///
///
pub async fn work_with_deadline<'a>(
&'a mut self,
mut deadline: Pin<Box<impl FusedFuture<Output = ()>>>,
) -> Vec<Mutations<'a>> {
/*
Strategy:
- When called, check for any UI events that might've been received since the last frame.
- Dump all UI events into a "pending discrete" queue and a "pending continuous" queue.
- If there are any pending discrete events, then elevate our priority level. If our priority level is already "high,"
then we need to finish the high priority work first. If the current work is "low" then analyze what scopes
will be invalidated by this new work. If this interferes with any in-flight medium or low work, then we need
to bump the other work out of the way, or choose to process it so we don't have any conflicts.
'static components have a leg up here since their work can be re-used among multiple scopes.
"High priority" is only for blocking! Should only be used on "clicks"
- If there are no pending discrete events, then check for continuous events. These can be completely batched
Open questions:
- what if we get two clicks from the component during the same slice?
- should we batch?
- react says no - they are continuous
- but if we received both - then we don't need to diff, do we? run as many as we can and then finally diff?
*/
let mut committed_mutations = Vec::<Mutations<'static>>::new();
// TODO:
// the scheduler uses a bunch of different receivers to mimic a "topic" queue system. The futures-channel implementation
// doesn't really have a concept of a "topic" queue, so there's a lot of noise in the hand-rolled scheduler. We should
// explore abstracting the scheduler into a topic-queue channel system - similar to Kafka or something similar.
loop {
// Internalize any pending work since the last time we ran
self.manually_poll_events();
// Wait for any new events if we have nothing to do
if !self.has_any_work() {
self.clean_up_garbage();
let deadline_expired = self.wait_for_any_trigger(&mut deadline).await;
if deadline_expired {
return committed_mutations;
}
}
// Create work from the pending event queue
self.consume_pending_events().unwrap();
// Work through the current subtree, and commit the results when it finishes
// When the deadline expires, give back the work
self.current_priority = match (
self.lanes[0].has_work(),
self.lanes[1].has_work(),
self.lanes[2].has_work(),
self.lanes[3].has_work(),
) {
(true, _, _, _) => EventPriority::Immediate,
(false, true, _, _) => EventPriority::High,
(false, false, true, _) => EventPriority::Medium,
(false, false, false, _) => EventPriority::Low,
};
let current_lane = match self.current_priority {
EventPriority::Immediate => &mut self.lanes[0],
EventPriority::High => &mut self.lanes[1],
EventPriority::Medium => &mut self.lanes[2],
EventPriority::Low => &mut self.lanes[3],
};
if self.current_priority == EventPriority::Immediate {
// IDGAF - get this out the door right now. loop poll if we need to
}
use futures_util::future::{select, Either};
// We have to split away some parts of ourself - current lane is borrowed mutably
let shared = SharedVdom {
channel: self.channel.clone(),
components: unsafe { &mut *self.components.get() },
elements: &mut self.raw_elements,
};
let mut state = current_lane.saved_state.take().unwrap();
let mut machine = unsafe { state.promote(shared) };
if machine.stack.is_empty() {
// if let Some(scope) = current_lane.dirty_scopes.pop() {
// let component = self.components.get_mut().get_mut(scope.0).unwrap();
// let (old, new) = (component.frames.wip_head(), component.frames.fin_head());
// machine.stack.push(DiffInstruction::DiffNode { new, old });
// } else {
// }
} else {
}
// if let Some(state) = current_lane.saved_state.take() {
// let mut machine = unsafe { state.promote(&self) };
// machine.work().await;
// } else {
// if let Some(scope) = current_lane.dirty_scopes.pop() {
//
// let work_complete = {
// let fut = machine.diff_scope(scope);
// pin_mut!(fut);
// match select(fut, &mut deadline).await {
// Either::Left((work, _other)) => {
// //
// true
// }
// Either::Right((deadline, _other)) => {
// //
// false
// }
// }
// };
// let mut saved = unsafe { machine.save().extend() };
// // release the stack borrow of ourself
// if work_complete {
// for scope in saved.seen_scopes.drain() {
// current_lane.dirty_scopes.remove(&scope);
// }
// } else {
// }
// }
// };
// let mut new_mutations = Mutations::new();
// match self.work_with_deadline(&mut deadline).await {
// Some(mutations) => {
// // safety: the scheduler will never let us mutate
// let extended: Mutations<'static> = unsafe { std::mem::transmute(mutations) };
// committed_mutations.push(extended)
// }
// None => return committed_mutations,
// }
}
// // check if we need to elevate priority
// // let mut machine = DiffMachine::new(mutations, ScopeId(0), &self);
// let dirty_root = {
// let dirty_roots = match self.current_priority {
// EventPriority::High => &self.high_priorty.dirty_scopes,
// EventPriority::Medium => &self.medium_priority.dirty_scopes,
// EventPriority::Low => &self.low_priority.dirty_scopes,
// };
// let mut height = 0;
// let mut dirty_root = {
// let root = dirty_roots.iter().next();
// if root.is_none() {
// return true;
// }
// root.unwrap()
// };
// for root in dirty_roots {
// if let Some(scope) = self.get_scope(*root) {
// if scope.height < height {
// height = scope.height;
// dirty_root = root;
// }
// }
// }
// dirty_root
// };
// let fut = machine.diff_scope(*dirty_root).fuse();
// pin_mut!(fut);
// match futures_util::future::select(deadline, fut).await {
// futures_util::future::Either::Left((deadline, work_fut)) => true,
// futures_util::future::Either::Right((_, deadline_fut)) => false,
// }
}
// waits for a trigger, canceling early if the deadline is reached
// returns true if the deadline was reached
// does not return the trigger, but caches it in the scheduler
pub async fn wait_for_any_trigger(
&mut self,
deadline: &mut Pin<Box<impl FusedFuture<Output = ()>>>,
) -> bool {
use futures_util::future::{select, Either};
let event_fut = async {
match select(self.receiver.next(), self.async_tasks.next()).await {
Either::Left((msg, _other)) => {
self.handle_channel_msg(msg.unwrap());
}
Either::Right((task, _other)) => {
// do nothing, async task will likely generate a set of scheduler messages
}
}
};
pin_mut!(event_fut);
match select(event_fut, deadline).await {
Either::Left((msg, _other)) => false,
Either::Right((deadline, _)) => true,
}
}
pub fn handle_channel_msg(&mut self, msg: SchedulerMsg) {
match msg {
SchedulerMsg::Immediate(_) => todo!(),
SchedulerMsg::UiEvent(_) => todo!(),
//
SchedulerMsg::SubmitTask(_, _) => todo!(),
SchedulerMsg::ToggleTask(_) => todo!(),
SchedulerMsg::PauseTask(_) => todo!(),
SchedulerMsg::ResumeTask(_) => todo!(),
SchedulerMsg::DropTask(_) => todo!(),
}
}
pub fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
todo!()
// match priority {
// EventPriority::High => self.high_priorty.dirty_scopes.insert(scope),
// EventPriority::Medium => self.medium_priority.dirty_scopes.insert(scope),
// EventPriority::Low => self.low_priority.dirty_scopes.insert(scope),
// };
}
}
pub struct PriortySystem {
pub dirty_scopes: IndexSet<ScopeId>,
pub saved_state: Option<SavedDiffWork<'static>>,
pub in_progress: bool,
}
impl PriortySystem {
pub fn new() -> Self {
Self {
saved_state: None,
dirty_scopes: Default::default(),
in_progress: false,
}
}
fn has_work(&self) -> bool {
todo!()
}
fn work(&mut self) {
let scope = self.dirty_scopes.pop();
}
}
pub struct TaskHandle {
pub sender: UnboundedSender<SchedulerMsg>,
pub our_id: u64,
}
impl TaskHandle {
/// Toggles this coroutine off/on.
///
/// This method is not synchronous - your task will not stop immediately.
pub fn toggle(&self) {}
/// This method is not synchronous - your task will not stop immediately.
pub fn start(&self) {}
/// This method is not synchronous - your task will not stop immediately.
pub fn stop(&self) {}
/// This method is not synchronous - your task will not stop immediately.
pub fn restart(&self) {}
}
#[derive(serde::Serialize, serde::Deserialize, Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ScopeId(pub usize);
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ElementId(pub usize);
impl Display for ElementId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl ElementId {
pub fn as_u64(self) -> u64 {
self.0 as u64
}
}
/// Priority of Event Triggers.
///
/// Internally, Dioxus will abort work that's taking too long if new, more important, work arrives. Unlike React, Dioxus
/// won't be afraid to pause work or flush changes to the RealDOM. This is called "cooperative scheduling". Some Renderers
/// implement this form of scheduling internally, however Dioxus will perform its own scheduling as well.
///
/// The ultimate goal of the scheduler is to manage latency of changes, prioritizing "flashier" changes over "subtler" changes.
///
/// React has a 5-tier priority system. However, they break things into "Continuous" and "Discrete" priority. For now,
/// we keep it simple, and just use a 4-tier priority system.
///
/// - NoPriority = 0
/// - LowPriority = 1
/// - NormalPriority = 2
/// - UserBlocking = 3
/// - HighPriority = 4
/// - ImmediatePriority = 5
///
/// We still have a concept of discrete vs continuous though - discrete events won't be batched, but continuous events will.
/// This means that multiple "scroll" events will be processed in a single frame, but multiple "click" events will be
/// flushed before proceeding. Multiple discrete events are highly unlikely, though.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, PartialOrd, Ord)]
pub enum EventPriority {
/// Work that must be completed during the EventHandler phase
///
///
Immediate = 3,
/// "High Priority" work will not interrupt other high priority work, but will interrupt medium and low priority work.
///
/// This is typically reserved for things like user interaction.
///
/// React calls these "discrete" events, but with an extra category of "user-blocking" (Immediate).
High = 2,
/// "Medium priority" work is generated by page events not triggered by the user. These types of events are less important
/// than "High Priority" events and will take precedence over low priority events.
///
/// This is typically reserved for VirtualEvents that are not related to keyboard or mouse input.
///
/// React calls these "continuous" events (e.g. mouse move, mouse wheel, touch move, etc).
Medium = 1,
/// "Low Priority" work will always be pre-empted unless the work is significantly delayed, in which case it will be
/// advanced to the front of the work queue until completed.
///
/// The primary user of Low Priority work is the asynchronous work system (suspense).
///
/// This is considered "idle" work or "background" work.
Low = 0,
}

View file

@@ -107,7 +107,7 @@ impl VirtualDom {
let props_ptr = _root_props.as_ref().downcast_ref::<P>().unwrap() as *const P;
let base_scope = scheduler.insert_scope_with_key(|myidx| {
let base_scope = scheduler.pool.insert_scope_with_key(|myidx| {
let caller = NodeFactory::create_component_caller(root, props_ptr as *const _);
let name = type_name_of(root);
Scope::new(
@@ -129,11 +129,11 @@
}
pub fn base_scope(&self) -> &Scope {
self.scheduler.get_scope(self.base_scope).unwrap()
self.scheduler.pool.get_scope(self.base_scope).unwrap()
}
pub fn get_scope(&self, id: ScopeId) -> Option<&Scope> {
self.scheduler.get_scope(id)
self.scheduler.pool.get_scope(id)
}
/// Performs a *full* rebuild of the virtual dom, returning every edit required to generate the actual dom from scratch
@@ -194,10 +194,11 @@ impl VirtualDom {
}
pub async fn diff_async<'s>(&'s mut self) -> Mutations<'s> {
let mut diff_machine = DiffMachine::new(Mutations::new(), todo!());
let mut diff_machine = DiffMachine::new(Mutations::new(), todo!(), todo!());
let cur_component = self
.scheduler
.pool
.get_scope_mut(self.base_scope)
.expect("The base scope should never be moved");