mirror of
https://github.com/DioxusLabs/dioxus
synced 2024-11-23 12:43:08 +00:00
wip: refactor to fix some ownership quirks
This commit is contained in: parent 3bf19d8106 · commit 05e960b6b0
16 changed files with 549 additions and 535 deletions
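The theme of the refactor: fields that were previously `Shared<T>` handles (an alias for `Rc<RefCell<T>>`) cloned across the virtualdom become plainly owned fields on a single `Scheduler`, so mutation goes through `&mut self` instead of runtime borrows. A minimal standalone sketch of the before/after pattern — the types below are illustrative stand-ins, not Dioxus code:

// Sketch only: contrasts runtime-checked sharing with plain ownership.
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

// Before: shared handles, borrow checked at runtime.
struct BeforeScheduler {
    pending: Rc<RefCell<VecDeque<u32>>>,
}

impl BeforeScheduler {
    fn push(&self, event: u32) {
        // May panic if something else holds a borrow.
        self.pending.borrow_mut().push_back(event);
    }
}

// After: plain ownership, borrow checked at compile time.
struct AfterScheduler {
    pending: VecDeque<u32>,
}

impl AfterScheduler {
    fn push(&mut self, event: u32) {
        // No runtime borrow; requires &mut self instead.
        self.pending.push_back(event);
    }
}

fn main() {
    let before = BeforeScheduler { pending: Rc::new(RefCell::new(VecDeque::new())) };
    before.push(1);

    let mut after = AfterScheduler { pending: VecDeque::new() };
    after.push(1);
    assert_eq!(after.pending.len(), before.pending.borrow().len());
}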
packages/core/examples/vdom_usage.rs — new file, 19 lines
@@ -0,0 +1,19 @@
use std::time::Duration;

use dioxus_core::prelude::*;

#[async_std::main]
async fn main() {
    static App: FC<()> = |cx| cx.render(LazyNodes::new(|f| f.text(format_args!("hello"))));

    let mut dom = VirtualDom::new(App);

    dom.rebuild();

    let mut deadline = async_std::task::sleep(Duration::from_millis(50));
    let fut = dom.run_with_deadline(deadline);

    if let Some(mutations) = fut.await {
        //
    }
}
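The example above races the diffing work against a deadline future. A standalone sketch of that select-against-deadline pattern, using generic futures rather than the Dioxus API:

// Sketch only: race a unit of work against a timer and see which wins.
use std::time::Duration;

use futures_util::future::{select, Either};
use futures_util::pin_mut;

#[async_std::main]
async fn main() {
    let work = async {
        async_std::task::sleep(Duration::from_millis(20)).await;
        42
    };
    let deadline = async_std::task::sleep(Duration::from_millis(50));
    pin_mut!(work);
    pin_mut!(deadline);

    match select(work, deadline).await {
        Either::Left((result, _deadline_fut)) => println!("work finished first: {}", result),
        Either::Right(((), _work_fut)) => println!("deadline hit; work was interrupted"),
    }
}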
@@ -10,30 +10,19 @@ use fxhash::{FxHashMap, FxHashSet};
use slab::Slab;
use smallvec::SmallVec;

// slotmap::new_key_type! {
//     // A dedicated key type for the all the scopes
//     pub struct ScopeId;
// }
// #[cfg(feature = "serialize", serde::Serialize)]
// #[cfg(feature = "serialize", serde::Serialize)]
#[derive(serde::Serialize, serde::Deserialize, Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ScopeId(pub usize);
use std::any::Any;

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ElementId(pub usize);
impl Display for ElementId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}
use std::any::TypeId;
use std::cell::Ref;
use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque};
use std::pin::Pin;

impl ElementId {
    pub fn as_u64(self) -> u64 {
        self.0 as u64
    }
}
use futures_util::future::FusedFuture;
use futures_util::pin_mut;
use futures_util::Future;
use futures_util::FutureExt;
use futures_util::StreamExt;

type Shared<T> = Rc<RefCell<T>>;
type UiReceiver = UnboundedReceiver<EventTrigger>;
type UiSender = UnboundedSender<EventTrigger>;

@@ -41,38 +30,51 @@ type TaskReceiver = UnboundedReceiver<ScopeId>;
type TaskSender = UnboundedSender<ScopeId>;

/// These are resources shared among all the components and the virtualdom itself
#[derive(Clone)]
-pub struct SharedResources {
-    pub components: Rc<UnsafeCell<Slab<Scope>>>,
+pub struct Scheduler {
+    pub components: UnsafeCell<Slab<Scope>>,

-    pub(crate) heuristics: Shared<HeuristicsEngine>,
+    pub(crate) heuristics: HeuristicsEngine,

    // Used by "set_state" and co - is its own queue
    pub immediate_sender: TaskSender,
-    pub immediate_receiver: Shared<TaskReceiver>,
+    pub immediate_receiver: TaskReceiver,

    /// Triggered by event listeners
    pub ui_event_sender: UiSender,
-    pub ui_event_receiver: Shared<UiReceiver>,
+    pub ui_event_receiver: UiReceiver,

    // Garbage stored
-    pub pending_garbage: Shared<FxHashSet<ScopeId>>,
+    pub pending_garbage: FxHashSet<ScopeId>,

    // In-flight futures
-    pub async_tasks: Shared<FuturesUnordered<FiberTask>>,
+    pub async_tasks: FuturesUnordered<FiberTask>,

    /// We use a SlotSet to keep track of the keys that are currently being used.
    /// However, we don't store any specific data since the "mirror"
-    pub raw_elements: Shared<Slab<()>>,
+    pub raw_elements: Slab<()>,

    pub task_setter: Rc<dyn Fn(ScopeId)>,

+    // scheduler stuff
+    pub current_priority: EventPriority,
+
+    pub pending_events: VecDeque<EventTrigger>,
+
+    pub pending_immediates: VecDeque<ScopeId>,
+
+    pub pending_tasks: VecDeque<EventTrigger>,
+
+    pub garbage_scopes: HashSet<ScopeId>,
+
+    pub high_priorty: PriortySystem,
+    pub medium_priority: PriortySystem,
+    pub low_priority: PriortySystem,
}

-impl SharedResources {
+impl Scheduler {
    pub fn new() -> Self {
        // preallocate 2000 elements and 20 scopes to avoid dynamic allocation
-        let components: Rc<UnsafeCell<Slab<Scope>>> =
-            Rc::new(UnsafeCell::new(Slab::with_capacity(100)));
+        let components: UnsafeCell<Slab<Scope>> = UnsafeCell::new(Slab::with_capacity(100));

        // elements are super cheap - the value takes no space
        let raw_elements = Slab::with_capacity(2000);

@@ -107,19 +109,32 @@ impl SharedResources {

        Self {
            components,
-            async_tasks: Rc::new(RefCell::new(FuturesUnordered::new())),
+            async_tasks: FuturesUnordered::new(),

-            ui_event_receiver: Rc::new(RefCell::new(ui_receiver)),
+            ui_event_receiver: ui_receiver,
            ui_event_sender: ui_sender,

-            immediate_receiver: Rc::new(RefCell::new(immediate_receiver)),
+            immediate_receiver: immediate_receiver,
            immediate_sender: immediate_sender,

-            pending_garbage: Rc::new(RefCell::new(FxHashSet::default())),
+            pending_garbage: FxHashSet::default(),

-            heuristics: Rc::new(RefCell::new(heuristics)),
-            raw_elements: Rc::new(RefCell::new(raw_elements)),
+            heuristics: heuristics,
+            raw_elements: raw_elements,
            task_setter,

+            // a storage for our receiver to dump into
+            pending_events: VecDeque::new(),
+            pending_immediates: VecDeque::new(),
+            pending_tasks: VecDeque::new(),
+
+            garbage_scopes: HashSet::new(),
+
+            current_priority: EventPriority::Low,
+
+            high_priorty: PriortySystem::new(),
+            medium_priority: PriortySystem::new(),
+            low_priority: PriortySystem::new(),
        }
    }

@@ -198,6 +213,309 @@ impl SharedResources {
            priority: trigger.priority,
        }
    }

    pub fn clean_up_garbage(&mut self) {
        let mut scopes_to_kill = Vec::new();
        let mut garbage_list = Vec::new();

        for scope in self.garbage_scopes.drain() {
            let scope = self.get_scope_mut(scope).unwrap();
            for node in scope.consume_garbage() {
                garbage_list.push(node);
            }

            while let Some(node) = garbage_list.pop() {
                match &node {
                    VNode::Text(_) => {
                        self.collect_garbage(node.direct_id());
                    }
                    VNode::Anchor(_) => {
                        self.collect_garbage(node.direct_id());
                    }
                    VNode::Suspended(_) => {
                        self.collect_garbage(node.direct_id());
                    }

                    VNode::Element(el) => {
                        self.collect_garbage(node.direct_id());
                        for child in el.children {
                            garbage_list.push(child);
                        }
                    }

                    VNode::Fragment(frag) => {
                        for child in frag.children {
                            garbage_list.push(child);
                        }
                    }

                    VNode::Component(comp) => {
                        // TODO: run the hook destructors and then even delete the scope

                        let scope_id = comp.ass_scope.get().unwrap();
                        let scope = self.get_scope(scope_id).unwrap();
                        let root = scope.root();
                        garbage_list.push(root);
                        scopes_to_kill.push(scope_id);
                    }
                }
            }
        }

        for scope in scopes_to_kill.drain(..) {
            //
            // kill em
        }
    }

    // channels don't have these methods, so we just implement our own wrapper
    pub fn next_event(&mut self) -> Option<EventTrigger> {
        // pop the last event off the internal queue
        self.pending_events.pop_back().or_else(|| {
            self.ui_event_receiver
                .borrow_mut()
                .try_next()
                .ok()
                .flatten()
        })
    }

    pub fn manually_poll_events(&mut self) {
        // 1. Poll the UI event receiver
        // 2. Poll the set_state receiver

        // poll the immediates first, creating work out of them
        let shared_receiver = self.immediate_receiver.clone();
        let mut receiver = shared_receiver.borrow_mut();
        while let Ok(Some(trigger)) = receiver.try_next() {
            self.add_dirty_scope(trigger, EventPriority::Low);
        }

        // next poll the UI events,
        let mut receiver = self.ui_event_receiver.borrow_mut();
        while let Ok(Some(trigger)) = receiver.try_next() {
            self.pending_events.push_back(trigger);
        }
    }

    // Converts UI events into dirty scopes with various priorities
    pub fn consume_pending_events(&mut self) -> Result<()> {
        while let Some(trigger) = self.pending_events.pop_back() {
            match &trigger.event {
                VirtualEvent::AsyncEvent { .. } => {}

                // This suspense system works, but it's not the most elegant solution.
                // TODO: Replace this system
                VirtualEvent::SuspenseEvent { hook_idx, domnode } => {
                    todo!("suspense needs to be converted into its own channel");

                    // // Safety: this handler is the only thing that can mutate shared items at this moment in tim
                    // let scope = diff_machine.get_scope_mut(&trigger.originator).unwrap();

                    // // safety: we are sure that there are no other references to the inner content of suspense hooks
                    // let hook = unsafe { scope.hooks.get_mut::<SuspenseHook>(*hook_idx) }.unwrap();

                    // let cx = Context { scope, props: &() };
                    // let scx = SuspendedContext { inner: cx };

                    // // generate the new node!
                    // let nodes: Option<VNode> = (&hook.callback)(scx);

                    // if let Some(nodes) = nodes {
                    //     // allocate inside the finished frame - not the WIP frame
                    //     let nodes = scope.frames.finished_frame().bump.alloc(nodes);

                    //     // push the old node's root onto the stack
                    //     let real_id = domnode.get().ok_or(Error::NotMounted)?;
                    //     diff_machine.edit_push_root(real_id);

                    //     // push these new nodes onto the diff machines stack
                    //     let meta = diff_machine.create_vnode(&*nodes);

                    //     // replace the placeholder with the new nodes we just pushed on the stack
                    //     diff_machine.edit_replace_with(1, meta.added_to_stack);
                    // } else {
                    //     log::warn!(
                    //         "Suspense event came through, but there were no generated nodes >:(."
                    //     );
                    // }
                }

                VirtualEvent::ClipboardEvent(_)
                | VirtualEvent::CompositionEvent(_)
                | VirtualEvent::KeyboardEvent(_)
                | VirtualEvent::FocusEvent(_)
                | VirtualEvent::FormEvent(_)
                | VirtualEvent::SelectionEvent(_)
                | VirtualEvent::TouchEvent(_)
                | VirtualEvent::UIEvent(_)
                | VirtualEvent::WheelEvent(_)
                | VirtualEvent::MediaEvent(_)
                | VirtualEvent::AnimationEvent(_)
                | VirtualEvent::TransitionEvent(_)
                | VirtualEvent::ToggleEvent(_)
                | VirtualEvent::MouseEvent(_)
                | VirtualEvent::PointerEvent(_) => {
                    if let Some(scope) = self.get_scope_mut(trigger.originator) {
                        if let Some(element) = trigger.real_node_id {
                            scope.call_listener(trigger.event, element)?;

                            let receiver = self.immediate_receiver.clone();
                            let mut receiver = receiver.borrow_mut();

                            // Drain the immediates into the dirty scopes, setting the appropiate priorities
                            while let Ok(Some(dirty_scope)) = receiver.try_next() {
                                self.add_dirty_scope(dirty_scope, trigger.priority)
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }

    // nothing to do, no events on channels, no work
    pub fn has_any_work(&self) -> bool {
        self.has_work() || self.has_pending_events() || self.has_pending_garbage()
    }

    pub fn has_pending_events(&self) -> bool {
        self.pending_events.len() > 0
    }

    pub fn has_work(&self) -> bool {
        self.high_priorty.has_work()
            || self.medium_priority.has_work()
            || self.low_priority.has_work()
    }

    pub fn has_pending_garbage(&self) -> bool {
        !self.garbage_scopes.is_empty()
    }

    fn get_current_fiber<'a>(&'a mut self) -> &mut DiffMachine<'a> {
        let fib = match self.current_priority {
            EventPriority::High => &mut self.high_priorty,
            EventPriority::Medium => &mut self.medium_priority,
            EventPriority::Low => &mut self.low_priority,
        };
        unsafe { std::mem::transmute(fib) }
    }

    /// If a the fiber finishes its works (IE needs to be committed) the scheduler will drop the dirty scope
    pub async fn work_with_deadline<'a>(
        &mut self,
        mutations: &mut Mutations<'_>,
        deadline: &mut Pin<Box<impl FusedFuture<Output = ()>>>,
    ) -> FiberResult {
        // check if we need to elevate priority
        self.current_priority = match (
            self.high_priorty.has_work(),
            self.medium_priority.has_work(),
            self.low_priority.has_work(),
        ) {
            (true, _, _) => EventPriority::High,
            (false, true, _) => EventPriority::Medium,
            (false, false, _) => EventPriority::Low,
        };

        let mut machine = DiffMachine::new(mutations, ScopeId(0), &self);

        let dirty_root = {
            let dirty_roots = match self.current_priority {
                EventPriority::High => &self.high_priorty.dirty_scopes,
                EventPriority::Medium => &self.medium_priority.dirty_scopes,
                EventPriority::Low => &self.low_priority.dirty_scopes,
            };
            let mut height = 0;
            let mut dirty_root = {
                let root = dirty_roots.iter().next();
                if root.is_none() {
                    return FiberResult::Done;
                }
                root.unwrap()
            };

            for root in dirty_roots {
                if let Some(scope) = self.get_scope(*root) {
                    if scope.height < height {
                        height = scope.height;
                        dirty_root = root;
                    }
                }
            }
            dirty_root
        };

        match {
            let fut = machine.diff_scope(*dirty_root).fuse();
            pin_mut!(fut);

            match futures_util::future::select(deadline, fut).await {
                futures_util::future::Either::Left((deadline, work_fut)) => true,
                futures_util::future::Either::Right((_, deadline_fut)) => false,
            }
        } {
            true => FiberResult::Done,
            false => FiberResult::Interrupted,
        }
    }

    // waits for a trigger, canceling early if the deadline is reached
    // returns true if the deadline was reached
    // does not return the trigger, but caches it in the scheduler
    pub async fn wait_for_any_trigger(
        &mut self,
        mut deadline: &mut Pin<Box<impl FusedFuture<Output = ()>>>,
    ) -> bool {
        use futures_util::select;

        let _immediates = self.immediate_receiver.clone();
        let mut immediates = _immediates.borrow_mut();

        let mut _ui_events = self.ui_event_receiver.clone();
        let mut ui_events = _ui_events.borrow_mut();

        let mut _tasks = self.async_tasks.clone();
        let mut tasks = _tasks.borrow_mut();

        // set_state called
        select! {
            dirty_scope = immediates.next() => {
                if let Some(scope) = dirty_scope {
                    self.add_dirty_scope(scope, EventPriority::Low);
                }
            }

            ui_event = ui_events.next() => {
                if let Some(event) = ui_event {
                    self.pending_events.push_back(event);
                }
            }

            async_task = tasks.next() => {
                if let Some(event) = async_task {
                    self.pending_events.push_back(event);
                }
            }

            _ = deadline => {
                return true;
            }

        }
        false
    }

    pub fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
        match priority {
            EventPriority::High => self.high_priorty.dirty_scopes.insert(scope),
            EventPriority::Medium => self.medium_priority.dirty_scopes.insert(scope),
            EventPriority::Low => self.low_priority.dirty_scopes.insert(scope),
        };
    }
}

pub struct TaskHandle {}

@@ -208,3 +526,44 @@ impl TaskHandle {
    pub fn stop(&self) {}
    pub fn restart(&self) {}
}

#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)]
pub struct DirtyScope {
    height: u32,
    start_tick: u32,
}

pub struct PriortySystem {
    pub pending_scopes: Vec<ScopeId>,
    pub dirty_scopes: HashSet<ScopeId>,
}

impl PriortySystem {
    pub fn new() -> Self {
        Self {
            pending_scopes: Default::default(),
            dirty_scopes: Default::default(),
        }
    }

    fn has_work(&self) -> bool {
        self.pending_scopes.len() > 0 || self.dirty_scopes.len() > 0
    }
}

#[derive(serde::Serialize, serde::Deserialize, Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ScopeId(pub usize);

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ElementId(pub usize);
impl Display for ElementId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl ElementId {
    pub fn as_u64(self) -> u64 {
        self.0 as u64
    }
}
@@ -5,7 +5,7 @@ use crate::innerlude::*;
/// This iterator is useful when it's important to load the next real root onto the top of the stack for operations like
/// "InsertBefore".
pub struct RealChildIterator<'a> {
-    scopes: &'a SharedResources,
+    scopes: &'a Scheduler,

    // Heuristcally we should never bleed into 4 completely nested fragments/components
    // Smallvec lets us stack allocate our little stack machine so the vast majority of cases are sane

@@ -14,14 +14,14 @@ pub struct RealChildIterator<'a> {
}

impl<'a> RealChildIterator<'a> {
-    pub fn new(starter: &'a VNode<'a>, scopes: &'a SharedResources) -> Self {
+    pub fn new(starter: &'a VNode<'a>, scopes: &'a Scheduler) -> Self {
        Self {
            scopes,
            stack: smallvec::smallvec![(0, starter)],
        }
    }

-    pub fn new_from_slice(nodes: &'a [VNode<'a>], scopes: &'a SharedResources) -> Self {
+    pub fn new_from_slice(nodes: &'a [VNode<'a>], scopes: &'a Scheduler) -> Self {
        let mut stack = smallvec::smallvec![];
        for node in nodes {
            stack.push((0, node));
@@ -88,7 +88,7 @@
//! More info on how to improve this diffing algorithm:
//! - https://hacks.mozilla.org/2019/03/fast-bump-allocated-virtual-doms-with-rust-and-wasm/

-use crate::{arena::SharedResources, innerlude::*};
+use crate::{arena::Scheduler, innerlude::*};
use fxhash::{FxHashMap, FxHashSet};
use DomEdit::*;

@@ -105,9 +105,9 @@ use DomEdit::*;
/// Funnily enough, this stack machine's entire job is to create instructions for another stack machine to execute. It's
/// stack machines all the way down!
pub struct DiffMachine<'bump> {
-    vdom: &'bump SharedResources,
+    vdom: &'bump Scheduler,

-    pub mutations: Mutations<'bump>,
+    pub mutations: &'bump mut Mutations<'bump>,

    pub stack: DiffStack<'bump>,

@@ -118,9 +118,9 @@ pub struct DiffMachine<'bump> {

impl<'bump> DiffMachine<'bump> {
    pub(crate) fn new(
-        edits: Mutations<'bump>,
+        edits: &'bump mut Mutations<'bump>,
        cur_scope: ScopeId,
-        shared: &'bump SharedResources,
+        shared: &'bump Scheduler,
    ) -> Self {
        Self {
            stack: DiffStack::new(cur_scope),

@@ -131,11 +131,11 @@ impl<'bump> DiffMachine<'bump> {
        }
    }

-    pub fn new_headless(shared: &'bump SharedResources) -> Self {
-        let edits = Mutations::new();
-        let cur_scope = ScopeId(0);
-        Self::new(edits, cur_scope, shared)
-    }
+    // pub fn new_headless(shared: &'bump SharedResources) -> Self {
+    //     let edits = Mutations::new();
+    //     let cur_scope = ScopeId(0);
+    //     Self::new(edits, cur_scope, shared)
+    // }

    //
    pub async fn diff_scope(&mut self, id: ScopeId) {

@@ -320,7 +320,7 @@ impl<'bump> DiffMachine<'bump> {
                Some(parent_idx),
                height,
                ScopeChildren(vcomponent.children),
-                self.vdom.clone(),
+                // self.vdom.clone(),
                vcomponent.name,
            )
        });
@@ -68,9 +68,13 @@ impl Ord for EventKey {
/// - UserBlocking = 3
/// - HighPriority = 4
/// - ImmediatePriority = 5
///
+/// We still have a concept of discrete vs continuous though - discrete events won't be batched, but continuous events will.
+/// This means that multiple "scroll" events will be processed in a single frame, but multiple "click" events will be
+/// flushed before proceeding. Multiple discrete events is highly unlikely, though.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, PartialOrd, Ord)]
pub enum EventPriority {
-    /// "High Priority" work will not interrupt other high priority work, but will interrupt long medium and low priority work.
+    /// "High Priority" work will not interrupt other high priority work, but will interrupt medium and low priority work.
    ///
    /// This is typically reserved for things like user interaction.
    ///
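A note on the derives above: `Ord` on a fieldless enum follows declaration order, which is what lets the scheduler compare and elevate priorities directly. A sketch — the variant order below is an assumption for illustration, not the full Dioxus enum:

// Sketch only: demonstrates declaration-order comparison from derive(Ord).
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, PartialOrd, Ord)]
enum EventPriority {
    Low,    // assumed lowest
    Medium,
    High,   // assumed highest
}

fn main() {
    let mut queue = vec![EventPriority::Medium, EventPriority::High, EventPriority::Low];
    queue.sort(); // ascending: Low, Medium, High
    assert!(EventPriority::High > EventPriority::Low);
    println!("{:?}", queue);
}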
@@ -43,7 +43,7 @@ pub(crate) mod innerlude {
    pub use crate::hooks::*;
    pub use crate::mutations::*;
    pub use crate::nodes::*;
-    pub use crate::scheduler::*;
+    // pub use crate::scheduler::*;
    pub use crate::scope::*;
    pub use crate::util::*;
    pub use crate::virtual_dom::*;

@@ -75,7 +75,7 @@ pub mod hooklist;
pub mod hooks;
pub mod mutations;
pub mod nodes;
-pub mod scheduler;
+// pub mod scheduler;
pub mod scope;
pub mod signals;
pub mod util;
@@ -19,7 +19,9 @@ impl<'a> Mutations<'a> {
        Self { edits, noderefs }
    }

-    pub fn extend(&mut self, other: &mut Mutations) {}
+    pub fn extend(&mut self, other: &mut Mutations) {
+        // self.edits.extend(other.edits);
+    }

    // Navigation
    pub(crate) fn push_root(&mut self, root: ElementId) {
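`extend` is left stubbed here with the append commented out. A sketch of the intended "append the other batch's edits" behavior, on a simplified stand-in type (the real `Mutations` holds bump-allocated `DomEdit`s and node refs, so this is illustrative only):

// Sketch only: a drain-style extend on a simplified Mutations stand-in.
#[derive(Debug, Default)]
struct Mutations {
    edits: Vec<String>, // stand-in for Vec<DomEdit<'a>>
}

impl Mutations {
    fn extend(&mut self, other: &mut Mutations) {
        // Drain rather than clone, mirroring `self.edits.extend(other.edits)`.
        self.edits.append(&mut other.edits);
    }
}

fn main() {
    let mut committed = Mutations { edits: vec!["CreateElement".into()] };
    let mut fresh = Mutations { edits: vec!["AppendChildren".into()] };
    committed.extend(&mut fresh);
    assert_eq!(committed.edits.len(), 2);
    assert!(fresh.edits.is_empty());
}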
@@ -1,409 +0,0 @@
//! Provides resumable task scheduling for Dioxus.
//!
//!
//! ## Design
//!
//! The recent React fiber architecture rewrite enabled pauseable and resumable diffing through the development of
//! something called a "Fiber." Fibers were created to provide a way of "saving a stack frame", making it possible to
//! resume said stack frame at a later time, or to drop it altogether. This made it possible to
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!

use std::any::Any;

use std::any::TypeId;
use std::cell::{Ref, RefCell, RefMut};
use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque};
use std::pin::Pin;

use futures_util::future::FusedFuture;
use futures_util::pin_mut;
use futures_util::Future;
use futures_util::FutureExt;
use futures_util::StreamExt;
use smallvec::SmallVec;

use crate::innerlude::*;

pub struct Scheduler {
    current_priority: EventPriority,

    pending_events: VecDeque<EventTrigger>,

    pending_immediates: VecDeque<ScopeId>,

    pending_tasks: VecDeque<EventTrigger>,

    garbage_scopes: HashSet<ScopeId>,

    shared: SharedResources,

    high_priorty: PriortySystem,
    medium_priority: PriortySystem,
    low_priority: PriortySystem,
}

pub enum FiberResult<'a> {
    Done(Mutations<'a>),
    Interrupted,
}

impl Scheduler {
    pub fn new(shared: SharedResources) -> Self {
        Self {
            shared,

            // a storage for our receiver to dump into
            pending_events: VecDeque::new(),
            pending_immediates: VecDeque::new(),
            pending_tasks: VecDeque::new(),

            garbage_scopes: HashSet::new(),

            current_priority: EventPriority::Low,

            high_priorty: PriortySystem::new(),
            medium_priority: PriortySystem::new(),
            low_priority: PriortySystem::new(),
        }
    }

    // channels don't have these methods, so we just implement our own wrapper
    pub fn next_event(&mut self) -> Option<EventTrigger> {
        // pop the last event off the internal queue
        self.pending_events.pop_back().or_else(|| {
            self.shared
                .ui_event_receiver
                .borrow_mut()
                .try_next()
                .ok()
                .flatten()
        })
    }

    pub fn manually_poll_events(&mut self) {
        // 1. Poll the UI event receiver
        // 2. Poll the set_state receiver

        // poll the immediates first, creating work out of them
        let shared_receiver = self.shared.immediate_receiver.clone();
        let mut receiver = shared_receiver.borrow_mut();
        while let Ok(Some(trigger)) = receiver.try_next() {
            self.add_dirty_scope(trigger, EventPriority::Low);
        }

        // next poll the UI events,
        let mut receiver = self.shared.ui_event_receiver.borrow_mut();
        while let Ok(Some(trigger)) = receiver.try_next() {
            self.pending_events.push_back(trigger);
        }
    }

    // Converts UI events into dirty scopes with various priorities
    pub fn consume_pending_events(&mut self) -> Result<()> {
        while let Some(trigger) = self.pending_events.pop_back() {
            match &trigger.event {
                VirtualEvent::AsyncEvent { .. } => {}

                // This suspense system works, but it's not the most elegant solution.
                // TODO: Replace this system
                VirtualEvent::SuspenseEvent { hook_idx, domnode } => {
                    todo!("suspense needs to be converted into its own channel");

                    // // Safety: this handler is the only thing that can mutate shared items at this moment in tim
                    // let scope = diff_machine.get_scope_mut(&trigger.originator).unwrap();

                    // // safety: we are sure that there are no other references to the inner content of suspense hooks
                    // let hook = unsafe { scope.hooks.get_mut::<SuspenseHook>(*hook_idx) }.unwrap();

                    // let cx = Context { scope, props: &() };
                    // let scx = SuspendedContext { inner: cx };

                    // // generate the new node!
                    // let nodes: Option<VNode> = (&hook.callback)(scx);

                    // if let Some(nodes) = nodes {
                    //     // allocate inside the finished frame - not the WIP frame
                    //     let nodes = scope.frames.finished_frame().bump.alloc(nodes);

                    //     // push the old node's root onto the stack
                    //     let real_id = domnode.get().ok_or(Error::NotMounted)?;
                    //     diff_machine.edit_push_root(real_id);

                    //     // push these new nodes onto the diff machines stack
                    //     let meta = diff_machine.create_vnode(&*nodes);

                    //     // replace the placeholder with the new nodes we just pushed on the stack
                    //     diff_machine.edit_replace_with(1, meta.added_to_stack);
                    // } else {
                    //     log::warn!(
                    //         "Suspense event came through, but there were no generated nodes >:(."
                    //     );
                    // }
                }

                VirtualEvent::ClipboardEvent(_)
                | VirtualEvent::CompositionEvent(_)
                | VirtualEvent::KeyboardEvent(_)
                | VirtualEvent::FocusEvent(_)
                | VirtualEvent::FormEvent(_)
                | VirtualEvent::SelectionEvent(_)
                | VirtualEvent::TouchEvent(_)
                | VirtualEvent::UIEvent(_)
                | VirtualEvent::WheelEvent(_)
                | VirtualEvent::MediaEvent(_)
                | VirtualEvent::AnimationEvent(_)
                | VirtualEvent::TransitionEvent(_)
                | VirtualEvent::ToggleEvent(_)
                | VirtualEvent::MouseEvent(_)
                | VirtualEvent::PointerEvent(_) => {
                    if let Some(scope) = self.shared.get_scope_mut(trigger.originator) {
                        if let Some(element) = trigger.real_node_id {
                            scope.call_listener(trigger.event, element)?;

                            let receiver = self.shared.immediate_receiver.clone();
                            let mut receiver = receiver.borrow_mut();

                            // Drain the immediates into the dirty scopes, setting the appropiate priorities
                            while let Ok(Some(dirty_scope)) = receiver.try_next() {
                                self.add_dirty_scope(dirty_scope, trigger.priority)
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }

    // nothing to do, no events on channels, no work
    pub fn has_any_work(&self) -> bool {
        self.has_work() || self.has_pending_events() || self.has_pending_garbage()
    }

    pub fn has_pending_events(&self) -> bool {
        self.pending_events.len() > 0
    }

    pub fn has_work(&self) -> bool {
        self.high_priorty.has_work()
            || self.medium_priority.has_work()
            || self.low_priority.has_work()
    }

    pub fn has_pending_garbage(&self) -> bool {
        !self.garbage_scopes.is_empty()
    }

    fn get_current_fiber<'a>(&'a mut self) -> &mut DiffMachine<'a> {
        let fib = match self.current_priority {
            EventPriority::High => &mut self.high_priorty,
            EventPriority::Medium => &mut self.medium_priority,
            EventPriority::Low => &mut self.low_priority,
        };
        unsafe { std::mem::transmute(fib) }
    }

    /// If a the fiber finishes its works (IE needs to be committed) the scheduler will drop the dirty scope
    pub async fn work_with_deadline<'a>(
        &'a mut self,
        deadline: &mut Pin<Box<impl FusedFuture<Output = ()>>>,
    ) -> FiberResult<'a> {
        // check if we need to elevate priority
        self.current_priority = match (
            self.high_priorty.has_work(),
            self.medium_priority.has_work(),
            self.low_priority.has_work(),
        ) {
            (true, _, _) => EventPriority::High,
            (false, true, _) => EventPriority::Medium,
            (false, false, _) => EventPriority::Low,
        };

        let mut machine = DiffMachine::new_headless(&self.shared);

        let dirty_root = {
            let dirty_roots = match self.current_priority {
                EventPriority::High => &self.high_priorty.dirty_scopes,
                EventPriority::Medium => &self.medium_priority.dirty_scopes,
                EventPriority::Low => &self.low_priority.dirty_scopes,
            };
            let mut height = 0;
            let mut dirty_root = {
                let root = dirty_roots.iter().next();
                if root.is_none() {
                    return FiberResult::Done(machine.mutations);
                }
                root.unwrap()
            };

            for root in dirty_roots {
                if let Some(scope) = self.shared.get_scope(*root) {
                    if scope.height < height {
                        height = scope.height;
                        dirty_root = root;
                    }
                }
            }
            dirty_root
        };

        match {
            let fut = machine.diff_scope(*dirty_root).fuse();
            pin_mut!(fut);

            match futures_util::future::select(deadline, fut).await {
                futures_util::future::Either::Left((deadline, work_fut)) => true,
                futures_util::future::Either::Right((_, deadline_fut)) => false,
            }
        } {
            true => FiberResult::Done(machine.mutations),
            false => FiberResult::Interrupted,
        }
    }

    // waits for a trigger, canceling early if the deadline is reached
    // returns true if the deadline was reached
    // does not return the trigger, but caches it in the scheduler
    pub async fn wait_for_any_trigger(
        &mut self,
        mut deadline: &mut Pin<Box<impl FusedFuture<Output = ()>>>,
    ) -> bool {
        use futures_util::select;

        let _immediates = self.shared.immediate_receiver.clone();
        let mut immediates = _immediates.borrow_mut();

        let mut _ui_events = self.shared.ui_event_receiver.clone();
        let mut ui_events = _ui_events.borrow_mut();

        let mut _tasks = self.shared.async_tasks.clone();
        let mut tasks = _tasks.borrow_mut();

        // set_state called
        select! {
            dirty_scope = immediates.next() => {
                if let Some(scope) = dirty_scope {
                    self.add_dirty_scope(scope, EventPriority::Low);
                }
            }

            ui_event = ui_events.next() => {
                if let Some(event) = ui_event {
                    self.pending_events.push_back(event);
                }
            }

            async_task = tasks.next() => {
                if let Some(event) = async_task {
                    self.pending_events.push_back(event);
                }
            }

            _ = deadline => {
                return true;
            }

        }
        false
    }

    pub fn add_dirty_scope(&mut self, scope: ScopeId, priority: EventPriority) {
        match priority {
            EventPriority::High => self.high_priorty.dirty_scopes.insert(scope),
            EventPriority::Medium => self.medium_priority.dirty_scopes.insert(scope),
            EventPriority::Low => self.low_priority.dirty_scopes.insert(scope),
        };
    }

    pub fn clean_up_garbage(&mut self) {
        let mut scopes_to_kill = Vec::new();
        let mut garbage_list = Vec::new();

        for scope in self.garbage_scopes.drain() {
            let scope = self.shared.get_scope_mut(scope).unwrap();
            for node in scope.consume_garbage() {
                garbage_list.push(node);
            }

            while let Some(node) = garbage_list.pop() {
                match &node {
                    VNode::Text(_) => {
                        self.shared.collect_garbage(node.direct_id());
                    }
                    VNode::Anchor(_) => {
                        self.shared.collect_garbage(node.direct_id());
                    }
                    VNode::Suspended(_) => {
                        self.shared.collect_garbage(node.direct_id());
                    }

                    VNode::Element(el) => {
                        self.shared.collect_garbage(node.direct_id());
                        for child in el.children {
                            garbage_list.push(child);
                        }
                    }

                    VNode::Fragment(frag) => {
                        for child in frag.children {
                            garbage_list.push(child);
                        }
                    }

                    VNode::Component(comp) => {
                        // TODO: run the hook destructors and then even delete the scope

                        let scope_id = comp.ass_scope.get().unwrap();
                        let scope = self.shared.get_scope(scope_id).unwrap();
                        let root = scope.root();
                        garbage_list.push(root);
                        scopes_to_kill.push(scope_id);
                    }
                }
            }
        }

        for scope in scopes_to_kill.drain(..) {
            //
            // kill em
        }
    }
}

#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)]
pub struct DirtyScope {
    height: u32,
    start_tick: u32,
}

pub struct PriortySystem {
    pub pending_scopes: Vec<ScopeId>,
    pub dirty_scopes: HashSet<ScopeId>,
}

impl PriortySystem {
    pub fn new() -> Self {
        Self {
            pending_scopes: Default::default(),
            dirty_scopes: Default::default(),
        }
    }

    fn has_work(&self) -> bool {
        self.pending_scopes.len() > 0 || self.dirty_scopes.len() > 0
    }
}
@@ -45,9 +45,8 @@ pub struct Scope {

    // meta
    pub(crate) function_name: &'static str,

-    // A reference to the resources shared by all the comonents
-    pub(crate) vdom: SharedResources,
+    // // A reference to the resources shared by all the comonents
+    // pub(crate) vdom: SharedResources,
}

// The type of closure that wraps calling components

@@ -75,18 +74,17 @@ impl Scope {

        child_nodes: ScopeChildren,

-        vdom: SharedResources,
-
+        // vdom: SharedResources,
        function_name: &'static str,
    ) -> Self {
        let child_nodes = unsafe { child_nodes.extend_lifetime() };

-        // insert ourself as a descendent of the parent
-        // when the parent is removed, this map will be traversed, and we will also be cleaned up.
-        if let Some(parent) = &parent {
-            let parent = unsafe { vdom.get_scope(*parent) }.unwrap();
-            parent.descendents.borrow_mut().insert(arena_idx);
-        }
+        // // insert ourself as a descendent of the parent
+        // // when the parent is removed, this map will be traversed, and we will also be cleaned up.
+        // if let Some(parent) = &parent {
+        //     let parent = unsafe { vdom.get_scope(*parent) }.unwrap();
+        //     parent.descendents.borrow_mut().insert(arena_idx);
+        // }

        Self {
            function_name,

@@ -95,7 +93,7 @@ impl Scope {
            parent_idx: parent,
            our_arena_idx: arena_idx,
            height,
-            vdom,
+            // vdom,
            frames: ActiveFrame::new(),

            hooks: Default::default(),
@@ -168,33 +166,35 @@ impl Scope {
            "clean up your garabge please"
        );

-        // make sure we drop all borrowed props manually to guarantee that their drop implementation is called before we
-        // run the hooks (which hold an &mut Referrence)
-        // right now, we don't drop
-        let vdom = &self.vdom;
-        self.borrowed_props
-            .get_mut()
-            .drain(..)
-            .map(|li| unsafe { &*li })
-            .for_each(|comp| {
-                // First drop the component's undropped references
-                let scope_id = comp.ass_scope.get().unwrap();
-                let scope = unsafe { vdom.get_scope_mut(scope_id) }.unwrap();
-                scope.ensure_drop_safety();
+        todo!("arch changes");

-                // Now, drop our own reference
-                let mut dropper = comp.drop_props.borrow_mut().take().unwrap();
-                dropper();
-            });
+        // // make sure we drop all borrowed props manually to guarantee that their drop implementation is called before we
+        // // run the hooks (which hold an &mut Referrence)
+        // // right now, we don't drop
+        // // let vdom = &self.vdom;
+        // self.borrowed_props
+        //     .get_mut()
+        //     .drain(..)
+        //     .map(|li| unsafe { &*li })
+        //     .for_each(|comp| {
+        //         // First drop the component's undropped references
+        //         let scope_id = comp.ass_scope.get().unwrap();
+        //         let scope = unsafe { vdom.get_scope_mut(scope_id) }.unwrap();
+        //         scope.ensure_drop_safety();

        // Now that all the references are gone, we can safely drop our own references in our listeners.
        self.listeners
            .get_mut()
            .drain(..)
            .map(|li| unsafe { &*li })
            .for_each(|listener| {
                listener.callback.borrow_mut().take();
            });
+        // // Now, drop our own reference
+        // let mut dropper = comp.drop_props.borrow_mut().take().unwrap();
+        // dropper();
+        // });
+
+        // // Now that all the references are gone, we can safely drop our own references in our listeners.
+        // self.listeners
+        //     .get_mut()
+        //     .drain(..)
+        //     .map(|li| unsafe { &*li })
+        //     .for_each(|listener| {
+        //         listener.callback.borrow_mut().take();
+        //     });
    }

    // A safe wrapper around calling listeners
@@ -40,14 +40,12 @@ pub struct VirtualDom {
    ///
    /// This is wrapped in an UnsafeCell because we will need to get mutable access to unique values in unique bump arenas
    /// and rusts's guartnees cannot prove that this is safe. We will need to maintain the safety guarantees manually.
-    pub shared: SharedResources,
+    pub scheduler: Scheduler,

    /// The index of the root component
    /// Should always be the first (gen=0, id=0)
    base_scope: ScopeId,

-    scheduler: Scheduler,
-
    // for managing the props that were used to create the dom
    #[doc(hidden)]
    _root_prop_type: std::any::TypeId,

@@ -102,7 +100,7 @@ impl VirtualDom {
    ///
    /// Note: the VirtualDOM is not progressed, you must either "run_with_deadline" or use "rebuild" to progress it.
    pub fn new_with_props<P: Properties + 'static>(root: FC<P>, root_props: P) -> Self {
-        let components = SharedResources::new();
+        let components = Scheduler::new();

        let root_props: Pin<Box<dyn Any>> = Box::pin(root_props);
        let props_ptr = root_props.as_ref().downcast_ref::<P>().unwrap() as *const P;

@@ -119,17 +117,17 @@ impl VirtualDom {
            base_scope,
            _root_props: root_props,
-            scheduler: Scheduler::new(components.clone()),
-            shared: components,
+            scheduler: components,
            _root_prop_type: TypeId::of::<P>(),
        }
    }

    pub fn base_scope(&self) -> &Scope {
-        self.shared.get_scope(self.base_scope).unwrap()
+        self.scheduler.get_scope(self.base_scope).unwrap()
    }

    pub fn get_scope(&self, id: ScopeId) -> Option<&Scope> {
-        self.shared.get_scope(id)
+        self.scheduler.get_scope(id)
    }

    /// Performs a *full* rebuild of the virtual dom, returning every edit required to generate the actual dom rom scratch

@@ -154,10 +152,10 @@ impl VirtualDom {
    /// the diff and creating nodes can be expensive, so we provide this method to avoid blocking the main thread. This
    /// method can be useful when needing to perform some crucial periodic tasks.
    pub async fn rebuild_async<'s>(&'s mut self) -> Mutations<'s> {
-        let mut diff_machine = DiffMachine::new(Mutations::new(), self.base_scope, &self.shared);
+        let mut diff_machine = DiffMachine::new(Mutations::new(), self.base_scope, &self.scheduler);

        let cur_component = self
-            .shared
+            .scheduler
            .get_scope_mut(self.base_scope)
            .expect("The base scope should never be moved");

@@ -189,10 +187,10 @@ impl VirtualDom {
    }

    pub async fn diff_async<'s>(&'s mut self) -> Mutations<'s> {
-        let mut diff_machine = DiffMachine::new(Mutations::new(), self.base_scope, &self.shared);
+        let mut diff_machine = DiffMachine::new(Mutations::new(), self.base_scope, &self.scheduler);

        let cur_component = self
-            .shared
+            .scheduler
            .get_scope_mut(self.base_scope)
            .expect("The base scope should never be moved");

@@ -275,8 +273,8 @@ impl VirtualDom {
    pub async fn run_with_deadline<'s>(
        &'s mut self,
        deadline: impl Future<Output = ()>,
-    ) -> Option<Mutations<'s>> {
-        let mut committed_mutations = Mutations::new();
+    ) -> Option<Vec<Mutations<'s>>> {
+        let mut committed_mutations = Vec::new();
        let mut deadline = Box::pin(deadline.fuse());

        // TODO:

@@ -302,9 +300,14 @@ impl VirtualDom {

        // Work through the current subtree, and commit the results when it finishes
        // When the deadline expires, give back the work
+        let mut new_mutations = Mutations::new();
        match self.scheduler.work_with_deadline(&mut deadline).await {
-            FiberResult::Done(mut mutations) => {
-                committed_mutations.extend(&mut mutations);
+            FiberResult::Done => {
+                // return Some(mutations);
+                // for edit in mutations.edits {
+                //     committed_mutations.edits.push(edit);
+                // }
+                // committed_mutations.extend(&mut mutations);

                /*
                quick return if there's no work left, so we can commit before the deadline expires

@@ -315,9 +318,9 @@ impl VirtualDom {
                It makes sense to try and progress the DOM faster
                */

-                if !self.scheduler.has_any_work() {
-                    return Some(committed_mutations);
-                }
+                // if !self.scheduler.has_any_work() {
+                //     return Some(committed_mutations);
+                // }
            }
            FiberResult::Interrupted => return None,
        }

@@ -325,7 +328,7 @@ impl VirtualDom {
    }

    pub fn get_event_sender(&self) -> futures_channel::mpsc::UnboundedSender<EventTrigger> {
-        self.shared.ui_event_sender.clone()
+        self.scheduler.ui_event_sender.clone()
    }

    pub fn has_work(&self) -> bool {
@@ -1,7 +1,7 @@
//! tests to prove that the iterative implementation works

use anyhow::{Context, Result};
-use dioxus::{arena::SharedResources, diff::DiffMachine, prelude::*, DomEdit, Mutations};
+use dioxus::{arena::Scheduler, diff::DiffMachine, prelude::*, DomEdit, Mutations};
mod test_logging;
use dioxus_core as dioxus;
use dioxus_html as dioxus_elements;
@@ -3,7 +3,7 @@
use bumpalo::Bump;

use dioxus::{
-    arena::SharedResources, diff::DiffMachine, prelude::*, DiffInstruction, DomEdit, MountType,
+    arena::Scheduler, diff::DiffMachine, prelude::*, DiffInstruction, DomEdit, MountType,
};
use dioxus_core as dioxus;
use dioxus_html as dioxus_elements;

@@ -17,13 +17,13 @@ const IS_LOGGING_ENABLED: bool = false;

struct TestDom {
    bump: Bump,
-    resources: SharedResources,
+    resources: Scheduler,
}
impl TestDom {
    fn new() -> TestDom {
        test_logging::set_up_logging(IS_LOGGING_ENABLED);
        let bump = Bump::new();
-        let resources = SharedResources::new();
+        let resources = Scheduler::new();
        TestDom { bump, resources }
    }
    fn new_factory<'a>(&'a self) -> NodeFactory<'a> {
@@ -1,7 +1,7 @@
use bumpalo::Bump;

use anyhow::{Context, Result};
-use dioxus::{arena::SharedResources, diff::DiffMachine, prelude::*, DomEdit};
+use dioxus::{arena::Scheduler, diff::DiffMachine, prelude::*, DomEdit};
use dioxus_core as dioxus;
use dioxus_html as dioxus_elements;

@@ -17,11 +17,6 @@ async fn event_queue_works() {
    let edits = dom.rebuild();

    async_std::task::spawn_local(async move {
-        match dom.run_unbounded().await {
-            Err(_) => todo!(),
-            Ok(mutations) => {
-                //
-            }
-        }
+        let mutations = dom.run_unbounded().await;
    });
}
@@ -1,5 +1,5 @@
use anyhow::{Context, Result};
-use dioxus::{arena::SharedResources, diff::DiffMachine, prelude::*, DomEdit};
+use dioxus::{arena::Scheduler, diff::DiffMachine, prelude::*, DomEdit};
use dioxus_core as dioxus;
use dioxus_html as dioxus_elements;
packages/core/tests/set_state_batch.rs — new file, 41 lines
@@ -0,0 +1,41 @@
use futures_util::StreamExt;

/*
furtures_channel provides us some batching simply due to how Rust's async works.

Any hook that uses schedule_update is simply deferring to unbounded_send. Multiple
unbounded_sends can be linked together in succession provided there isn't an "await"
between them. Our internal batching mechanism simply waits for the "schedule_update"
to fire and then pulls any messages off the unbounded_send queue.

Additionally, due to how our "time slicing" works we'll always come back and check
in for new work if the deadline hasn't expired. On average, our deadline should be
about 10ms, which is way more than enough for diffing/creating to happen.
*/
#[async_std::test]
async fn batch() {
    let (sender, mut recver) = futures_channel::mpsc::unbounded::<i32>();

    let handle = async_std::task::spawn(async move {
        let msg = recver.next().await;
        while let Ok(msg) = recver.try_next() {
            println!("{:#?}", msg);
        }
        let msg = recver.next().await;
        while let Ok(msg) = recver.try_next() {
            println!("{:#?}", msg);
        }
    });

    sender.unbounded_send(1).unwrap();
    sender.unbounded_send(2).unwrap();
    sender.unbounded_send(3).unwrap();
    sender.unbounded_send(4).unwrap();

    async_std::task::sleep(std::time::Duration::from_millis(100)).await;

    sender.unbounded_send(5).unwrap();
    sender.unbounded_send(6).unwrap();
    sender.unbounded_send(7).unwrap();
    sender.unbounded_send(8).unwrap();
}
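A runnable sketch of the batching property the comment block above describes: sends issued without an intervening await are all queued, so one wakeup plus a `try_next` loop drains the whole batch:

// Sketch only: drain a batch of queued messages after a single await.
use futures_util::StreamExt;

#[async_std::main]
async fn main() {
    let (sender, mut receiver) = futures_channel::mpsc::unbounded::<i32>();

    sender.unbounded_send(1).unwrap();
    sender.unbounded_send(2).unwrap();
    sender.unbounded_send(3).unwrap();

    // One await pulls the first message...
    let first = receiver.next().await;
    assert_eq!(first, Some(1));

    // ...and the rest of the batch is already queued, draining without blocking.
    let mut rest = Vec::new();
    while let Ok(Some(msg)) = receiver.try_next() {
        rest.push(msg);
    }
    assert_eq!(rest, vec![2, 3]);
}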
@@ -3,7 +3,7 @@
//! This means you can actually call it synchronously if you want.

use anyhow::{Context, Result};
-use dioxus::{arena::SharedResources, diff::DiffMachine, prelude::*, scope::Scope};
+use dioxus::{arena::Scheduler, diff::DiffMachine, prelude::*, scope::Scope};
use dioxus_core as dioxus;
use dioxus_html as dioxus_elements;
use futures_util::FutureExt;