feat: suspense!

Jonathan Kelley 2022-11-06 01:48:34 -07:00
parent aec1b326ba
commit a38fc9e4ab
16 changed files with 469 additions and 236 deletions

View file

@ -38,6 +38,10 @@ indexmap = "1.7"
# Serialize the Edits for use in Webview/Liveview instances
serde = { version = "1", features = ["derive"], optional = true }
futures-task = "0.3.25"
[dev-dependencies]
tokio = { version = "*", features = ["full"] }
[features]
default = []

View file

@ -1,11 +1,11 @@
use std::pin::Pin;
use crate::factory::{FiberLeaf, RenderReturn};
use crate::innerlude::SuspenseContext;
use crate::mutations::Mutation;
use crate::mutations::Mutation::*;
use crate::nodes::VNode;
use crate::nodes::{DynamicNode, TemplateNode};
use crate::suspense::LeafLocation;
use crate::virtualdom::VirtualDom;
use crate::{AttributeValue, Element, ElementId, TemplateAttribute};
use bumpalo::boxed::Box as BumpBox;
@ -209,29 +209,36 @@ impl VirtualDom {
RenderReturn::Async(fut) => {
let new_id = self.next_element(template);
// move up the tree looking for the first suspense boundary
// our current component can not be a suspense boundary, so we skip it
for scope_id in self.scope_stack.iter().rev().skip(1) {
let scope = &mut self.scopes[scope_id.0];
if let Some(fiber) = &mut scope.suspense_boundary {
// save the fiber leaf onto the fiber itself
let detached: &mut FiberLeaf<'static> =
unsafe { std::mem::transmute(fut) };
let scope = self.scope_stack.last().unwrap();
let scope = &self.scopes[scope.0];
let boundary = scope.consume_context::<SuspenseContext>().unwrap();
// And save the fiber leaf using the placeholder node
// this way, when we resume the fiber, we just need to "pick up placeholder"
fiber.futures.insert(
LeafLocation {
element: new_id,
scope: *scope_id,
},
detached,
);
// try to poll the future once - many times it will be ready immediately or require little to no work
self.suspended_scopes.insert(*scope_id);
break;
}
}
todo!();
// // move up the tree looking for the first suspense boundary
// // our current component can not be a suspense boundary, so we skip it
// for scope_id in self.scope_stack.iter().rev().skip(1) {
// if let Some(fiber) = &mut scope.suspense_boundary {
// // save the fiber leaf onto the fiber itself
// let detached: &mut FiberLeaf<'static> =
// unsafe { std::mem::transmute(fut) };
// // And save the fiber leaf using the placeholder node
// // this way, when we resume the fiber, we just need to "pick up placeholder"
// fiber.futures.insert(
// LeafLocation {
// element: new_id,
// scope: *scope_id,
// },
// detached,
// );
// self.suspended_scopes.insert(*scope_id);
// break;
// }
placeholder.set(Some(new_id));
mutations.push(AssignId {

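Note: the replaced path above stores the suspended leaf on the nearest boundary and then "tries to poll the future once", since many leaves resolve immediately. A minimal sketch of that poll-once step, written as if inside this crate (the helper name and the Unpin bound are illustrative simplifications, not code from this commit):

use std::task::{Context, Poll, Waker};
use futures_util::FutureExt;

// Poll a suspense leaf a single time. Ready => render the element now instead
// of leaving the AssignId placeholder; Pending => park it on the boundary and
// let the waker pick the placeholder back up later.
fn poll_leaf_once<'a, F>(fut: &mut F, waker: &Waker) -> Option<Element<'a>>
where
    F: std::future::Future<Output = Element<'a>> + Unpin,
{
    let mut cx = Context::from_waker(waker);
    match fut.poll_unpin(&mut cx) {
        Poll::Ready(element) => Some(element),
        Poll::Pending => None,
    }
}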
View file

@ -1,6 +1,7 @@
use futures_channel::mpsc::UnboundedSender;
use futures_util::Future;
use std::{cell::RefCell, rc::Rc};
use slab::Slab;
use std::{cell::RefCell, rc::Rc, sync::Arc};
use crate::innerlude::ScopeId;
/// The type of message that can be sent to the scheduler.
@ -25,7 +26,7 @@ pub enum SchedulerMsg {
#[derive(Clone)]
pub struct FutureQueue {
pub sender: UnboundedSender<SchedulerMsg>,
pub queue: Rc<RefCell<Vec<Box<dyn Future<Output = ()>>>>>,
pub queue: RefCell<Slab<Arc<dyn Future<Output = ()>>>>,
}
impl FutureQueue {
@ -36,13 +37,10 @@ impl FutureQueue {
}
}
pub fn spawn(&self, id: ScopeId, fut: impl Future<Output = ()> + 'static) -> TaskId {
self.sender
.unbounded_send(SchedulerMsg::NewTask(id))
.unwrap();
self.queue.borrow_mut().push(Box::new(fut));
pub fn spawn(&self, scope: ScopeId, fut: impl Future<Output = ()> + 'static) -> TaskId {
let id = self.queue.borrow_mut().insert(Arc::new(fut));
todo!()
TaskId { id, scope }
}
pub fn remove(&self, id: TaskId) {

View file

@ -6,26 +6,24 @@ mod create;
mod diff;
mod events;
mod factory;
mod future_container;
mod garbage;
mod lazynodes;
mod mutations;
mod nodes;
mod properties;
mod scheduler;
mod scope_arena;
mod scopes;
// mod subtree;
mod suspense;
mod virtualdom;
pub(crate) mod innerlude {
pub use crate::arena::*;
pub use crate::events::*;
pub use crate::future_container::*;
pub use crate::lazynodes::*;
pub use crate::mutations::*;
pub use crate::nodes::*;
pub use crate::properties::*;
pub use crate::scheduler::*;
pub use crate::scopes::*;
pub use crate::virtualdom::*;

View file

@ -0,0 +1,73 @@
use std::{cell::RefCell, rc::Rc, sync::Arc};
use futures_task::ArcWake;
use futures_util::Future;
use slab::Slab;
use crate::{innerlude::Mutation, ScopeId};
type Shared<T> = Rc<RefCell<T>>;
struct LocalTask {}
pub struct Fiber {
// The work-in-progress of this suspended tree
pub mutations: Vec<Mutation<'static>>,
}
#[derive(Clone)]
pub struct SchedulerHandle {
tasks: Shared<Slab<LocalTask>>,
suspended: Shared<Slab<LocalTask>>,
fibers: Shared<Slab<Fiber>>,
tx: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
}
struct TaskEntry {}
struct LocalTaskWaker<T> {
future: T,
id: TaskId,
tx: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
}
unsafe impl<T> Send for LocalTaskWaker<T> {}
unsafe impl<T> Sync for LocalTaskWaker<T> {}
impl<T> ArcWake for LocalTaskWaker<T> {
fn wake(self: Arc<Self>) {
Self::wake_by_ref(&self)
}
fn wake_by_ref(arc_self: &Arc<Self>) {
arc_self
.tx
.unbounded_send(SchedulerMsg::TaskNotified(arc_self.id))
.unwrap();
}
}
impl SchedulerHandle {
fn spawn(&self, fut: impl Future<Output = ()> + 'static) -> TaskId {
use futures_task::waker;
let tasks = self.tasks.borrow_mut();
let entry = tasks.vacant_entry();
let id = TaskId(entry.key());
let task = Arc::new(LocalTaskWaker {
future: fut,
id,
tx: self.tx.clone(),
});
let local_task = waker(task.clone());
entry.insert(val);
//
todo!()
}
fn remove(&self, id: TaskId) {
//
}
}

View file

@ -0,0 +1,31 @@
use futures_util::Future;
use slab::Slab;
use std::{cell::RefCell, pin::Pin, rc::Rc, sync::Arc};
use super::{LocalTask, SchedulerMsg, SuspenseLeaf};
#[derive(Clone)]
pub struct SchedulerHandle(Rc<HandleInner>);
impl std::ops::Deref for SchedulerHandle {
type Target = HandleInner;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct HandleInner {
pub sender: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
pub tasks: RefCell<Slab<LocalTask>>,
pub leaves: RefCell<Slab<SuspenseLeaf>>,
}
impl SchedulerHandle {
pub fn new(sender: futures_channel::mpsc::UnboundedSender<SchedulerMsg>) -> Self {
Self(Rc::new(HandleInner {
sender,
tasks: RefCell::new(Slab::new()),
leaves: RefCell::new(Slab::new()),
}))
}
}

View file

@ -0,0 +1,58 @@
use std::sync::Arc;
use crate::ScopeId;
mod handle;
mod suspense;
mod task;
mod wait;
pub use handle::*;
use slab::Slab;
pub use suspense::*;
pub use task::*;
/// The type of message that can be sent to the scheduler.
///
/// These messages control how the scheduler will process updates to the UI.
#[derive(Debug)]
pub enum SchedulerMsg {
/// Events from the Renderer
Event,
/// Immediate updates from Components that mark them as dirty
Immediate(ScopeId),
/// Mark all components as dirty and update them
DirtyAll,
/// A task has woken and needs to be progressed
TaskNotified(TaskId),
/// A task has woken and needs to be progressed
SuspenseNotified(SuspenseId),
}
pub struct Scheduler {
rx: futures_channel::mpsc::UnboundedReceiver<SchedulerMsg>,
ready_suspense: Vec<ScopeId>,
pub handle: SchedulerHandle,
}
impl Scheduler {
pub fn new() -> Self {
let (tx, rx) = futures_channel::mpsc::unbounded();
Self {
rx,
handle: SchedulerHandle::new(tx),
ready_suspense: Default::default(),
}
}
/// Waits for a future to complete that marks the virtualdom as dirty
///
/// Not all messages will mark a virtualdom as dirty, so this waits for a message whose side effects do mark it dirty
pub fn wait_for_work(&mut self) {
//
}
}
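A hedged sketch of the intended message flow, written as if inside the scheduler module (`pump_one_task` is an illustrative helper, not part of this commit): spawning through the handle inserts a LocalTask into the slab and immediately queues a TaskNotified message, which the waiting side receives and uses to poll the stored task.

async fn pump_one_task(scheduler: &mut Scheduler) {
    use crate::ScopeId;
    use futures_util::StreamExt;

    // Spawning registers the task and wakes the scheduler right away.
    let id = scheduler.handle.spawn(ScopeId(0), async {});

    // The wait side receives the notification and can now poll `tasks[id.0]`.
    if let Some(SchedulerMsg::TaskNotified(notified)) = scheduler.rx.next().await {
        assert_eq!(notified, id);
    }
}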

View file

@ -0,0 +1,71 @@
use std::{collections::HashSet, rc::Rc};
use futures_task::{RawWaker, RawWakerVTable, Waker};
use futures_util::Future;
use crate::{innerlude::Mutation, Element, Scope, ScopeId};
use super::SchedulerMsg;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct SuspenseId(pub usize);
pub type SuspenseContext = Rc<SuspenseBoundary>;
/// Essentially a fiber in React
pub struct SuspenseBoundary {
pub id: ScopeId,
waiting_on: HashSet<SuspenseId>,
mutations: Vec<Mutation<'static>>,
}
impl SuspenseBoundary {
pub fn new(id: ScopeId) -> Self {
Self {
id,
waiting_on: Default::default(),
mutations: Default::default(),
}
}
}
/*
many times the future will be ready again as soon as it's polled, so we can spin on it until it doesn't wake us up immediately
*/
pub struct SuspenseLeaf {
pub id: SuspenseId,
pub scope: ScopeId,
pub boundary: ScopeId,
pub tx: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
pub task: *mut dyn Future<Output = Element<'static>>,
}
pub fn make_suspense_waker(task: &SuspenseLeaf) -> Waker {
let raw = RawWaker::new(task as *const SuspenseLeaf as *const _, task_vtable());
unsafe { Waker::from_raw(raw) }
}
fn task_vtable() -> &'static RawWakerVTable {
&RawWakerVTable::new(clone, wake, wake_by_ref, drop_task)
}
unsafe fn clone(data: *const ()) -> RawWaker {
RawWaker::new(data as *const (), task_vtable())
}
unsafe fn wake(data: *const ()) {
wake_by_ref(data);
}
unsafe fn wake_by_ref(data: *const ()) {
let task = &*(data as *const SuspenseLeaf);
task.tx
.unbounded_send(SchedulerMsg::SuspenseNotified(task.id))
.expect("Scheduler should exist");
}
unsafe fn drop_task(_data: *const ()) {
// doesn't do anything
}
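The block comment above describes the polling strategy for leaves: keep polling while the waker fires immediately, and only park once the future is genuinely pending. A hedged sketch of that loop, assuming an Unpin future and an illustrative `woke_immediately` callback (the real code observes wakes via SchedulerMsg::SuspenseNotified):

use std::task::{Context, Poll};
use futures_util::FutureExt;

fn poll_until_quiet<F: std::future::Future + Unpin>(
    fut: &mut F,
    cx: &mut Context<'_>,
    mut woke_immediately: impl FnMut() -> bool,
) -> Option<F::Output> {
    loop {
        match fut.poll_unpin(cx) {
            // The leaf finished; its SuspenseBoundary can splice the real nodes in.
            Poll::Ready(out) => return Some(out),
            // The waker fired during the poll itself, so spin and poll again.
            Poll::Pending if woke_immediately() => continue,
            // Genuinely pending: stop and wait for SuspenseNotified.
            Poll::Pending => return None,
        }
    }
}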

View file

@ -0,0 +1,81 @@
use std::{cell::RefCell, mem, ops::DerefMut, pin::Pin, process::Output, rc::Rc, sync::Arc};
use futures_task::{waker, ArcWake, Context, RawWaker, RawWakerVTable, Waker};
use futures_util::{pin_mut, Future, FutureExt};
use slab::Slab;
use crate::ScopeId;
use super::{HandleInner, SchedulerHandle, SchedulerMsg};
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct TaskId(pub usize);
/// the task itself is the waker
#[derive(Clone)]
pub struct LocalTask {
id: TaskId,
scope: ScopeId,
tx: futures_channel::mpsc::UnboundedSender<SchedulerMsg>,
pub task: *mut dyn Future<Output = ()>,
}
impl HandleInner {
pub fn spawn(&self, scope: ScopeId, task: impl Future<Output = ()> + 'static) -> TaskId {
let mut tasks = self.tasks.borrow_mut();
let entry = tasks.vacant_entry();
let task_id = TaskId(entry.key());
entry.insert(LocalTask {
id: task_id,
tx: self.sender.clone(),
task: Box::into_raw(Box::new(task)),
scope,
});
self.sender
.unbounded_send(SchedulerMsg::TaskNotified(task_id))
.expect("Scheduler should exist");
task_id
}
// drops the future
pub fn remove(&self, id: TaskId) {
//
}
// Aborts the future
pub fn abort(&self, id: TaskId) {
//
}
}
pub fn make_task_waker(task: &LocalTask) -> Waker {
let raw = RawWaker::new(task as *const LocalTask as *const _, task_vtable());
unsafe { Waker::from_raw(raw) }
}
fn task_vtable() -> &'static RawWakerVTable {
&RawWakerVTable::new(clone, wake, wake_by_ref, drop_task)
}
unsafe fn clone(data: *const ()) -> RawWaker {
RawWaker::new(data as *const (), task_vtable())
}
unsafe fn wake(data: *const ()) {
wake_by_ref(data);
}
unsafe fn wake_by_ref(data: *const ()) {
let task = &*(data as *const LocalTask);
task.tx
.unbounded_send(SchedulerMsg::TaskNotified(task.id))
.expect("Scheduler should exist");
}
unsafe fn drop_task(_data: *const ()) {
// doesn't do anything
}

View file

@ -0,0 +1,52 @@
use std::{ops::DerefMut, pin::Pin};
use futures_task::Context;
use futures_util::StreamExt;
use crate::{innerlude::make_task_waker, VirtualDom};
use super::SchedulerMsg;
impl VirtualDom {
/// Wait for futures internal to the virtualdom
///
/// This is cancel-safe, so if the future is dropped, you can still push events into the virtualdom
pub async fn wait_for_work(&mut self) {
loop {
let msg = self.scheduler.rx.next().await.unwrap();
println!("msg received: {:?}", msg);
match msg {
SchedulerMsg::Event => todo!(),
SchedulerMsg::Immediate(_) => todo!(),
SchedulerMsg::DirtyAll => todo!(),
SchedulerMsg::TaskNotified(id) => {
let mut tasks = self.scheduler.handle.tasks.borrow_mut();
let local_task = &tasks[id.0];
// // attach the waker to itself
let waker = make_task_waker(local_task);
let mut cx = Context::from_waker(&waker);
let mut fut = unsafe { &mut *local_task.task };
let pinned = unsafe { Pin::new_unchecked(fut.deref_mut()) };
match pinned.poll(&mut cx) {
futures_task::Poll::Ready(_) => {
// remove the task
tasks.remove(id.0);
}
futures_task::Poll::Pending => {}
}
if tasks.is_empty() {
return;
}
}
// SchedulerMsg::TaskNotified(id) => {},
SchedulerMsg::SuspenseNotified(_) => todo!(),
}
}
}
}

View file

@ -7,7 +7,6 @@ use crate::{
arena::ElementId,
bump_frame::BumpFrame,
factory::RenderReturn,
nodes::VNode,
scopes::{ScopeId, ScopeState},
virtualdom::VirtualDom,
};
@ -26,8 +25,6 @@ impl VirtualDom {
id,
height,
props,
tasks: self.pending_futures.clone(),
suspense_boundary: None,
node_arena_1: BumpFrame::new(50),
node_arena_2: BumpFrame::new(50),
render_cnt: Default::default(),
@ -35,6 +32,7 @@ impl VirtualDom {
hook_vals: Default::default(),
hook_idx: Default::default(),
shared_contexts: Default::default(),
tasks: self.scheduler.handle.clone(),
});
id

View file

@ -10,8 +10,13 @@ use futures_channel::mpsc::UnboundedSender;
use futures_util::Future;
use crate::{
any_props::AnyProps, arena::ElementId, bump_frame::BumpFrame, future_container::FutureQueue,
innerlude::SchedulerMsg, lazynodes::LazyNodes, nodes::VNode, suspense::Fiber, TaskId,
any_props::AnyProps,
arena::ElementId,
bump_frame::BumpFrame,
innerlude::{SchedulerHandle, SchedulerMsg},
lazynodes::LazyNodes,
nodes::VNode,
TaskId,
};
pub struct Scope<'a, T = ()> {
@ -64,9 +69,7 @@ pub struct ScopeState {
pub(crate) shared_contexts: RefCell<HashMap<TypeId, Box<dyn Any>>>,
pub tasks: FutureQueue,
pub suspense_boundary: Option<Fiber<'static>>,
pub tasks: SchedulerHandle,
pub props: *mut dyn AnyProps<'static>,
}
@ -296,12 +299,6 @@ impl ScopeState {
/// Pushes the future onto the poll queue to be polled after the component renders.
pub fn push_future(&self, fut: impl Future<Output = ()> + 'static) -> TaskId {
// wake up the scheduler if it is sleeping
self.tasks
.sender
.unbounded_send(SchedulerMsg::NewTask(self.id))
.expect("Scheduler should exist");
self.tasks.spawn(self.id, fut)
}
@ -314,14 +311,16 @@ impl ScopeState {
///
/// This is good for tasks that need to be run after the component has been dropped.
pub fn spawn_forever(&self, fut: impl Future<Output = ()> + 'static) -> TaskId {
// The root scope will never be unmounted so we can just add the task at the top of the app
let id = self.tasks.spawn(ScopeId(0), fut);
// wake up the scheduler if it is sleeping
self.tasks
.sender
.unbounded_send(SchedulerMsg::NewTask(self.id))
.unbounded_send(SchedulerMsg::TaskNotified(id))
.expect("Scheduler should exist");
// The root scope will never be unmounted so we can just add the task at the top of the app
self.tasks.spawn(ScopeId(0), fut)
id
}
/// Informs the scheduler that this task is no longer needed and should be removed

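A hedged usage sketch of the two spawn paths documented above, assuming `cx` dereferences to the ScopeState shown here (the component and the future bodies are placeholders, not code from this commit):

fn my_component(cx: Scope) -> Element {
    // Tied to this scope: the task is removed when the component unmounts.
    cx.push_future(async { /* per-component background work */ });

    // Registered on the root scope (ScopeId(0)), so it outlives this component.
    cx.spawn_forever(async { /* app-lifetime background work */ });

    None
}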
View file

@ -1,149 +0,0 @@
//! Container for polling suspended nodes
//!
//! Whenever a future returns a value, we walk the tree upwards and check if any of the parents are suspended.
use bumpalo::boxed::Box as BumpBox;
use futures_util::Future;
use std::{
collections::{HashMap, HashSet},
future::poll_fn,
pin::Pin,
task::Poll,
};
use crate::{
factory::FiberLeaf, innerlude::Mutation, Element, ElementId, ScopeId, VNode, VirtualDom,
};
impl VirtualDom {
// todo: lots of hammering lifetimes here...
async fn wait_for_suspense(&mut self) {
let res = poll_fn(|cx| {
let all_suspended_complete = true;
let suspended_scopes: Vec<_> = self.suspended_scopes.iter().copied().collect();
for scope in suspended_scopes {
let mut fiber = self.scopes[scope.0]
.suspense_boundary
.as_mut()
.expect(" A fiber to be present if the scope is suspended");
let mut fiber: &mut Fiber = unsafe { std::mem::transmute(fiber) };
let mutations = &mut fiber.mutations;
let mutations: &mut Vec<Mutation> = unsafe { std::mem::transmute(mutations) };
let keys = fiber.futures.keys().copied().collect::<Vec<_>>();
for loc in keys {
let fut = *fiber.futures.get_mut(&loc).unwrap();
let fut = unsafe { &mut *fut };
let fut: &mut FiberLeaf<'_> = unsafe { std::mem::transmute(fut) };
use futures_util::FutureExt;
match fut.poll_unpin(cx) {
Poll::Ready(nodes) => {
// remove the future from the fiber
fiber.futures.remove(&loc).unwrap();
// set the original location to the new nodes
// todo!("set the original location to the new nodes");
let template = nodes.unwrap();
let scope = &self.scopes[scope.0];
let template = scope.bump().alloc(template);
let template: &VNode = unsafe { std::mem::transmute(template) };
// now create the template
self.create(mutations, template);
}
Poll::Pending => todo!("still working huh"),
}
}
// let mut fiber = Pin::new(&mut fiber);
// let mut scope = scope;
// let mut vnode = self.scopes[scope.0].vnode.take().unwrap();
// let mut vnode = Pin::new(&mut vnode);
// let mut vnode = poll_fn(|cx| {
// let mut vnode = Pin::new(&mut vnode);
// let mut fiber = Pin::new(&mut fiber);
// let res = vnode.as_mut().poll(cx);
// if let Poll::Ready(res) = res {
// Poll::Ready(res)
// } else {
// Poll::Pending
// }
// })
// .await;
// self.scopes[scope.0].vnode = Some(vnode);
// self.scopes[scope.0].suspense_boundary = Some(fiber);
}
match all_suspended_complete {
true => Poll::Ready(()),
false => Poll::Pending,
}
});
todo!()
}
}
// impl SuspenseGenerator {
// async fn wait_for_work(&mut self) {
// use futures_util::future::{select, Either};
// // let scopes = &mut self.scopes;
// let suspense_status = poll_fn(|cx| {
// // let mut tasks = scopes.tasks.tasks.borrow_mut();
// // tasks.retain(|_, task| task.as_mut().poll(cx).is_pending());
// match true {
// // match tasks.is_empty() {
// true => Poll::Ready(()),
// false => Poll::Pending,
// }
// });
// // Suspense {
// // maybe generate futures
// // only render when all the futures are ready
// // }
// /*
// div {
// as1 {}
// as2 {}
// as3 {}
// }
// */
// // match select(task_poll, self.channel.1.next()).await {
// // Either::Left((_, _)) => {}
// // Either::Right((msg, _)) => self.pending_messages.push_front(msg.unwrap()),
// // }
// }
// }
#[derive(Default)]
pub struct Fiber<'a> {
// The work-in-progress of this suspended tree
pub mutations: Vec<Mutation<'a>>,
// All the pending futures (DFS)
pub futures:
HashMap<LeafLocation, *mut Pin<BumpBox<'a, dyn Future<Output = Element<'a>> + 'a>>>,
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
pub struct LeafLocation {
pub scope: ScopeId,
pub element: ElementId,
}

View file

@ -3,17 +3,17 @@ use crate::arena::ElementPath;
use crate::component::Component;
use crate::diff::DirtyScope;
use crate::factory::RenderReturn;
use crate::future_container::FutureQueue;
use crate::innerlude::SchedulerMsg;
use crate::innerlude::{Scheduler, SchedulerMsg};
use crate::mutations::Mutation;
use crate::nodes::{Template, TemplateId};
use crate::suspense::Fiber;
use crate::{
arena::ElementId,
scopes::{ScopeId, ScopeState},
};
use crate::{Element, Scope};
use crate::{scheduler, Element, Scope};
use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender};
use scheduler::SuspenseContext;
use slab::Slab;
use std::collections::{BTreeSet, HashMap};
@ -24,15 +24,12 @@ pub struct VirtualDom {
pub(crate) scope_stack: Vec<ScopeId>,
pub(crate) element_stack: Vec<ElementId>,
pub(crate) dirty_scopes: BTreeSet<DirtyScope>,
pub(crate) pending_futures: FutureQueue,
pub(crate) sender: UnboundedSender<SchedulerMsg>,
pub(crate) receiver: UnboundedReceiver<SchedulerMsg>,
pub(crate) suspended_scopes: BTreeSet<ScopeId>,
pub(crate) scheduler: Scheduler,
}
impl VirtualDom {
pub fn new(app: fn(Scope) -> Element) -> Self {
let (sender, receiver) = futures_channel::mpsc::unbounded();
let scheduler = Scheduler::new();
let mut res = Self {
templates: Default::default(),
@ -41,10 +38,7 @@ impl VirtualDom {
scope_stack: Vec::new(),
element_stack: vec![ElementId(0)],
dirty_scopes: BTreeSet::new(),
pending_futures: FutureQueue::new(sender.clone()),
suspended_scopes: BTreeSet::new(),
receiver,
sender,
scheduler,
};
let props = Box::into_raw(Box::new(VComponentProps::new_empty(app)));
@ -53,7 +47,7 @@ impl VirtualDom {
let root = res.new_scope(props);
// the root component is always a suspense boundary for any async children
res.scopes[root.0].suspense_boundary = Some(Fiber::default());
// res.scopes[root.0].provide_context(SuspenseContext::new(root));
assert_eq!(root, ScopeId(0));
@ -70,7 +64,9 @@ impl VirtualDom {
self.create(mutations, node);
self.scope_stack.pop();
}
RenderReturn::Sync(None) => todo!("Handle empty root node"),
RenderReturn::Sync(None) => {
//
}
RenderReturn::Async(_) => unreachable!(),
}
}
@ -89,11 +85,6 @@ impl VirtualDom {
//
}
/// Wait for futures internal to the virtualdom
///
/// This is cancel safe, so if the future is dropped, you can push events into the virtualdom
pub async fn wait_for_work(&mut self) {}
pub fn get_scope(&self, id: ScopeId) -> Option<&ScopeState> {
self.scopes.get(id.0)
}
@ -108,18 +99,3 @@ impl Drop for VirtualDom {
// self.drop_scope(ScopeId(0));
}
}
/*
div {
Window {}
Window {}
}
edits -> Vec<Mutation>
Subtree {
id: 0,
namespace: "react-three-fiber",
edits: []
}
*/

View file

@ -0,0 +1,33 @@
use std::time::Duration;
use dioxus_core::*;
#[tokio::test]
async fn it_works() {
let mut dom = VirtualDom::new(app);
let mut mutations = vec![];
dom.rebuild(&mut mutations);
println!("mutations: {:?}", mutations);
dom.wait_for_work().await;
}
fn app(cx: Scope) -> Element {
cx.spawn(async {
for x in 0..10 {
tokio::time::sleep(Duration::from_secs(1)).await;
println!("Hello, world! {x}");
}
});
cx.spawn(async {
for x in 0..10 {
tokio::time::sleep(Duration::from_millis(500)).await;
println!("Hello, world does! {x}");
}
});
None
}

View file

@ -230,13 +230,16 @@ fn async_children() {
render! {
div {
async_child {}
"Hello world"
}
}
}
async fn async_child(cx: Scope<'_>) -> Element {
let d = 123;
let user_name = use_fetch("https://jsonplaceholder.typicode.com/users/1").await;
render! { p { "{d}" "hii" } }
}