feat: task system works

but I broke the other things :(
Jonathan Kelley 2021-07-14 02:04:19 -04:00
parent 84c3e9fcb1
commit 3a57b94262
12 changed files with 251 additions and 181 deletions

View file

@ -23,11 +23,11 @@ default = [
"macro",
"hooks",
"html",
"ssr",
"web",
"desktop",
# "atoms",
# "ssr",
# "router",
# "web",
# "desktop",
]
atoms = []
macro = ["dioxus-core-macro"]
@ -43,17 +43,17 @@ core = []
[dev-dependencies]
# futures = "0.3.15"
# log = "0.4.14"
# num-format = "0.4.0"
# separator = "0.4.1"
# serde = { version = "1.0.126", features = ["derive"] }
# surf = "2.2.0"
# env_logger = "*"
# async-std = { version = "1.9.0", features = ["attributes"] }
# im-rc = "15.0.0"
# rand = { version = "0.8.4", features = ["small_rng"] }
# fxhash = "0.2.1"
futures = "0.3.15"
log = "0.4.14"
num-format = "0.4.0"
separator = "0.4.1"
serde = { version = "1.0.126", features = ["derive"] }
surf = "2.2.0"
env_logger = "*"
async-std = { version = "1.9.0", features = ["attributes"] }
im-rc = "15.0.0"
rand = { version = "0.8.4", features = ["small_rng"] }
fxhash = "0.2.1"
[workspace]
@ -62,9 +62,9 @@ members = [
"packages/core-macro",
"packages/html",
"packages/hooks",
# "packages/web",
# "packages/webview",
# "packages/ssr",
"packages/web",
"packages/ssr",
"packages/webview",
# "packages/atoms",
# "packages/docsite",
]

View file

@ -1,3 +1,3 @@
{
"rust-analyzer.inlayHints.enable": false
"rust-analyzer.inlayHints.enable": true
}

View file

@ -36,7 +36,6 @@ use std::{
pub struct Context<'src, T> {
pub props: &'src T,
pub scope: &'src Scope,
pub tasks: &'src RefCell<Vec<&'src mut dyn Future<Output = ()>>>,
}
// pub type PinnedTask = Pin<Box<dyn Future<Output = ()>>>;
@ -46,7 +45,6 @@ impl<'src, T> Clone for Context<'src, T> {
Self {
props: self.props,
scope: self.scope,
tasks: self.tasks,
}
}
}
@ -264,6 +262,9 @@ Any function prefixed with "use" should not be called conditionally.
///
/// This is useful when you have some async task that needs to be progressed.
///
/// This method takes ownership of the task you've provided and must return (). This means any work that needs to
/// happen must occur within the future or be scheduled for after the future completes (through schedule_update).
///
/// ## Explanation
/// Dioxus will step its internal event loop if the future completes while waiting.
///
@ -274,7 +275,9 @@ Any function prefixed with "use" should not be called conditionally.
///
///
///
pub fn submit_task(&self, _task: Pin<Box<dyn Future<Output = ()>>>) -> TaskHandle {
pub fn submit_task(&self, task: FiberTask) -> TaskHandle {
let r = (self.scope.task_submitter)(task);
// self.scope.submit_task(task);
// let r = task.then(|f| async {
// //
// });
@ -284,7 +287,6 @@ Any function prefixed with "use" should not be called conditionally.
// t
// // ()
// });
todo!();
// let new_fut = task.then(|f| async {
// //
// ()
@ -297,13 +299,12 @@ Any function prefixed with "use" should not be called conditionally.
/// Awaits the given task, forcing the component to re-render when the value is ready.
///
///
pub fn use_task<Out, Fut: 'static>(
&self,
task_initializer: impl FnOnce() -> Fut + 'src,
) -> &mut Option<Out>
pub fn use_task<Out, Fut, Init>(&self, task_initializer: Init) -> &mut Option<Out>
where
Out: 'static,
Fut: Future<Output = Out>,
Fut: 'static,
Init: FnOnce() -> Fut + 'src,
{
struct TaskHook<T> {
task_dump: Rc<RefCell<Option<T>>>,
@ -318,9 +319,18 @@ Any function prefixed with "use" should not be called conditionally.
let task_dump = Rc::new(RefCell::new(None));
let slot = task_dump.clone();
let update = self.schedule_update();
let originator = self.scope.arena_idx.clone();
self.submit_task(Box::pin(task_fut.then(move |output| async move {
*slot.as_ref().borrow_mut() = Some(output);
update();
EventTrigger {
event: VirtualEvent::FiberEvent,
originator,
priority: EventPriority::Low,
real_node_id: None,
}
})));
TaskHook {
@ -340,29 +350,26 @@ Any function prefixed with "use" should not be called conditionally.
/// Asynchronously render new nodes once the given future has completed.
///
///
///
///
/// # Easda
///
///
///
///
/// # Example
///
pub fn suspend<Out, Fut: 'static>(
///
pub fn use_suspense<Out, Fut: 'static>(
&'src self,
_task_initializer: impl FnOnce() -> Fut + 'src,
_task_initializer: impl FnOnce() -> Fut,
_callback: impl FnOnce(SuspendedContext, Out) -> VNode<'src> + 'src,
) -> VNode<'src>
where
Out: 'src,
Fut: Future<Output = Out>,
{
// self.use_hook(|| , runner, cleanup)
todo!()
// use futures_util::FutureExt;
// match fut.now_or_never() {
// Some(out) => callback(SuspendedContext {}, out),
// None => NodeFactory::suspended(),
// }
}
}
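For orientation, here is a minimal standalone sketch, not the crate's actual API surface, of how a plain user future gets wrapped into a FiberTask via the same `.then(...)` adapter that use_task builds above. The `adapt` helper is hypothetical and `EventTrigger` is reduced to a unit struct; the real alias lives in scope.rs further down as Pin<Box<dyn Future<Output = EventTrigger>>>.

use std::pin::Pin;
use futures_util::{Future, FutureExt};

// Stand-in for dioxus' EventTrigger; the real hook also fills in originator,
// priority, and real_node_id as shown in the diff above.
struct EventTrigger;

type FiberTask = Pin<Box<dyn Future<Output = EventTrigger>>>;

fn adapt<Out: 'static>(task: impl Future<Output = Out> + 'static) -> FiberTask {
    Box::pin(task.then(|_output| async move {
        // In use_task the output is stored in the task_dump slot and
        // schedule_update() is called before the trigger is returned.
        EventTrigger
    }))
}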

View file

@ -49,7 +49,7 @@
//! - https://hacks.mozilla.org/2019/03/fast-bump-allocated-virtual-doms-with-rust-and-wasm/
use crate::{arena::SharedArena, innerlude::*, tasks::TaskQueue};
use fxhash::{FxHashSet};
use fxhash::FxHashSet;
use std::any::Any;
@ -179,6 +179,7 @@ where
new_node.dom_id.set(root);
// push it just in case
// TODO: remove this - it clogs up things and is inefficient
self.dom.push(root);
self.diff_listeners(old.listeners, new.listeners);
self.diff_attr(old.attributes, new.attributes, new.namespace);
@ -311,9 +312,9 @@ where
// When we create new nodes, we need to propagate some information back up the call chain.
// This gives the caller some information on how to handle things like insertions, appending, and subtree discarding.
struct CreateMeta {
is_static: bool,
added_to_stack: u32,
pub struct CreateMeta {
pub is_static: bool,
pub added_to_stack: u32,
}
impl CreateMeta {
@ -338,7 +339,7 @@ where
// When this function returns, the new node is on top of the change list stack:
//
// [... node]
fn create(&mut self, node: &'bump VNode<'bump>) -> CreateMeta {
pub fn create(&mut self, node: &'bump VNode<'bump>) -> CreateMeta {
log::warn!("Creating node! ... {:#?}", node);
match &node.kind {
VNodeKind::Text(text) => {
@ -373,6 +374,7 @@ where
node.dom_id.set(real_id);
listeners.iter().enumerate().for_each(|(idx, listener)| {
log::info!("setting listener id to {:#?}", real_id);
listener.mounted_node.set(real_id);
self.dom
.new_event_listener(listener.event, listener.scope, idx, real_id);
@ -396,12 +398,12 @@ where
// Notice: this is a web-specific optimization and may be changed in the future
//
// TODO move over
if children.len() == 1 {
if let VNodeKind::Text(text) = &children[0].kind {
self.dom.set_text(text.text);
return CreateMeta::new(is_static, 1);
}
}
// if children.len() == 1 {
// if let VNodeKind::Text(text) = &children[0].kind {
// self.dom.set_text(text.text);
// return CreateMeta::new(is_static, 1);
// }
// }
for child in *children {
let child_meta = self.create(child);
@ -411,11 +413,11 @@ where
self.dom.append_children(child_meta.added_to_stack);
}
if is_static {
log::debug!("created a static node {:#?}", node);
} else {
log::debug!("created a dynamic node {:#?}", node);
}
// if is_static {
// log::debug!("created a static node {:#?}", node);
// } else {
// log::debug!("created a dynamic node {:#?}", node);
// }
// el_is_static.set(is_static);
CreateMeta::new(is_static, 1)

View file

@ -189,6 +189,13 @@ impl<'a> NodeFactory<'a> {
children: &'a [VNode<'a>],
key: Option<&'a str>,
) -> VNode<'a> {
let mut queue = self.scope_ref.listeners.borrow_mut();
for listener in listeners {
let mounted = listener.mounted_node as *const _ as *mut _;
let callback = listener.callback as *const _ as *mut _;
queue.push((mounted, callback))
}
VNode {
dom_id: RealDomNode::empty_cell(),
key,
@ -285,20 +292,13 @@ impl<'a> NodeFactory<'a> {
let caller: Captured = Rc::new(move |scp: &Scope| -> VNode {
// cast back into the right lifetime
let safe_props: &'_ P = unsafe { &*(raw_props as *const P) };
let tasks = RefCell::new(Vec::new());
let cx: Context<P> = Context {
props: safe_props,
scope: scp,
tasks: &tasks,
};
let res = component(cx);
// submit any async tasks to the scope
for _task in tasks.borrow_mut().drain(..) {
// scp.submit_task(task);
}
let g2 = unsafe { std::mem::transmute(res) };
g2

View file

@ -71,6 +71,8 @@ pub struct Scope {
pub(crate) suspended_tasks: Vec<*mut Pin<Box<dyn Future<Output = VNode<'static>>>>>,
}
pub type FiberTask = Pin<Box<dyn Future<Output = EventTrigger>>>;
impl Scope {
// we are being created in the scope of an existing component (where the creator_node lifetime comes into play)
// we are going to break this lifetime by force in order to save it on ourselves.
@ -148,6 +150,17 @@ impl Scope {
Ok(())
}
/// Progress a suspended node
pub fn progress_suspended(&mut self) -> Result<()> {
// load the hook
// downcast to our special state
// run just this hook
// create a new vnode
// diff this new vnode with the original suspended vnode
Ok(())
}
// this is its own function so we can precisely control how lifetimes flow
unsafe fn call_user_component<'a>(&'a self, caller: &WrappedCaller) -> VNode<'static> {
let new_head: VNode<'a> = caller(self);
@ -164,6 +177,11 @@ impl Scope {
..
} = trigger;
if let &VirtualEvent::FiberEvent = &event {
log::info!("arrived a fiber event");
return Ok(());
}
// todo: implement scanning for outdated events
// Convert the raw ptr into an actual object
@ -200,9 +218,9 @@ impl Scope {
Ok(())
}
pub fn submit_task(&self, task: &mut Pin<Box<dyn Future<Output = ()>>>) {
pub fn submit_task(&self, task: FiberTask) {
log::debug!("Task submitted into scope");
(self.task_submitter)(DTask::new(task, self.arena_idx));
(self.task_submitter)(task);
}
#[inline]

View file

@ -16,28 +16,28 @@ use std::{
task::{Context, Poll},
};
use futures_util::{Future, Stream};
use futures_util::{stream::FuturesUnordered, Future, Stream, StreamExt};
use slotmap::{DefaultKey, SlotMap};
use crate::{events::EventTrigger, innerlude::ScopeIdx};
use crate::innerlude::{EventTrigger, FiberTask, ScopeIdx};
pub type TaskSubmitter = Arc<dyn Fn(DTask)>;
pub type TaskSubmitter = Arc<dyn Fn(FiberTask)>;
pub struct TaskQueue {
slots: Arc<RwLock<SlotMap<DefaultKey, DTask>>>,
slots: Arc<RwLock<FuturesUnordered<FiberTask>>>,
// slots: Arc<RwLock<SlotMap<DefaultKey, DTask>>>,
submitter: TaskSubmitter,
}
impl TaskQueue {
pub fn new() -> Self {
let slots = Arc::new(RwLock::new(SlotMap::new()));
let slots = Arc::new(RwLock::new(FuturesUnordered::new()));
let slots2 = slots.clone();
let submitter = Arc::new(move |task| {
let mut slots = slots2.write().unwrap();
log::debug!("Task submitted into global task queue");
slots.insert(task);
slots.push(task);
});
Self { slots, submitter }
}
@ -46,9 +46,9 @@ impl TaskQueue {
self.submitter.clone()
}
pub fn submit_task(&mut self, task: DTask) -> TaskHandle {
let key = self.slots.write().unwrap().insert(task);
TaskHandle { key }
pub fn submit_task(&mut self, task: FiberTask) {
self.slots.write().unwrap().push(task);
// TaskHandle { key }
}
pub fn is_empty(&self) -> bool {
@ -57,73 +57,78 @@ impl TaskQueue {
pub fn len(&self) -> usize {
self.slots.read().unwrap().len()
}
}
impl Stream for TaskQueue {
type Item = EventTrigger;
/// We can never be finished polling
fn poll_next(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
// let yield_every = self.len();
// let mut polled = 0;
pub async fn next(&mut self) -> Option<EventTrigger> {
let mut slots = self.slots.write().unwrap();
for (_key, slot) in slots.iter_mut() {
if slot.dead.get() {
continue;
}
let r = slot.fut;
let fut = unsafe { &mut *r };
// use futures::{future::Future, poll, FutureExt};
let f2 = fut.as_mut();
let w = cx.waker();
let mut cx = Context::from_waker(&w);
// Pin::new_unchecked(pointer)
// use std::future::Future;
match f2.poll(&mut cx) {
Poll::Ready(_) => {
let trigger = EventTrigger::new_from_task(slot.originator);
slot.dead.set(true);
return Poll::Ready(Some(trigger));
}
Poll::Pending => continue,
}
}
// we tried polling every active task.
// give up and relinquish control to the parent
// We have polled a large number of futures in a row without yielding.
// To ensure we do not starve other tasks waiting on the executor,
// we yield here, but immediately wake ourselves up to continue.
// cx.waker().wake_by_ref();
return Poll::Pending;
slots.next().await
}
}
// impl Stream for TaskQueue {
// type Item = EventTrigger;
// /// We can never be finished polling
// fn poll_next(
// self: Pin<&mut Self>,
// cx: &mut std::task::Context<'_>,
// ) -> std::task::Poll<Option<Self::Item>> {
// // let yield_every = self.len();
// // let mut polled = 0;
// let mut slots = self.slots.write().unwrap();
// for (_key, slot) in slots.iter_mut() {
// if slot.dead.get() {
// continue;
// }
// let r = slot.fut;
// // let fut = unsafe { &mut *r };
// // use futures::{future::Future, poll, FutureExt};
// let f2 = fut.as_mut();
// let w = cx.waker();
// let mut cx = Context::from_waker(&w);
// // Pin::new_unchecked(pointer)
// // use std::future::Future;
// match f2.poll(&mut cx) {
// Poll::Ready(_) => {
// let trigger = EventTrigger::new_from_task(slot.originator);
// slot.dead.set(true);
// return Poll::Ready(Some(trigger));
// }
// Poll::Pending => continue,
// }
// }
// // we tried polling every active task.
// // give up and relinquish control to the parent
// // We have polled a large number of futures in a row without yielding.
// // To ensure we do not starve other tasks waiting on the executor,
// // we yield here, but immediately wake ourselves up to continue.
// // cx.waker().wake_by_ref();
// return Poll::Pending;
// }
// }
pub struct TaskHandle {
key: DefaultKey,
}
pub struct DTask {
fut: *mut Pin<Box<dyn Future<Output = ()>>>,
fut: FiberTask,
originator: ScopeIdx,
dead: Cell<bool>,
}
impl DTask {
pub fn new(fut: &mut Pin<Box<dyn Future<Output = ()>>>, originator: ScopeIdx) -> Self {
pub fn new(fut: FiberTask, originator: ScopeIdx) -> Self {
Self {
fut,
originator,
dead: Cell::new(false),
}
}
pub fn debug_new(fut: &mut Pin<Box<dyn Future<Output = ()>>>) -> Self {
pub fn debug_new(fut: FiberTask) -> Self {
let originator = ScopeIdx::default();
Self {
fut,

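Since the queue now leans on FuturesUnordered, a small self-contained sketch of that pattern may help. The `BoxedTask` alias and `drain` function are hypothetical names, with u32 payloads standing in for EventTrigger:

use std::future::Future;
use std::pin::Pin;
use futures_util::{stream::FuturesUnordered, StreamExt};

type BoxedTask = Pin<Box<dyn Future<Output = u32>>>;

async fn drain() {
    let mut tasks: FuturesUnordered<BoxedTask> = FuturesUnordered::new();
    tasks.push(Box::pin(async { 1u32 }));
    tasks.push(Box::pin(async { 2u32 }));
    // Tasks are yielded in completion order, not submission order. Note that
    // next() on an empty set resolves to None immediately, which is why the
    // websys event loop below only awaits the task queue when it is non-empty.
    while let Some(value) = tasks.next().await {
        println!("task finished with {}", value);
    }
}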
View file

@ -26,8 +26,8 @@ use slotmap::DefaultKey;
use slotmap::SlotMap;
use std::any::Any;
use std::any::TypeId;
use std::pin::Pin;
use std::{any::TypeId};
pub type ScopeIdx = DefaultKey;
@ -191,7 +191,10 @@ impl VirtualDom {
}
/// Performs a *full* rebuild of the virtual dom, returning every edit required to generate the actual dom from scratch
///
/// Currently this doesn't do what we want it to do
///
/// The diff machine expects the root of the application to already be on the RealDom's stack
pub fn rebuild<'s, Dom: RealDom<'s>>(&'s mut self, realdom: &mut Dom) -> Result<()> {
let mut diff_machine = DiffMachine::new(
realdom,
@ -201,15 +204,26 @@ impl VirtualDom {
&self.tasks,
);
let cur_component = self.components.try_get_mut(self.base_scope).unwrap();
cur_component.run_scope()?;
let meta = diff_machine.create(cur_component.next_frame());
log::info!(
"nodes created! appending to body {:#?}",
meta.added_to_stack
);
diff_machine.dom.append_children(meta.added_to_stack);
// Schedule an update and then immediately call it on the root component
// This is akin to a hook being called from a listener and requiring a re-render
// Instead, this is done on the top-level component
let base = self.components.try_get(self.base_scope)?;
// let base = self.components.try_get(self.base_scope)?;
let update = &base.event_channel;
update();
// let update = &base.event_channel;
// update();
self.progress_completely(&mut diff_machine)?;
// self.progress_completely(&mut diff_machine)?;
Ok(())
}

View file

@ -57,7 +57,7 @@ pub fn use_state<'a, 'c, T: 'static, F: FnOnce() -> T, P>(
move || UseStateInner {
current_val: initial_state_fn(),
callback: cx.schedule_update(),
wip: RefCell::new(None),
wip: Rc::new(RefCell::new(None)),
update_scheuled: Cell::new(false),
},
move |hook| {
@ -76,7 +76,7 @@ struct UseStateInner<T: 'static> {
current_val: T,
update_scheuled: Cell<bool>,
callback: Rc<dyn Fn()>,
wip: RefCell<Option<T>>,
wip: Rc<RefCell<Option<T>>>,
}
pub struct UseState<'a, T: 'static> {
@ -118,6 +118,11 @@ impl<'a, T: 'static> UseState<'a, T> {
pub fn classic(self) -> (&'a T, &'a Rc<dyn Fn(T)>) {
todo!()
}
pub fn setter(&self) -> Rc<dyn Fn(T)> {
let slot = self.inner.wip.clone();
Rc::new(move |new| *slot.borrow_mut() = Some(new))
}
}
impl<'a, T: 'static + ToOwned<Owned = T>> UseState<'a, T> {
pub fn get_mut(self) -> RefMut<'a, T> {

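The new setter() is just the shared-slot pattern; a minimal sketch of what it boils down to, with hypothetical `make_setter`/`demo` names:

use std::cell::RefCell;
use std::rc::Rc;

// The hook keeps its work-in-progress value in an Rc<RefCell<Option<T>>>, so a
// cloned Rc can hand out a 'static closure that writes into the slot from
// anywhere, e.g. from inside a task.
fn make_setter<T: 'static>(wip: Rc<RefCell<Option<T>>>) -> Rc<dyn Fn(T)> {
    Rc::new(move |new| *wip.borrow_mut() = Some(new))
}

fn demo() {
    let wip: Rc<RefCell<Option<i32>>> = Rc::new(RefCell::new(None));
    let set = make_setter(wip.clone());
    set(10); // same call shape as set_val(10) in the example app below
    assert_eq!(*wip.borrow(), Some(10));
}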
View file

@ -26,25 +26,26 @@ fn main() {
static App: FC<()> = |cx| {
// let mut count = use_state(cx, || 0);
let fut = cx.use_hook(
move || {
Box::pin(async {
let mut tick: i32 = 0;
log::debug!("yeet!");
loop {
gloo_timers::future::TimeoutFuture::new(250).await;
log::debug!("ticking forward... {}", tick);
tick += 1;
}
}) as Pin<Box<dyn Future<Output = ()> + 'static>>
},
|h| h,
|_| {},
);
cx.submit_task(fut);
let state = use_state(cx, || 0);
let set_val = state.setter();
let g = cx.use_task(|| async move {
let mut tick: i32 = 0;
log::debug!("yeet!");
loop {
gloo_timers::future::TimeoutFuture::new(250).await;
log::debug!("ticking forward... {}", tick);
tick += 1;
if tick > 10 {
break;
}
}
set_val(10);
String::from("Huzza!")
});
log::debug!("Value from component was {:#?}", g);
cx.render(rsx! {
div {
section { class: "py-12 px-4 text-center"

View file

@ -62,20 +62,27 @@ impl WebsysRenderer {
}
pub async fn run(&mut self) -> dioxus_core::error::Result<()> {
let body_element = prepare_websys_dom();
use wasm_bindgen::JsCast;
let root_node = body_element.first_child().unwrap();
let root = prepare_websys_dom();
let root_node = root.clone().dyn_into::<Node>().unwrap();
let mut websys_dom = crate::new::WebsysDom::new(body_element.clone());
let mut websys_dom = crate::new::WebsysDom::new(root.clone());
websys_dom.stack.push(root_node.clone());
websys_dom.stack.push(root_node);
self.internal_dom.rebuild(&mut websys_dom)?;
log::info!("Going into event loop");
loop {
let trigger = {
let real_queue = websys_dom.wait_for_event();
// loop {
let trigger = {
let real_queue = websys_dom.wait_for_event();
if self.internal_dom.tasks.is_empty() {
log::info!("tasks is empty, waiting for dom event to trigger soemthing");
real_queue.await
} else {
log::info!("tasks is not empty, waiting for either tasks or event system");
let task_queue = (&mut self.internal_dom.tasks).next();
pin_mut!(real_queue);
@ -85,26 +92,30 @@ impl WebsysRenderer {
futures_util::future::Either::Left((trigger, _)) => trigger,
futures_util::future::Either::Right((trigger, _)) => trigger,
}
};
}
};
if let Some(real_trigger) = trigger {
log::info!("event received");
let root_node = body_element.first_child().unwrap();
websys_dom.stack.push(root_node.clone());
self.internal_dom
.progress_with_event(&mut websys_dom, trigger.unwrap())?;
// let root_node = body_element.first_child().unwrap();
// websys_dom.stack.push(root_node.clone());
// let t2 = self.internal_dom.tasks.next();
// futures::select! {
// trigger = t1 => {
// log::info!("event received");
// let root_node = body_element.first_child().unwrap();
// websys_dom.stack.push(root_node.clone());
// self.internal_dom
// .progress_with_event(&mut websys_dom, trigger)?;
// },
// () = t2 => {}
// };
self.internal_dom
.progress_with_event(&mut websys_dom, real_trigger)?;
}
// let t2 = self.internal_dom.tasks.next();
// futures::select! {
// trigger = t1 => {
// log::info!("event received");
// let root_node = body_element.first_child().unwrap();
// websys_dom.stack.push(root_node.clone());
// self.internal_dom
// .progress_with_event(&mut websys_dom, trigger)?;
// },
// () = t2 => {}
// };
// }
// while let Some(trigger) = websys_dom.wait_for_event().await {
// }
@ -119,21 +130,23 @@ fn prepare_websys_dom() -> Element {
let document = window
.document()
.expect("should have access to the Document");
let body = document.body().unwrap();
// let body = document.body().unwrap();
let el = document.get_element_by_id("dioxusroot").unwrap();
// Build a dummy div
let container: &Element = body.as_ref();
// let container: &Element = body.as_ref();
// container.set_inner_html("");
container
.append_child(
document
.create_element("div")
.expect("should create element OK")
.as_ref(),
)
.expect("should append child OK");
container.clone()
// container
// .append_child(
// document
// .create_element("div")
// .expect("should create element OK")
// .as_ref(),
// )
// .expect("should append child OK");
el
// container.clone()
}
// Progress the mount of the root component

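The loop above races the real DOM events against the task queue with futures_util's select. A reduced sketch of that pattern, with generic stand-in types, a hypothetical `next_trigger` name, and both branches collapsed to one output type for brevity:

use std::future::Future;
use futures_util::future::{select, Either};
use futures_util::pin_mut;

async fn next_trigger<A, B, T>(real_events: A, task_queue: B) -> T
where
    A: Future<Output = T>,
    B: Future<Output = T>,
{
    // select() needs Unpin futures, hence the stack pinning (as in the loop above).
    pin_mut!(real_events);
    pin_mut!(task_queue);
    match select(real_events, task_queue).await {
        Either::Left((trigger, _pending_tasks)) => trigger,
        Either::Right((trigger, _pending_events)) => trigger,
    }
}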
View file

@ -52,6 +52,7 @@ impl WebsysDom {
let mut nodes = slotmap::SlotMap::with_capacity(1000);
let root_id = nodes.insert(root.clone().dyn_into::<Node>().unwrap());
Self {
stack: Stack::with_capacity(10),
nodes,
@ -72,9 +73,13 @@ impl WebsysDom {
impl<'a> dioxus_core::diff::RealDom<'a> for WebsysDom {
fn push(&mut self, root: RealDomNode) {
log::debug!("Called [push_root] {:?}", root);
log::debug!("Called [push_root] {:#?}", root);
let key: DefaultKey = KeyData::from_ffi(root.0).into();
let domnode = self.nodes.get(key).expect("Failed to pop known root");
let domnode = self
.nodes
.get(key)
.expect(&format!("Failed to pop know root: {:#?}", key));
self.stack.push(domnode.clone());
}
// drop the node off the stack
@ -85,10 +90,10 @@ impl<'a> dioxus_core::diff::RealDom<'a> for WebsysDom {
fn append_children(&mut self, many: u32) {
log::debug!("Called [`append_child`]");
let mut root: Node = self
let root: Node = self
.stack
.list
.get(self.stack.list.len() - many as usize)
.get(self.stack.list.len() - (1 + many as usize))
.unwrap()
.clone();
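
A quick sanity check on the new index (hypothetical stack contents, not taken from the diff): with a stack of [root, child_a, child_b] and many = 2, the parent the children presumably attach to sits beneath them, so len - (1 + many) = 3 - 3 = 0 selects root, whereas the old len - many = 3 - 2 = 1 would have selected child_a, the first child itself.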