chore: clean up some clippy stuff

Jonathan Kelley 2022-07-03 00:11:28 -04:00
parent 2884d72b08
commit 94004cfe19
8 changed files with 117 additions and 134 deletions

View file

@@ -232,8 +232,3 @@ impl<'a> AttributeValue<'a> {
}
}
}
// #[test]
// fn test_attribute_value_size() {
// assert_eq!(std::mem::size_of::<AttributeValue<'_>>(), 24);
// }
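The commented-out assertion above is a size regression test: it pins the in-memory size of `AttributeValue` so accidental growth is caught at test time. A minimal sketch of the same pattern, using a stand-in enum rather than Dioxus's real `AttributeValue` (the 24-byte figure comes from the removed test):

```rust
use std::mem::size_of;

// Stand-in type for illustration only; not Dioxus's AttributeValue.
enum SmallValue<'a> {
    Text(&'a str),
    Float(f32),
    Bool(bool),
}

fn main() {
    // On 64-bit targets a &str payload (16 bytes) plus the tag keeps this at or under 24 bytes.
    println!("SmallValue is {} bytes", size_of::<SmallValue<'_>>());
    assert!(size_of::<SmallValue<'_>>() <= 24);
}
```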

View file

@@ -966,7 +966,7 @@ impl<'b> DiffState<'b> {
c.scope.set(None);
let props = scope.props.take().unwrap();
c.props.borrow_mut().replace(props);
self.scopes.try_remove(scope_id).unwrap();
self.scopes.try_remove(scope_id);
}
self.leave_scope();
}
@@ -1025,7 +1025,7 @@ impl<'b> DiffState<'b> {
let props = scope.props.take().unwrap();
c.props.borrow_mut().replace(props);
self.scopes.try_remove(scope_id).unwrap();
self.scopes.try_remove(scope_id);
}
self.leave_scope();
}

View file

@@ -18,7 +18,7 @@ impl BubbleState {
}
}
/// User Events are events that are shuttled from the renderer into the VirtualDom through the scheduler channel.
/// User Events are events that are shuttled from the renderer into the [`VirtualDom`] through the scheduler channel.
///
/// These events will be passed to the appropriate Element given by `mounted_dom_id` and then bubbled up through the tree
/// where each listener is checked and fired if the event name matches.
@@ -73,7 +73,7 @@ pub struct UserEvent {
/// Priority of Event Triggers.
///
/// Internally, Dioxus will abort work that's taking too long if new, more important work arrives. Unlike React, Dioxus
/// won't be afraid to pause work or flush changes to the RealDOM. This is called "cooperative scheduling". Some Renderers
/// won't be afraid to pause work or flush changes to the Real Dom. This is called "cooperative scheduling". Some Renderers
/// implement this form of scheduling internally, however Dioxus will perform its own scheduling as well.
///
/// The ultimate goal of the scheduler is to manage latency of changes, prioritizing "flashier" changes over "subtler" changes.
@@ -81,12 +81,12 @@ pub struct UserEvent {
/// React has a 5-tier priority system. However, they break things into "Continuous" and "Discrete" priority. For now,
/// we keep it simple, and just use a 3-tier priority system.
///
/// - NoPriority = 0
/// - LowPriority = 1
/// - NormalPriority = 2
/// - UserBlocking = 3
/// - HighPriority = 4
/// - ImmediatePriority = 5
/// - `NoPriority` = 0
/// - `LowPriority` = 1
/// - `NormalPriority` = 2
/// - `UserBlocking` = 3
/// - `HighPriority` = 4
/// - `ImmediatePriority` = 5
///
/// We still have a concept of discrete vs continuous though - discrete events won't be batched, but continuous events will.
/// This means that multiple "scroll" events will be processed in a single frame, but multiple "click" events will be
@@ -123,37 +123,34 @@ pub enum EventPriority {
}
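The tier list above maps onto an ordinary C-like enum whose declaration order gives the priority ordering. This sketch uses illustrative variant names rather than Dioxus's exact `EventPriority` definition:

```rust
// Illustrative 3-tier priority, not Dioxus's exact EventPriority enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Priority {
    Low = 0,
    Medium = 1,
    High = 2,
}

fn main() {
    // Deriving Ord on a C-like enum orders variants by declaration, so a
    // scheduler can pick the most urgent pending work with a simple max.
    let pending = [Priority::Low, Priority::High, Priority::Medium];
    assert_eq!(pending.iter().max(), Some(&Priority::High));
}
```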
/// The internal Dioxus type that carries any event data to the relevant handler.
///
///
///
///
///
pub struct AnyEvent {
pub(crate) bubble_state: Rc<BubbleState>,
pub(crate) data: Arc<dyn Any + Send + Sync>,
}
impl AnyEvent {
/// Convert this AnyEvent into a specific UiEvent with EventData.
/// Convert this [`AnyEvent`] into a specific [`UiEvent`] with [`EventData`].
///
/// ```rust, ignore
/// let evt: FormEvent = evvt.downcast().unwrap();
/// ```
#[must_use]
pub fn downcast<T: Send + Sync + 'static>(self) -> Option<UiEvent<T>> {
let AnyEvent { data, bubble_state } = self;
data.downcast::<T>()
.ok()
.map(|data| UiEvent { bubble_state, data })
.map(|data| UiEvent { data, bubble_state })
}
}
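`downcast` above leans on the standard library's `Arc` downcasting. A self-contained sketch of that underlying mechanism (the `42u32` payload is just an example value):

```rust
use std::any::Any;
use std::sync::Arc;

fn main() {
    // The same mechanism AnyEvent::downcast builds on: an Arc<dyn Any> can be
    // turned back into its concrete type, failing cleanly on a type mismatch.
    let data: Arc<dyn Any + Send + Sync> = Arc::new(42u32);

    let as_u32 = data.clone().downcast::<u32>().ok();
    assert_eq!(as_u32.as_deref(), Some(&42));

    let as_string = data.downcast::<String>().ok();
    assert!(as_string.is_none());
}
```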
/// A UiEvent is a type that wraps various EventData.
/// A [`UiEvent`] is a type that wraps various [`EventData`].
///
/// You should prefer to use the name of the event directly, rather than
/// the UiEvent<T> generic type.
/// the [`UiEvent`]<T> generic type.
///
/// For the HTML crate, this would include MouseEvent, FormEvent etc.
/// For the HTML crate, this would include `MouseEvent`, `FormEvent` etc.
pub struct UiEvent<T> {
/// The internal data of the event
/// This is wrapped in an Arc so that it can be sent across threads

View file

@@ -3,7 +3,7 @@
//! This module provides support for a type called `LazyNodes` which is a micro-heap located on the stack to make calls
//! to `rsx!` more efficient.
//!
//! To support returning rsx! from branches in match statements, we need to use dynamic dispatch on NodeFactory closures.
//! To support returning rsx! from branches in match statements, we need to use dynamic dispatch on [`NodeFactory`] closures.
//!
//! This can be done either through boxing directly, or by using dynamic-sized-types and a custom allocator. In our case,
//! we build a tiny allocator on the stack and allocate the closure into it.
@@ -14,10 +14,10 @@
use crate::innerlude::{NodeFactory, VNode};
use std::mem;
/// A concrete type provider for closures that build VNode structures.
/// A concrete type provider for closures that build [`VNode`] structures.
///
/// This struct wraps lazy structs that build VNode trees Normally, we cannot perform a blanket implementation over
/// closures, but if we wrap the closure in a concrete type, we can maintain separate implementations of IntoVNode.
/// This struct wraps lazy structs that build [`VNode`] trees Normally, we cannot perform a blanket implementation over
/// closures, but if we wrap the closure in a concrete type, we can maintain separate implementations of [`IntoVNode`].
///
///
/// ```rust, ignore
@@ -35,7 +35,7 @@ enum StackNodeStorage<'a, 'b> {
}
impl<'a, 'b> LazyNodes<'a, 'b> {
/// Create a new LazyNodes closure, optimistically placing it onto the stack.
/// Create a new [`LazyNodes`] closure, optimistically placing it onto the stack.
///
/// If the closure cannot fit into the stack allocation (16 bytes), then it
/// is placed on the heap. Most closures will fit into the stack, and is
@@ -45,8 +45,10 @@ impl<'a, 'b> LazyNodes<'a, 'b> {
let mut slot = Some(val);
let val = move |fac: Option<NodeFactory<'a>>| {
let inner = slot.take().unwrap();
fac.map(inner)
fac.map(
slot.take()
.expect("LazyNodes closure to be called only once"),
)
};
// miri does not know how to work with mucking directly into bytes
@@ -60,21 +62,21 @@ impl<'a, 'b> LazyNodes<'a, 'b> {
}
}
/// Create a new LazyNodes closure, but force it onto the heap.
pub fn new_boxed<F>(_val: F) -> Self
/// Create a new [`LazyNodes`] closure, but force it onto the heap.
pub fn new_boxed<F>(inner: F) -> Self
where
F: FnOnce(NodeFactory<'a>) -> VNode<'a> + 'b,
{
// there's no way to call FnOnce without a box, so we need to store it in a slot and use static dispatch
let mut slot = Some(_val);
let val = move |fac: Option<NodeFactory<'a>>| {
let inner = slot.take().unwrap();
fac.map(inner)
};
let mut slot = Some(inner);
Self {
inner: StackNodeStorage::Heap(Box::new(val)),
inner: StackNodeStorage::Heap(Box::new(move |fac: Option<NodeFactory<'a>>| {
fac.map(
slot.take()
.expect("LazyNodes closure to be called only once"),
)
})),
}
}
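Both `new` and `new_boxed` rely on the same trick: an `FnOnce` cannot be called through a boxed `FnMut`, so it is parked in an `Option` and `take`n out on the first call. A standalone sketch of just that pattern (names are illustrative):

```rust
fn main() {
    // Park the FnOnce in an Option so the outer closure is merely FnMut;
    // a second call would panic, which is why the expect message matters.
    let build = |input: &str| format!("<div>{input}</div>");
    let mut slot = Some(build);

    let mut callable: Box<dyn FnMut(&str) -> String> = Box::new(move |input| {
        (slot.take().expect("closure should only be called once"))(input)
    });

    assert_eq!(callable("hello"), "<div>hello</div>");
}
```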
@@ -135,7 +137,8 @@ impl<'a, 'b> LazyNodes<'a, 'b> {
}
let src_ptr = data as *const u8;
let dataptr = buf.as_mut()[..].as_mut_ptr() as *mut u8;
let dataptr = buf.as_mut_ptr().cast::<u8>();
for i in 0..size {
*dataptr.add(i) = *src_ptr.add(i);
}
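The switch to `as_mut_ptr().cast::<u8>()` is the clippy-preferred spelling of a raw-pointer cast: `.cast()` can only change the pointee type, never mutability, so it is harder to get wrong than a chained `as`. A small illustration of the equivalence (the buffer here is just an example):

```rust
fn main() {
    let mut buf = [0u64; 2];

    // Both expressions produce the same *mut u8; clippy prefers `.cast()`
    // because it cannot silently change mutability the way `as` casts can.
    let via_as = buf.as_mut_ptr() as *mut u8;
    let via_cast = buf.as_mut_ptr().cast::<u8>();
    assert_eq!(via_as, via_cast);

    unsafe { *via_cast = 7 };
    // to_ne_bytes() is the in-memory representation, so byte 0 is what was written.
    assert_eq!(buf[0].to_ne_bytes()[0], 7);
}
```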
@@ -152,7 +155,7 @@ impl<'a, 'b> LazyNodes<'a, 'b> {
}
}
/// Call the closure with the given factory to produce real VNodes.
/// Call the closure with the given factory to produce real [`VNode`].
///
/// ```rust, ignore
/// let f = LazyNodes::new(move |f| f.element("div", [], [], [] None));
@@ -161,9 +164,12 @@ impl<'a, 'b> LazyNodes<'a, 'b> {
///
/// let node = f.call(cac);
/// ```
#[must_use]
pub fn call(self, f: NodeFactory<'a>) -> VNode<'a> {
match self.inner {
StackNodeStorage::Heap(mut lazy) => lazy(Some(f)).unwrap(),
StackNodeStorage::Heap(mut lazy) => {
lazy(Some(f)).expect("Closure should not be called twice")
}
StackNodeStorage::Stack(mut stack) => stack.call(f),
}
}
@@ -285,7 +291,6 @@ mod tests {
let it = (0..10)
.map(|i| {
let val = cx.props.inner.clone();
LazyNodes::new(move |f| {
eprintln!("hell closure");
let inner = DropInner { id: i };

View file

@@ -56,19 +56,9 @@ pub(crate) mod innerlude {
/// Example {}
/// )
/// ```
///
/// ## As a closure
/// This particular type alias lets you even use static closures for pure/static components:
///
/// ```rust, ignore
/// static Example: Component<Props> = |cx| {
/// // ...
/// };
/// ```
pub type Component<P = ()> = fn(Scope<P>) -> Element;
/// A list of attributes
///
pub type Attributes<'a> = Option<&'a [Attribute<'a>]>;
}

View file

@@ -157,21 +157,21 @@ impl ScopeArena {
// todo: subtrees
subtree: Cell::new(0),
is_subtree_root: Cell::new(false),
is_subtree_root: Cell::default(),
generation: 0.into(),
tasks: self.tasks.clone(),
shared_contexts: Default::default(),
shared_contexts: RefCell::default(),
items: RefCell::new(SelfReferentialItems {
listeners: Default::default(),
borrowed_props: Default::default(),
listeners: Vec::default(),
borrowed_props: Vec::default(),
}),
hook_arena: Bump::new(),
hook_vals: RefCell::new(Vec::with_capacity(hook_capacity)),
hook_idx: Default::default(),
hook_idx: Cell::default(),
}),
);
}
@@ -180,8 +180,7 @@ impl ScopeArena {
}
// Removes a scope and its descendents from the arena
pub fn try_remove(&self, id: ScopeId) -> Option<()> {
log::trace!("removing scope {:?}", id);
pub fn try_remove(&self, id: ScopeId) {
self.ensure_drop_safety(id);
// Dispose of any ongoing tasks
@@ -200,8 +199,6 @@ impl ScopeArena {
scope.reset();
self.free_scopes.borrow_mut().push(scope);
Some(())
}
pub fn reserve_node<'a>(&self, node: &'a VNode<'a>) -> ElementId {
@@ -307,7 +304,7 @@ impl ScopeArena {
let node = frame
.bump
.alloc(VNode::Placeholder(frame.bump.alloc(VPlaceholder {
id: Default::default(),
id: Cell::default(),
})));
frame.node.set(unsafe { extend_vnode(node) });
}
@@ -317,22 +314,19 @@ impl ScopeArena {
scope.cycle_frame();
}
pub fn call_listener_with_bubbling(&self, event: UserEvent, element: ElementId) {
pub fn call_listener_with_bubbling(&self, event: &UserEvent, element: ElementId) {
let nodes = self.nodes.borrow();
let mut cur_el = Some(element);
log::trace!("calling listener {:?}, {:?}", event, element);
let state = Rc::new(BubbleState::new());
while let Some(id) = cur_el.take() {
if let Some(el) = nodes.get(id.0) {
let real_el = unsafe { &**el };
log::trace!("looking for listener on {:?}", real_el);
if let VNode::Element(real_el) = real_el {
for listener in real_el.listeners.borrow().iter() {
if listener.event == event.name {
log::trace!("calling listener {:?}", listener.event);
if state.canceled.get() {
// stop bubbling if canceled
return;
@@ -384,14 +378,11 @@ impl ScopeArena {
// this is totally okay since all our nodes are always in a valid state
pub fn get_element(&self, id: ElementId) -> Option<&VNode> {
let ptr = self.nodes.borrow().get(id.0).cloned();
match ptr {
Some(ptr) => {
let node = unsafe { &*ptr };
Some(unsafe { extend_vnode(node) })
}
None => None,
}
self.nodes
.borrow()
.get(id.0)
.copied()
.map(|ptr| unsafe { extend_vnode(&*ptr) })
}
}
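The rewritten `get_element` is the `clippy::manual_map` pattern: a `match` that only re-wraps the `Some` arm's result collapses into `Option::map`. In isolation, with a plain integer standing in for the node pointer:

```rust
fn main() {
    let slot: Option<&i32> = Some(&5);

    // Before: a match that re-wraps the value in Some.
    let doubled_match = match slot {
        Some(v) => Some(*v * 2),
        None => None,
    };

    // After: the clippy-suggested Option::map does the same thing.
    let doubled_map = slot.map(|v| *v * 2);

    assert_eq!(doubled_match, doubled_map);
    assert_eq!(doubled_map, Some(10));
}
```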
@@ -441,7 +432,7 @@ impl<'a, P> std::ops::Deref for Scope<'a, P> {
/// A component's unique identifier.
///
/// `ScopeId` is a `usize` that is unique across the entire VirtualDOM and across time. ScopeIDs will never be reused
/// `ScopeId` is a `usize` that is unique across the entire [`VirtualDom`] and across time. [`ScopeID`]s will never be reused
/// once a component has been unmounted.
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
@@ -449,7 +440,7 @@ pub struct ScopeId(pub usize);
/// A task's unique identifier.
///
/// `TaskId` is a `usize` that is unique across the entire VirtualDOM and across time. TaskIDs will never be reused
/// `TaskId` is a `usize` that is unique across the entire [`VirtualDom`] and across time. [`TaskID`]s will never be reused
/// once a Task has been completed.
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
@@ -468,7 +459,7 @@ pub struct TaskId {
/// Scopes are allocated in a generational arena. As components are mounted/unmounted, they will replace slots of dead components.
/// The actual contents of the hooks, though, will be allocated with the standard allocator. These should not allocate as frequently.
///
/// We expose the `Scope` type so downstream users can traverse the Dioxus VirtualDOM for whatever
/// We expose the `Scope` type so downstream users can traverse the Dioxus [`VirtualDom`] for whatever
/// use case they might have.
pub struct ScopeState {
pub(crate) parent_scope: Option<*mut ScopeState>,
@@ -568,9 +559,9 @@ impl ScopeState {
self.height
}
/// Get the Parent of this Scope within this Dioxus VirtualDOM.
/// Get the Parent of this [`Scope`] within this Dioxus [`VirtualDom`].
///
/// This ID is not unique across Dioxus VirtualDOMs or across time. IDs will be reused when components are unmounted.
/// This ID is not unique across Dioxus [`VirtualDom`]s or across time. IDs will be reused when components are unmounted.
///
/// The base component will not have a parent, and will return `None`.
///
@@ -589,9 +580,9 @@ impl ScopeState {
self.parent_scope.map(|p| unsafe { &*p }.our_arena_idx)
}
/// Get the ID of this Scope within this Dioxus VirtualDOM.
/// Get the ID of this Scope within this Dioxus [`VirtualDom`].
///
/// This ID is not unique across Dioxus VirtualDOMs or across time. IDs will be reused when components are unmounted.
/// This ID is not unique across Dioxus [`VirtualDom`]s or across time. IDs will be reused when components are unmounted.
///
/// # Example
///
@@ -613,41 +604,37 @@ impl ScopeState {
/// Create a subscription that schedules a future render for the reference component
///
/// ## Notice: you should prefer using schedule_update_any and scope_id
/// ## Notice: you should prefer using [`schedule_update_any`] and [`scope_id`]
pub fn schedule_update(&self) -> Arc<dyn Fn() + Send + Sync + 'static> {
let (chan, id) = (self.tasks.sender.clone(), self.scope_id());
Arc::new(move || {
let _ = chan.unbounded_send(SchedulerMsg::Immediate(id));
})
Arc::new(move || drop(chan.unbounded_send(SchedulerMsg::Immediate(id))))
}
/// Schedule an update for any component given its ScopeId.
/// Schedule an update for any component given its [`ScopeId`].
///
/// A component's ScopeId can be obtained from `use_hook` or the [`ScopeState::scope_id`] method.
/// A component's [`ScopeId`] can be obtained from `use_hook` or the [`ScopeState::scope_id`] method.
///
/// This method should be used when you want to schedule an update for a component
pub fn schedule_update_any(&self) -> Arc<dyn Fn(ScopeId) + Send + Sync> {
let chan = self.tasks.sender.clone();
Arc::new(move |id| {
let _ = chan.unbounded_send(SchedulerMsg::Immediate(id));
})
Arc::new(move |id| drop(chan.unbounded_send(SchedulerMsg::Immediate(id))))
}
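The `Arc::new(move || drop(chan.unbounded_send(..)))` form keeps the closure a single expression while still explicitly discarding the `Result`, which is what clippy asks for instead of an unused `let _ =` binding. A sketch with a hypothetical fallible `send` standing in for the channel:

```rust
// Hypothetical stand-in for a channel send that can fail once the receiver is gone.
fn send(msg: &str) -> Result<(), &'static str> {
    if msg.is_empty() { Err("nothing to send") } else { Ok(()) }
}

fn main() {
    // Wrapping the call in drop(..) acknowledges the Result is intentionally
    // ignored, so the closure body stays a single expression.
    let notify = move || drop(send("immediate"));
    notify();
}
```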
/// Get the [`ScopeId`] of a mounted component.
///
/// `ScopeId` is not unique for the lifetime of the VirtualDom - a ScopeId will be reused if a component is unmounted.
/// `ScopeId` is not unique for the lifetime of the [`VirtualDom`] - a [`ScopeId`] will be reused if a component is unmounted.
pub fn needs_update(&self) {
self.needs_update_any(self.scope_id())
self.needs_update_any(self.scope_id());
}
/// Get the [`ScopeId`] of a mounted component.
///
/// `ScopeId` is not unique for the lifetime of the VirtualDom - a ScopeId will be reused if a component is unmounted.
/// `ScopeId` is not unique for the lifetime of the [`VirtualDom`] - a [`ScopeId`] will be reused if a component is unmounted.
pub fn needs_update_any(&self, id: ScopeId) {
let _ = self
.tasks
self.tasks
.sender
.unbounded_send(SchedulerMsg::Immediate(id));
.unbounded_send(SchedulerMsg::Immediate(id))
.expect("Scheduler to exist if scope exists");
}
/// Get the Root Node of this scope
@@ -656,7 +643,7 @@ impl ScopeState {
unsafe { std::mem::transmute(node) }
}
/// This method enables the ability to expose state to children further down the VirtualDOM Tree.
/// This method enables the ability to expose state to children further down the [`VirtualDom`] Tree.
///
/// This is a "fundamental" operation and should only be called during initialization of a hook.
///
@@ -739,10 +726,15 @@ impl ScopeState {
unreachable!("all apps have a root scope")
}
/// Try to retrieve a SharedState with type T from the any parent Scope.
/// Try to retrieve a shared state with type T from the any parent Scope.
pub fn consume_context<T: 'static + Clone>(&self) -> Option<T> {
if let Some(shared) = self.shared_contexts.borrow().get(&TypeId::of::<T>()) {
Some((*shared.downcast_ref::<T>().unwrap()).clone())
Some(
(*shared
.downcast_ref::<T>()
.expect("Context of type T should exist"))
.clone(),
)
} else {
let mut search_parent = self.parent_scope;
@@ -750,7 +742,12 @@ impl ScopeState {
// safety: all parent pointers are valid thanks to the bump arena
let parent = unsafe { &*parent_ptr };
if let Some(shared) = parent.shared_contexts.borrow().get(&TypeId::of::<T>()) {
return Some(shared.downcast_ref::<T>().unwrap().clone());
return Some(
shared
.downcast_ref::<T>()
.expect("Context of type T should exist")
.clone(),
);
}
search_parent = parent.parent_scope;
}
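`consume_context` reads from a `TypeId`-keyed map of type-erased values and walks up the parent chain until it finds one. A minimal single-level sketch of that lookup (the `HashMap<TypeId, Box<dyn Any>>` storage here is an assumption for illustration, not Dioxus's exact container):

```rust
use std::any::{Any, TypeId};
use std::collections::HashMap;

fn main() {
    // Shared state is stored type-erased, keyed by the concrete type's TypeId.
    let mut contexts: HashMap<TypeId, Box<dyn Any>> = HashMap::new();
    contexts.insert(TypeId::of::<String>(), Box::new("shared state".to_string()));

    // Consuming looks the TypeId up again and downcasts back to T, cloning the value out.
    let found: Option<String> = contexts
        .get(&TypeId::of::<String>())
        .map(|v| v.downcast_ref::<String>().expect("value stored under its own TypeId").clone());

    assert_eq!(found.as_deref(), Some("shared state"));
}
```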
@@ -764,12 +761,12 @@ impl ScopeState {
self.tasks
.sender
.unbounded_send(SchedulerMsg::NewTask(self.our_arena_idx))
.unwrap();
.expect("Scheduler should exist");
self.tasks.spawn(self.our_arena_idx, fut)
}
/// Spawns the future but does not return the TaskId
/// Spawns the future but does not return the [`TaskId`]
pub fn spawn(&self, fut: impl Future<Output = ()> + 'static) {
self.push_future(fut);
}
@@ -782,7 +779,7 @@ impl ScopeState {
self.tasks
.sender
.unbounded_send(SchedulerMsg::NewTask(self.our_arena_idx))
.unwrap();
.expect("Scheduler should exist");
// The root scope will never be unmounted so we can just add the task at the top of the app
self.tasks.spawn(ScopeId(0), fut)
@@ -794,9 +791,7 @@ impl ScopeState {
self.tasks.remove(id);
}
/// Take a lazy VNode structure and actually build it with the context of the VDom's efficient VNode allocator.
///
/// This function consumes the context and absorb the lifetime, so these VNodes *must* be returned.
/// Take a lazy [`VNode`] structure and actually build it with the context of the Vdoms efficient [`VNode`] allocator.
///
/// ## Example
///
@@ -865,25 +860,25 @@ impl ScopeState {
/// The "work in progress frame" represents the frame that is currently being worked on.
pub(crate) fn wip_frame(&self) -> &BumpFrame {
match self.generation.get() & 1 == 0 {
true => &self.frames[0],
false => &self.frames[1],
match self.generation.get() & 1 {
0 => &self.frames[0],
_ => &self.frames[1],
}
}
/// Mutable access to the "work in progress frame" - used to clear it
pub(crate) fn wip_frame_mut(&mut self) -> &mut BumpFrame {
match self.generation.get() & 1 == 0 {
true => &mut self.frames[0],
false => &mut self.frames[1],
match self.generation.get() & 1 {
0 => &mut self.frames[0],
_ => &mut self.frames[1],
}
}
/// Access to the frame where finalized nodes existed
pub(crate) fn fin_frame(&self) -> &BumpFrame {
match self.generation.get() & 1 == 1 {
true => &self.frames[0],
false => &self.frames[1],
match self.generation.get() & 1 {
1 => &self.frames[0],
_ => &self.frames[1],
}
}
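The `generation & 1` match above is a double-buffering scheme: the low bit of a monotonically increasing generation counter decides which of the two bump frames is "work in progress" and which holds finished nodes, and the roles swap every render. A toy sketch of the same selection logic, using `String` buffers instead of bump frames:

```rust
struct Frames {
    generation: usize,
    frames: [String; 2],
}

impl Frames {
    // Even generations write into frame 0, odd generations into frame 1.
    fn wip(&self) -> &String {
        match self.generation & 1 {
            0 => &self.frames[0],
            _ => &self.frames[1],
        }
    }
    // The finished frame is simply the other one.
    fn fin(&self) -> &String {
        match self.generation & 1 {
            1 => &self.frames[0],
            _ => &self.frames[1],
        }
    }
}

fn main() {
    let f = Frames { generation: 4, frames: ["new render".into(), "last render".into()] };
    assert_eq!(f.wip(), "new render");
    assert_eq!(f.fin(), "last render");
}
```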
@@ -891,7 +886,7 @@ impl ScopeState {
///
/// # Safety:
///
/// This method breaks every reference of VNodes in the current frame.
/// This method breaks every reference of every [`VNode`] in the current frame.
///
/// Calling reset itself is not usually a big deal, but we consider it important
/// due to the complex safety guarantees we need to uphold.
@@ -945,13 +940,14 @@ pub(crate) struct BumpFrame {
impl BumpFrame {
pub(crate) fn new(capacity: usize) -> Self {
let bump = Bump::with_capacity(capacity);
let node = &*bump.alloc(VText {
let node = bump.alloc(VText {
text: "placeholdertext",
id: Default::default(),
id: Cell::default(),
is_static: false,
});
let node = bump.alloc(VNode::Text(unsafe { std::mem::transmute(node) }));
let node = bump.alloc(VNode::Text(unsafe {
&*(node as *mut VText as *const VText)
}));
let nodes = Cell::new(node as *const _);
Self { bump, node: nodes }
}
@@ -960,12 +956,12 @@ impl BumpFrame {
self.bump.reset();
let node = self.bump.alloc(VText {
text: "placeholdertext",
id: Default::default(),
id: Cell::default(),
is_static: false,
});
let node = self
.bump
.alloc(VNode::Text(unsafe { std::mem::transmute(node) }));
let node = self.bump.alloc(VNode::Text(unsafe {
&*(node as *mut VText as *const VText)
}));
self.node.set(node as *const _);
}
}
@@ -1000,7 +996,7 @@ impl TaskQueue {
fn remove(&self, id: TaskId) {
if let Ok(mut tasks) = self.tasks.try_borrow_mut() {
let _ = tasks.remove(&id);
tasks.remove(&id);
if let Some(task_map) = self.task_map.borrow_mut().get_mut(&id.scope) {
task_map.remove(&id);
}

View file

@@ -1,4 +1,4 @@
use crate::innerlude::*;
use crate::innerlude::{VNode, VirtualDom};
/// An iterator that only yields "real" [`Element`]s. IE only Elements that are
/// not [`VNode::Component`] or [`VNode::Fragment`], .
@@ -44,11 +44,11 @@ impl<'a> Iterator for ElementIdIterator<'a> {
// If we get a fragment we push the next child
VNode::Fragment(frag) => {
let _count = *count as usize;
if _count >= frag.children.len() {
let count = *count as usize;
if count >= frag.children.len() {
should_pop = true;
} else {
should_push = Some(&frag.children[_count]);
should_push = Some(&frag.children[count]);
}
}

View file

@@ -1,4 +1,4 @@
//! # VirtualDom Implementation for Rust
//! # Virtual DOM Implementation for Rust
//!
//! This module provides the primary mechanics to create a hook-based, concurrent VDOM for Rust.
@@ -387,7 +387,7 @@ impl VirtualDom {
}
SchedulerMsg::Event(event) => {
if let Some(element) = event.element {
self.scopes.call_listener_with_bubbling(event, element);
self.scopes.call_listener_with_bubbling(&event, element);
}
}
SchedulerMsg::Immediate(s) => {