// rust-analyzer/crates/span/src/ast_id.rs

2019-10-29 08:15:51 +00:00
//! `AstIdMap` allows to create stable IDs for "large" syntax nodes like items
//! and macro calls.
//!
//! Specifically, it enumerates all items in a file and uses the position of an
//! item as an ID. That way, id's don't change unless the set of items itself
//! changes.
use std::{
2020-06-22 13:07:06 +00:00
any::type_name,
fmt,
2021-12-05 14:19:48 +00:00
hash::{BuildHasher, BuildHasherDefault, Hash, Hasher},
2019-10-29 08:15:51 +00:00
marker::PhantomData,
};
use la_arena::{Arena, Idx, RawIdx};
2021-12-05 14:19:48 +00:00
use rustc_hash::FxHasher;
2022-08-15 14:16:59 +00:00
use syntax::{ast, AstNode, AstPtr, SyntaxNode, SyntaxNodePtr};
2019-10-29 08:15:51 +00:00
2024-03-01 14:39:44 +00:00
/// See crates\hir-expand\src\ast_id_map.rs
/// This is a type erased FileAstId.
2024-08-03 17:16:56 +00:00
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ErasedFileAstId(u32);
impl ErasedFileAstId {
pub const fn into_raw(self) -> u32 {
self.0
}
pub const fn from_raw(u32: u32) -> Self {
Self(u32)
}
}
impl fmt::Display for ErasedFileAstId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the inner `u32`, forwarding the formatter so that
        // width/fill/alignment flags are honored.
        fmt::Display::fmt(&self.0, f)
    }
}
impl fmt::Debug for ErasedFileAstId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Same representation as `Display`: just the raw index, with the
        // formatter (and its flags) forwarded to the inner `u32`.
        fmt::Debug::fmt(&self.0, f)
    }
}
2023-11-24 15:38:48 +00:00
2019-10-29 08:15:51 +00:00
/// `AstId` points to an AST node in a specific file.
pub struct FileAstId<N: AstIdNode> {
    raw: ErasedFileAstId,
    // `PhantomData<fn() -> N>` records the node type without storing an `N`:
    // `fn() -> N` is covariant in `N`, and a fn pointer is always
    // `Send + Sync`, so `FileAstId` is too, regardless of `N`.
    covariant: PhantomData<fn() -> N>,
}
2023-07-04 07:16:15 +00:00
// These impls are written by hand because `#[derive(...)]` would add an
// unnecessary `N: Clone`/`N: PartialEq`/... bound — `FileAstId<N>` only
// stores the erased raw id, never an actual `N`.
impl<N: AstIdNode> Clone for FileAstId<N> {
    fn clone(&self) -> FileAstId<N> {
        *self
    }
}
impl<N: AstIdNode> Copy for FileAstId<N> {}

impl<N: AstIdNode> PartialEq for FileAstId<N> {
    fn eq(&self, other: &Self) -> bool {
        self.raw == other.raw
    }
}
impl<N: AstIdNode> Eq for FileAstId<N> {}
impl<N: AstIdNode> Hash for FileAstId<N> {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        self.raw.hash(hasher);
    }
}
2023-07-04 07:16:15 +00:00
impl<N: AstIdNode> fmt::Debug for FileAstId<N> {
2020-06-22 13:07:06 +00:00
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2024-08-03 17:16:56 +00:00
write!(f, "FileAstId::<{}>({})", type_name::<N>(), self.raw)
2020-06-22 13:07:06 +00:00
}
}
2023-07-04 07:16:15 +00:00
impl<N: AstIdNode> FileAstId<N> {
    // Can't make this a From implementation because of coherence
    /// Re-types this id as an id of a broader node type `M` (e.g. a specific
    /// item kind into `ast::Item`). The underlying raw id is unchanged.
    pub fn upcast<M: AstIdNode>(self) -> FileAstId<M>
    where
        N: Into<M>,
    {
        FileAstId { raw: self.raw, covariant: PhantomData }
    }

    /// Discards the node type, returning the type-erased id.
    pub fn erase(self) -> ErasedFileAstId {
        self.raw
    }
}
2023-07-04 07:16:15 +00:00
/// Marker trait for AST node types that can be given a stable [`FileAstId`].
pub trait AstIdNode: AstNode {}

// Implements `AstIdNode` for each listed node type and generates
// `should_alloc_id`, which reports whether a node of the given kind gets an
// id in `AstIdMap::from_source`.
macro_rules! register_ast_id_node {
    (impl AstIdNode for $($ident:ident),+ ) => {
        $(
            impl AstIdNode for ast::$ident {}
        )+
        // Expands to `can_cast(kind) || can_cast(kind) || ...` over all
        // registered node types.
        fn should_alloc_id(kind: syntax::SyntaxKind) -> bool {
            $(
                ast::$ident::can_cast(kind)
            )||+
        }
    };
}
// The set of node kinds that receive stable ids. Items, macro calls, block
// expressions and const arguments are the nodes other crates need to refer
// to across reparses.
register_ast_id_node! {
    impl AstIdNode for
    Item, AnyHasGenericParams,
        Adt,
            Enum,
                Variant,
            Struct,
            Union,
        AssocItem,
            Const,
            Fn,
            MacroCall,
            TypeAlias,
        ExternBlock,
        ExternCrate,
        Impl,
        Macro,
        MacroDef,
        MacroRules,
        Module,
        Static,
        Trait,
        TraitAlias,
        Use,
    BlockExpr, ConstArg
}
2019-10-29 08:15:51 +00:00
/// Maps items' `SyntaxNode`s to `ErasedFileAstId`s and back.
#[derive(Default)]
pub struct AstIdMap {
    /// Maps stable id to unstable ptr.
    arena: Arena<SyntaxNodePtr>,
    /// Reverse: map ptr to id.
    // A raw `hashbrown` map with a unit hasher `()`: keys are arena indices,
    // and hashes are computed manually from the pointed-to `SyntaxNodePtr`
    // (see `hash_ptr` and `from_source`), so each ptr is stored only once —
    // in the arena.
    map: hashbrown::HashMap<Idx<SyntaxNodePtr>, (), ()>,
}
impl fmt::Debug for AstIdMap {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `map` is omitted: it is rebuilt from `arena` (see `from_source`)
        // and carries no extra information.
        f.debug_struct("AstIdMap").field("arena", &self.arena).finish()
    }
}
impl PartialEq for AstIdMap {
    fn eq(&self, other: &Self) -> bool {
        // `map` is fully determined by `arena` (see `from_source`), so
        // comparing the arenas alone is sufficient.
        self.arena == other.arena
    }
}
impl Eq for AstIdMap {}
2019-10-29 08:15:51 +00:00
impl AstIdMap {
    /// Builds the id map for a whole syntax tree.
    ///
    /// # Panics
    /// Panics if `node` is not the root of its tree (it must have no parent).
    pub fn from_source(node: &SyntaxNode) -> AstIdMap {
        assert!(node.parent().is_none());
        let mut res = AstIdMap::default();

        // make sure to allocate the root node, even when its kind would not
        // normally get an id — `root()` relies on index 0 being the root
        if !should_alloc_id(node.kind()) {
            res.alloc(node);
        }
        // By walking the tree in breadth-first order we make sure that parents
        // get lower ids than children. That is, adding a new child does not
        // change parent's id. This means that, say, adding a new function to a
        // trait does not change ids of top-level items, which helps caching.
        bdfs(node, |it| {
            if should_alloc_id(it.kind()) {
                res.alloc(&it);
                TreeOrder::BreadthFirst
            } else {
                TreeOrder::DepthFirst
            }
        });
        // Build the reverse (ptr -> id) index. Only arena indices are stored
        // as keys; the hash of an index is the hash of the `SyntaxNodePtr` it
        // resolves to, computed manually via `hash_ptr`.
        res.map = hashbrown::HashMap::with_capacity_and_hasher(res.arena.len(), ());
        for (idx, ptr) in res.arena.iter() {
            let hash = hash_ptr(ptr);
            match res.map.raw_entry_mut().from_hash(hash, |idx2| *idx2 == idx) {
                // Every arena index is inserted exactly once, so a hit here
                // would be a bug.
                hashbrown::hash_map::RawEntryMut::Occupied(_) => unreachable!(),
                hashbrown::hash_map::RawEntryMut::Vacant(entry) => {
                    entry.insert_with_hasher(hash, idx, (), |&idx| hash_ptr(&res.arena[idx]));
                }
            }
        }
        res.arena.shrink_to_fit();
        res
    }

    /// The [`AstId`] of the root node
    pub fn root(&self) -> SyntaxNodePtr {
        // Index 0 is always the root: `from_source` allocates it first.
        self.arena[Idx::from_raw(RawIdx::from_u32(0))]
    }

    /// Returns the stable id of `item`.
    ///
    /// # Panics
    /// Panics if `item` is not registered in this map (its kind is not in
    /// `register_ast_id_node!`, or it belongs to a different tree).
    pub fn ast_id<N: AstIdNode>(&self, item: &N) -> FileAstId<N> {
        let raw = self.erased_ast_id(item.syntax());
        FileAstId { raw, covariant: PhantomData }
    }

    /// Like [`AstIdMap::ast_id`], but looks up by pointer instead of node.
    ///
    /// # Panics
    /// Panics if `ptr` does not point at a node registered in this map.
    pub fn ast_id_for_ptr<N: AstIdNode>(&self, ptr: AstPtr<N>) -> FileAstId<N> {
        let ptr = ptr.syntax_node_ptr();
        let hash = hash_ptr(&ptr);
        // Manual-hash lookup: equality is checked against the arena entry the
        // candidate index points at.
        match self.map.raw_entry().from_hash(hash, |&idx| self.arena[idx] == ptr) {
            Some((&raw, &())) => FileAstId {
                raw: ErasedFileAstId(raw.into_raw().into_u32()),
                covariant: PhantomData,
            },
            None => panic!(
                "Can't find {:?} in AstIdMap:\n{:?}",
                ptr,
                self.arena.iter().map(|(_id, i)| i).collect::<Vec<_>>(),
            ),
        }
    }

    /// Resolves an id back to a typed pointer into the tree.
    pub fn get<N: AstIdNode>(&self, id: FileAstId<N>) -> AstPtr<N> {
        // The unwrap holds as long as `id` was produced by this map for a
        // node of type `N`, so the stored ptr casts back to `N`.
        AstPtr::try_from_raw(self.arena[Idx::from_raw(RawIdx::from_u32(id.raw.into_raw()))])
            .unwrap()
    }

    /// Resolves a type-erased id back to an untyped pointer.
    pub fn get_erased(&self, id: ErasedFileAstId) -> SyntaxNodePtr {
        self.arena[Idx::from_raw(RawIdx::from_u32(id.into_raw()))]
    }

    // Reverse lookup (ptr -> erased id) via the manual-hash `map`.
    // Panics if `item` is not registered; see `ast_id`.
    fn erased_ast_id(&self, item: &SyntaxNode) -> ErasedFileAstId {
        let ptr = SyntaxNodePtr::new(item);
        let hash = hash_ptr(&ptr);
        match self.map.raw_entry().from_hash(hash, |&idx| self.arena[idx] == ptr) {
            Some((&idx, &())) => ErasedFileAstId(idx.into_raw().into_u32()),
            None => panic!(
                "Can't find {:?} in AstIdMap:\n{:?}",
                item,
                self.arena.iter().map(|(_id, i)| i).collect::<Vec<_>>(),
            ),
        }
    }

    // Appends `item`'s ptr to the arena; the new arena index is its id.
    fn alloc(&mut self, item: &SyntaxNode) -> ErasedFileAstId {
        ErasedFileAstId(self.arena.alloc(SyntaxNodePtr::new(item)).into_raw().into_u32())
    }
}
2021-12-05 14:19:48 +00:00
fn hash_ptr(ptr: &SyntaxNodePtr) -> u64 {
2024-01-18 12:59:49 +00:00
BuildHasherDefault::<FxHasher>::default().hash_one(ptr)
2021-12-05 14:19:48 +00:00
}
2023-11-24 15:38:48 +00:00
/// Tells [`bdfs`] how to continue below a just-visited node.
#[derive(Copy, Clone, PartialEq, Eq)]
enum TreeOrder {
    /// Queue the node's children for the next breadth-first layer and skip
    /// the rest of its subtree for now.
    BreadthFirst,
    /// Keep walking into the subtree depth-first right away.
    DepthFirst,
}
/// Walks the subtree in bdfs order, calling `f` for each node. What is bdfs
/// order? It is a mix of breadth-first and depth first orders. Nodes for which
2023-11-24 15:38:48 +00:00
/// `f` returns [`TreeOrder::BreadthFirst`] are visited breadth-first, all the other nodes are explored
/// [`TreeOrder::DepthFirst`].
///
/// In other words, the size of the bfs queue is bound by the number of "true"
/// nodes.
2023-11-24 15:38:48 +00:00
fn bdfs(node: &SyntaxNode, mut f: impl FnMut(SyntaxNode) -> TreeOrder) {
2019-10-29 08:15:51 +00:00
let mut curr_layer = vec![node.clone()];
let mut next_layer = vec![];
while !curr_layer.is_empty() {
curr_layer.drain(..).for_each(|node| {
let mut preorder = node.preorder();
while let Some(event) = preorder.next() {
match event {
syntax::WalkEvent::Enter(node) => {
2023-11-24 15:38:48 +00:00
if f(node.clone()) == TreeOrder::BreadthFirst {
next_layer.extend(node.children());
preorder.skip_subtree();
}
}
syntax::WalkEvent::Leave(_) => {}
}
}
2019-10-29 08:15:51 +00:00
});
std::mem::swap(&mut curr_layer, &mut next_layer);
}
}