//! `AstIdMap` allows to create stable IDs for "large" syntax nodes like items.
//!
//! Specifically, it enumerates all items in a file and uses the position of an
//! item as an ID. That way, ids don't change unless the set of items itself
//! changes.
use std::{
    any::type_name,
    fmt,
    hash::{BuildHasher, BuildHasherDefault, Hash, Hasher},
    marker::PhantomData,
};

use la_arena::{Arena, Idx};
use rustc_hash::FxHasher;
use syntax::{ast, AstNode, AstPtr, SyntaxNode, SyntaxNodePtr};
20 /// `AstId` points to an AST node in a specific file.
21 pub struct FileAstId<N: AstNode> {
23 _ty: PhantomData<fn() -> N>,
26 impl<N: AstNode> Clone for FileAstId<N> {
27 fn clone(&self) -> FileAstId<N> {
31 impl<N: AstNode> Copy for FileAstId<N> {}
33 impl<N: AstNode> PartialEq for FileAstId<N> {
34 fn eq(&self, other: &Self) -> bool {
38 impl<N: AstNode> Eq for FileAstId<N> {}
39 impl<N: AstNode> Hash for FileAstId<N> {
40 fn hash<H: Hasher>(&self, hasher: &mut H) {
41 self.raw.hash(hasher);
45 impl<N: AstNode> fmt::Debug for FileAstId<N> {
46 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
47 write!(f, "FileAstId::<{}>({})", type_name::<N>(), self.raw.into_raw())
51 impl<N: AstNode> FileAstId<N> {
52 // Can't make this a From implementation because of coherence
53 pub fn upcast<M: AstNode>(self) -> FileAstId<M>
57 FileAstId { raw: self.raw, _ty: PhantomData }
61 type ErasedFileAstId = Idx<SyntaxNodePtr>;
63 /// Maps items' `SyntaxNode`s to `ErasedFileAstId`s and back.
66 /// Maps stable id to unstable ptr.
67 arena: Arena<SyntaxNodePtr>,
68 /// Reverse: map ptr to id.
69 map: hashbrown::HashMap<Idx<SyntaxNodePtr>, (), ()>,
73 impl fmt::Debug for AstIdMap {
74 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
75 f.debug_struct("AstIdMap").field("arena", &self.arena).finish()
79 impl PartialEq for AstIdMap {
80 fn eq(&self, other: &Self) -> bool {
81 self.arena == other.arena
84 impl Eq for AstIdMap {}
87 pub(crate) fn from_source(node: &SyntaxNode) -> AstIdMap {
88 assert!(node.parent().is_none());
89 let mut res = AstIdMap::default();
90 // By walking the tree in breadth-first order we make sure that parents
91 // get lower ids then children. That is, adding a new child does not
92 // change parent's id. This means that, say, adding a new function to a
93 // trait does not change ids of top-level items, which helps caching.
96 if ast::Item::can_cast(kind)
97 || ast::BlockExpr::can_cast(kind)
98 || ast::Variant::can_cast(kind)
99 || ast::RecordField::can_cast(kind)
100 || ast::TupleField::can_cast(kind)
108 res.map = hashbrown::HashMap::with_capacity_and_hasher(res.arena.len(), ());
109 for (idx, ptr) in res.arena.iter() {
110 let hash = hash_ptr(ptr);
111 match res.map.raw_entry_mut().from_hash(hash, |idx2| *idx2 == idx) {
112 hashbrown::hash_map::RawEntryMut::Occupied(_) => unreachable!(),
113 hashbrown::hash_map::RawEntryMut::Vacant(entry) => {
114 entry.insert_with_hasher(hash, idx, (), |&idx| hash_ptr(&res.arena[idx]));
121 pub fn ast_id<N: AstNode>(&self, item: &N) -> FileAstId<N> {
122 let raw = self.erased_ast_id(item.syntax());
123 FileAstId { raw, _ty: PhantomData }
126 fn erased_ast_id(&self, item: &SyntaxNode) -> ErasedFileAstId {
127 let ptr = SyntaxNodePtr::new(item);
128 let hash = hash_ptr(&ptr);
129 match self.map.raw_entry().from_hash(hash, |&idx| self.arena[idx] == ptr) {
130 Some((&idx, &())) => idx,
132 "Can't find {:?} in AstIdMap:\n{:?}",
134 self.arena.iter().map(|(_id, i)| i).collect::<Vec<_>>(),
139 pub fn get<N: AstNode>(&self, id: FileAstId<N>) -> AstPtr<N> {
140 AstPtr::try_from_raw(self.arena[id.raw].clone()).unwrap()
143 fn alloc(&mut self, item: &SyntaxNode) -> ErasedFileAstId {
144 self.arena.alloc(SyntaxNodePtr::new(item))
148 fn hash_ptr(ptr: &SyntaxNodePtr) -> u64 {
149 let mut hasher = BuildHasherDefault::<FxHasher>::default().build_hasher();
150 ptr.hash(&mut hasher);
154 /// Walks the subtree in bdfs order, calling `f` for each node. What is bdfs
155 /// order? It is a mix of breadth-first and depth first orders. Nodes for which
156 /// `f` returns true are visited breadth-first, all the other nodes are explored
159 /// In other words, the size of the bfs queue is bound by the number of "true"
161 fn bdfs(node: &SyntaxNode, mut f: impl FnMut(SyntaxNode) -> bool) {
162 let mut curr_layer = vec![node.clone()];
163 let mut next_layer = vec![];
164 while !curr_layer.is_empty() {
165 curr_layer.drain(..).for_each(|node| {
166 let mut preorder = node.preorder();
167 while let Some(event) = preorder.next() {
169 syntax::WalkEvent::Enter(node) => {
171 next_layer.extend(node.children());
172 preorder.skip_subtree();
175 syntax::WalkEvent::Leave(_) => {}
179 std::mem::swap(&mut curr_layer, &mut next_layer);