//! Conversions between [`SyntaxNode`] and [`tt::TokenTree`].

use rustc_hash::{FxHashMap, FxHashSet};
use stdx::non_empty_vec::NonEmptyVec;
use syntax::{
    ast::{self, make::tokens::doc_comment},
    AstToken, Parse, PreorderWithTokens, SmolStr, SyntaxElement, SyntaxKind,
    SyntaxKind::*,
    SyntaxNode, SyntaxToken, SyntaxTreeBuilder, TextRange, TextSize, WalkEvent, T,
};
use tt::buffer::{Cursor, TokenBuffer};

use crate::{to_parser_input::to_parser_input, tt_iter::TtIter, ExpandError, TokenMap};
/// Convert the syntax node to a `TokenTree` (what macros
/// will consume).
pub fn syntax_node_to_token_tree(node: &SyntaxNode) -> (tt::Subtree, TokenMap) {
    syntax_node_to_token_tree_censored(node, &Default::default())
}
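
// Illustrative sketch only (not part of the original module): how the
// conversion above is typically driven from a parsed file. The fixture text
// and the test name are assumptions made for this example.
#[test]
fn example_syntax_node_to_token_tree() {
    let parse = ast::SourceFile::parse("fn main() {}");
    let (subtree, _token_map) = syntax_node_to_token_tree(&parse.syntax_node());

    // The root subtree carries no delimiter; `fn` and `main` become ident
    // leaves, while `()` and `{}` become delimited subtrees. Whitespace is
    // dropped during the conversion.
    assert!(subtree.delimiter.is_none());
    assert_eq!(subtree.token_trees.len(), 4);
}
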
/// Convert the syntax node to a `TokenTree` (what macros will consume),
/// with the nodes in `censor` (and their subtrees) excluded.
pub fn syntax_node_to_token_tree_censored(
    node: &SyntaxNode,
    censor: &FxHashSet<SyntaxNode>,
) -> (tt::Subtree, TokenMap) {
    let global_offset = node.text_range().start();
    let mut c = Convertor::new(node, global_offset, censor);
    let subtree = convert_tokens(&mut c);
    c.id_alloc.map.shrink_to_fit();
    (subtree, c.id_alloc.map)
}
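
// Illustrative sketch only: censoring removes whole nodes (here the body of
// `f`) from the produced token tree. The fixture and names are assumptions
// made for this example, not part of the original file.
#[test]
fn example_syntax_node_to_token_tree_censored() {
    use syntax::AstNode;

    let parse = ast::SourceFile::parse("fn f() { 1; } fn g() {}");
    let file = parse.tree();
    let body = file.syntax().descendants().find_map(ast::BlockExpr::cast).unwrap();

    let mut censor = FxHashSet::default();
    censor.insert(body.syntax().clone());

    let (tt, _) = syntax_node_to_token_tree_censored(file.syntax(), &censor);
    // The censored block (and everything inside it) is gone; the rest remains.
    assert!(!tt.to_string().contains('1'));
    assert!(tt.to_string().contains('g'));
}
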
// The following items are what a `rustc` macro can be parsed into:
// link: https://github.com/rust-lang/rust/blob/9ebf47851a357faa4cd97f4b1dc7835f6376e639/src/libsyntax/ext/expand.rs#L141
// * Expr(P<ast::Expr>) -> token_tree_to_expr
// * Pat(P<ast::Pat>) -> token_tree_to_pat
// * Ty(P<ast::Ty>) -> token_tree_to_ty
// * Stmts(SmallVec<[ast::Stmt; 1]>) -> token_tree_to_stmts
// * Items(SmallVec<[P<ast::Item>; 1]>) -> token_tree_to_items
//
// * TraitItems(SmallVec<[ast::TraitItem; 1]>)
// * AssocItems(SmallVec<[ast::AssocItem; 1]>)
// * ForeignItems(SmallVec<[ast::ForeignItem; 1]>)
pub fn token_tree_to_syntax_node(
    tt: &tt::Subtree,
    entry_point: parser::TopEntryPoint,
) -> Result<(Parse<SyntaxNode>, TokenMap), ExpandError> {
    let buffer = match tt {
        tt::Subtree { delimiter: None, token_trees } => {
            TokenBuffer::from_tokens(token_trees.as_slice())
        }
        _ => TokenBuffer::from_subtree(tt),
    };
    let parser_input = to_parser_input(&buffer);
    let parser_output = entry_point.parse(&parser_input);
    let mut tree_sink = TtTreeSink::new(buffer.begin());
    for event in parser_output.iter() {
        match event {
            parser::Step::Token { kind, n_input_tokens: n_raw_tokens } => {
                tree_sink.token(kind, n_raw_tokens)
            }
            parser::Step::Enter { kind } => tree_sink.start_node(kind),
            parser::Step::Exit => tree_sink.finish_node(),
            parser::Step::Error { msg } => tree_sink.error(msg.to_string()),
        }
    }
    if tree_sink.roots.len() != 1 {
        return Err(ExpandError::ConversionError);
    }
    // FIXME: would be cool to report errors
    let (parse, range_map) = tree_sink.finish();
    Ok((parse, range_map))
}
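
// Illustrative sketch only: a token tree produced by `syntax_node_to_token_tree`
// can be parsed back with a matching entry point. The re-parsed tree does not
// preserve the original whitespace, so we compare token trees rather than
// text. The fixture and the test name are assumptions made for this example.
#[test]
fn example_token_tree_round_trip() {
    let parse = ast::SourceFile::parse("struct S;");
    let (tt, _) = syntax_node_to_token_tree(&parse.syntax_node());

    let (reparsed, _map) =
        token_tree_to_syntax_node(&tt, parser::TopEntryPoint::SourceFile).unwrap();
    assert_eq!(reparsed.syntax_node().kind(), SyntaxKind::SOURCE_FILE);

    // Lowering the re-parsed tree again yields the same token sequence.
    let (tt2, _) = syntax_node_to_token_tree(&reparsed.syntax_node());
    assert_eq!(tt.to_string(), tt2.to_string());
}
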
/// Convert a string to a `TokenTree`.
pub fn parse_to_token_tree(text: &str) -> Option<(tt::Subtree, TokenMap)> {
    let lexed = parser::LexedStr::new(text);
    if lexed.errors().next().is_some() {
        return None;
    }

    let mut conv = RawConvertor {
        lexed,
        pos: 0,
        id_alloc: TokenIdAlloc {
            map: Default::default(),
            global_offset: TextSize::default(),
            next_id: 0,
        },
    };

    let subtree = convert_tokens(&mut conv);
    Some((subtree, conv.id_alloc.map))
}
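
// Illustrative sketch only: `parse_to_token_tree` lexes a fragment directly,
// without building a syntax tree first, and bails out on lex errors. The
// fixture strings and the test name are assumptions made for this example.
#[test]
fn example_parse_to_token_tree() {
    let (subtree, _map) = parse_to_token_tree("1 + 2").unwrap();
    // literal `1`, punct `+`, literal `2`; whitespace is dropped
    assert_eq!(subtree.token_trees.len(), 3);

    // An unterminated string is a lex error, so no token tree is produced.
    assert!(parse_to_token_tree("\"unterminated").is_none());
}
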
/// Split a token tree containing expressions separated by `sep`, as in `$($e:expr)SEP*`.
pub fn parse_exprs_with_sep(tt: &tt::Subtree, sep: char) -> Vec<tt::Subtree> {
    if tt.token_trees.is_empty() {
        return Vec::new();
    }

    let mut iter = TtIter::new(tt);
    let mut res = Vec::new();

    while iter.peek_n(0).is_some() {
        let expanded = iter.expect_fragment(parser::PrefixEntryPoint::Expr);

        res.push(match expanded.value {
            None => break,
            Some(tt @ tt::TokenTree::Leaf(_)) => {
                tt::Subtree { delimiter: None, token_trees: vec![tt] }
            }
            Some(tt::TokenTree::Subtree(tt)) => tt,
        });

        let mut fork = iter.clone();
        if fork.expect_char(sep).is_err() {
            break;
        }
        iter = fork;
    }

    if iter.peek_n(0).is_some() {
        res.push(tt::Subtree { delimiter: None, token_trees: iter.into_iter().cloned().collect() });
    }

    res
}
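
// Illustrative sketch only: splitting `1, 2 + 3` on `,` yields one subtree per
// expression. The fixture and the test name are assumptions made for this example.
#[test]
fn example_parse_exprs_with_sep() {
    let (tt, _) = parse_to_token_tree("1, 2 + 3").unwrap();
    let exprs = parse_exprs_with_sep(&tt, ',');
    assert_eq!(exprs.len(), 2);
}
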
fn convert_tokens<C: TokenConvertor>(conv: &mut C) -> tt::Subtree {
    struct StackEntry {
        subtree: tt::Subtree,
        idx: usize,
        open_range: TextRange,
    }

    let entry = StackEntry {
        subtree: tt::Subtree { delimiter: None, ..Default::default() },
        // never used (delimiter is `None`)
        idx: !0,
        open_range: TextRange::empty(TextSize::of('.')),
    };
    let mut stack = NonEmptyVec::new(entry);

    loop {
        let StackEntry { subtree, .. } = stack.last_mut();
        let result = &mut subtree.token_trees;
        let (token, range) = match conv.bump() {
            Some(it) => it,
            None => break,
        };

        let kind = token.kind(&conv);
        if kind == COMMENT {
            if let Some(tokens) = conv.convert_doc_comment(&token) {
                // FIXME: There has to be a better way to do this
                // Add the comment's token id to the converted doc string
                let id = conv.id_alloc().alloc(range);
                result.extend(tokens.into_iter().map(|mut tt| {
                    if let tt::TokenTree::Subtree(sub) = &mut tt {
                        if let Some(tt::TokenTree::Leaf(tt::Leaf::Literal(lit))) =
                            sub.token_trees.get_mut(2)
                        {
                            lit.id = id
                        }
                    }
                    tt
                }));
            }
            continue;
        }
        let tt = if kind.is_punct() && kind != UNDERSCORE {
            assert_eq!(range.len(), TextSize::of('.'));

            if let Some(delim) = subtree.delimiter {
                let expected = match delim.kind {
                    tt::DelimiterKind::Parenthesis => T![')'],
                    tt::DelimiterKind::Brace => T!['}'],
                    tt::DelimiterKind::Bracket => T![']'],
                };

                if kind == expected {
                    if let Some(entry) = stack.pop() {
                        conv.id_alloc().close_delim(entry.idx, Some(range));
                        stack.last_mut().subtree.token_trees.push(entry.subtree.into());
                    }
                    continue;
                }
            }

            let delim = match kind {
                T!['('] => Some(tt::DelimiterKind::Parenthesis),
                T!['{'] => Some(tt::DelimiterKind::Brace),
                T!['['] => Some(tt::DelimiterKind::Bracket),
                _ => None,
            };

            if let Some(kind) = delim {
                let mut subtree = tt::Subtree::default();
                let (id, idx) = conv.id_alloc().open_delim(range);
                subtree.delimiter = Some(tt::Delimiter { id, kind });
                stack.push(StackEntry { subtree, idx, open_range: range });
                continue;
            }

            let spacing = match conv.peek().map(|next| next.kind(&conv)) {
                Some(kind)
                    if !kind.is_trivia()
                        && kind.is_punct()
                        && kind != T!['[']
                        && kind != T!['{']
                        && kind != T!['(']
                        && kind != UNDERSCORE =>
                {
                    tt::Spacing::Joint
                }
                _ => tt::Spacing::Alone,
            };
            let char = match token.to_char(&conv) {
                Some(c) => c,
                None => {
                    panic!("Token from lexer must be single char: token = {:#?}", token);
                }
            };
            tt::Leaf::from(tt::Punct { char, spacing, id: conv.id_alloc().alloc(range) }).into()
        } else {
            macro_rules! make_leaf {
                ($i:ident) => {
                    tt::$i { id: conv.id_alloc().alloc(range), text: token.to_text(conv) }.into()
                };
            }
            let leaf: tt::Leaf = match kind {
                T![true] | T![false] => make_leaf!(Ident),
                IDENT => make_leaf!(Ident),
                UNDERSCORE => make_leaf!(Ident),
                k if k.is_keyword() => make_leaf!(Ident),
                k if k.is_literal() => make_leaf!(Literal),
                LIFETIME_IDENT => {
                    let char_unit = TextSize::of('\'');
                    let r = TextRange::at(range.start(), char_unit);
                    let apostrophe = tt::Leaf::from(tt::Punct {
                        char: '\'',
                        spacing: tt::Spacing::Joint,
                        id: conv.id_alloc().alloc(r),
                    });
                    result.push(apostrophe.into());

                    let r = TextRange::at(range.start() + char_unit, range.len() - char_unit);
                    let ident = tt::Leaf::from(tt::Ident {
                        text: SmolStr::new(&token.to_text(conv)[1..]),
                        id: conv.id_alloc().alloc(r),
                    });
                    result.push(ident.into());
                    continue;
                }
                _ => continue,
            };

            leaf.into()
        };
        result.push(tt);
    }
    // If we get here, we've consumed all input tokens.
    // We might have more than one subtree in the stack if the delimiters are improperly balanced.
    // Merge them so we're left with one.
    while let Some(entry) = stack.pop() {
        let parent = stack.last_mut();

        conv.id_alloc().close_delim(entry.idx, None);
        let leaf: tt::Leaf = tt::Punct {
            id: conv.id_alloc().alloc(entry.open_range),
            char: match entry.subtree.delimiter.unwrap().kind {
                tt::DelimiterKind::Parenthesis => '(',
                tt::DelimiterKind::Brace => '{',
                tt::DelimiterKind::Bracket => '[',
            },
            spacing: tt::Spacing::Alone,
        }
        .into();
        parent.subtree.token_trees.push(leaf.into());
        parent.subtree.token_trees.extend(entry.subtree.token_trees);
    }

    let subtree = stack.into_first().subtree;
    if let [tt::TokenTree::Subtree(first)] = &*subtree.token_trees {
        first.clone()
    } else {
        subtree
    }
}
/// Returns the textual content of a doc comment block as a quoted string.
/// That is, it strips the leading `///` (or `/**`, etc.) and the trailing `*/`,
/// and then quotes the remaining text, which is needed to convert it into a
/// `tt::Literal`.
fn doc_comment_text(comment: &ast::Comment) -> SmolStr {
    let prefix_len = comment.prefix().len();
    let mut text = &comment.text()[prefix_len..];

    // Remove the trailing "*/"
    if comment.kind().shape == ast::CommentShape::Block {
        text = &text[0..text.len() - 2];
    }

    // Quote the string.
    // Note that `tt::Literal` expects an escaped string.
    let text = format!("\"{}\"", text.escape_debug());
    text.into()
}
fn convert_doc_comment(token: &syntax::SyntaxToken) -> Option<Vec<tt::TokenTree>> {
    cov_mark::hit!(test_meta_doc_comments);
    let comment = ast::Comment::cast(token.clone())?;
    let doc = comment.kind().doc?;

    // Make the `doc = "..."` part, e.g. `doc = " Comments"`.
    let meta_tkns = vec![mk_ident("doc"), mk_punct('='), mk_doc_literal(&comment)];

    // Make the surrounding `#[]` (or `#![]` for inner doc comments).
    let mut token_trees = Vec::with_capacity(3);
    token_trees.push(mk_punct('#'));
    if let ast::CommentPlacement::Inner = doc {
        token_trees.push(mk_punct('!'));
    }
    token_trees.push(tt::TokenTree::from(tt::Subtree {
        delimiter: Some(tt::Delimiter {
            kind: tt::DelimiterKind::Bracket,
            id: tt::TokenId::unspecified(),
        }),
        token_trees: meta_tkns,
    }));

    return Some(token_trees);

    fn mk_ident(s: &str) -> tt::TokenTree {
        tt::TokenTree::from(tt::Leaf::from(tt::Ident {
            text: s.into(),
            id: tt::TokenId::unspecified(),
        }))
    }

    fn mk_punct(c: char) -> tt::TokenTree {
        tt::TokenTree::from(tt::Leaf::from(tt::Punct {
            char: c,
            spacing: tt::Spacing::Alone,
            id: tt::TokenId::unspecified(),
        }))
    }

    fn mk_doc_literal(comment: &ast::Comment) -> tt::TokenTree {
        let lit = tt::Literal { text: doc_comment_text(comment), id: tt::TokenId::unspecified() };

        tt::TokenTree::from(tt::Leaf::from(lit))
    }
}
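
// Illustrative sketch only: an outer doc comment is lowered to the token
// trees of a `#[doc = "..."]` attribute, with the comment text quoted by
// `doc_comment_text`. The fixture and the test name are assumptions made for
// this example.
#[test]
fn example_doc_comment_lowering() {
    let parse = ast::SourceFile::parse("/// Hello\nfn f() {}");
    let (tt, _) = syntax_node_to_token_tree(&parse.syntax_node());

    let rendered = tt.to_string();
    assert!(rendered.contains("doc"));
    assert!(rendered.contains(r#"" Hello""#));
}
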
struct TokenIdAlloc {
    map: TokenMap,
    global_offset: TextSize,
    next_id: u32,
}

impl TokenIdAlloc {
    fn alloc(&mut self, absolute_range: TextRange) -> tt::TokenId {
        let relative_range = absolute_range - self.global_offset;
        let token_id = tt::TokenId(self.next_id);
        self.next_id += 1;
        self.map.insert(token_id, relative_range);
        token_id
    }

    fn open_delim(&mut self, open_abs_range: TextRange) -> (tt::TokenId, usize) {
        let token_id = tt::TokenId(self.next_id);
        self.next_id += 1;
        let idx = self.map.insert_delim(
            token_id,
            open_abs_range - self.global_offset,
            open_abs_range - self.global_offset,
        );
        (token_id, idx)
    }

    fn close_delim(&mut self, idx: usize, close_abs_range: Option<TextRange>) {
        match close_abs_range {
            None => {
                self.map.remove_delim(idx);
            }
            Some(close) => {
                self.map.update_close_delim(idx, close - self.global_offset);
            }
        }
    }
}
/// A raw token (straight from the lexer) convertor
struct RawConvertor<'a> {
    lexed: parser::LexedStr<'a>,
    pos: usize,
    id_alloc: TokenIdAlloc,
}

trait SrcToken<Ctx>: std::fmt::Debug {
    fn kind(&self, ctx: &Ctx) -> SyntaxKind;

    fn to_char(&self, ctx: &Ctx) -> Option<char>;

    fn to_text(&self, ctx: &Ctx) -> SmolStr;
}

trait TokenConvertor: Sized {
    type Token: SrcToken<Self>;

    fn convert_doc_comment(&self, token: &Self::Token) -> Option<Vec<tt::TokenTree>>;

    fn bump(&mut self) -> Option<(Self::Token, TextRange)>;

    fn peek(&self) -> Option<Self::Token>;

    fn id_alloc(&mut self) -> &mut TokenIdAlloc;
}
impl<'a> SrcToken<RawConvertor<'a>> for usize {
    fn kind(&self, ctx: &RawConvertor<'a>) -> SyntaxKind {
        ctx.lexed.kind(*self)
    }

    fn to_char(&self, ctx: &RawConvertor<'a>) -> Option<char> {
        ctx.lexed.text(*self).chars().next()
    }

    fn to_text(&self, ctx: &RawConvertor<'_>) -> SmolStr {
        ctx.lexed.text(*self).into()
    }
}

impl<'a> TokenConvertor for RawConvertor<'a> {
    type Token = usize;

    fn convert_doc_comment(&self, &token: &usize) -> Option<Vec<tt::TokenTree>> {
        let text = self.lexed.text(token);
        convert_doc_comment(&doc_comment(text))
    }

    fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
        if self.pos == self.lexed.len() {
            return None;
        }
        let token = self.pos;
        self.pos += 1;
        let range = self.lexed.text_range(token);
        let range = TextRange::new(range.start.try_into().unwrap(), range.end.try_into().unwrap());

        Some((token, range))
    }

    fn peek(&self) -> Option<Self::Token> {
        if self.pos == self.lexed.len() {
            return None;
        }

        Some(self.pos)
    }

    fn id_alloc(&mut self) -> &mut TokenIdAlloc {
        &mut self.id_alloc
    }
}
struct Convertor<'c> {
    id_alloc: TokenIdAlloc,
    current: Option<SyntaxToken>,
    preorder: PreorderWithTokens,
    censor: &'c FxHashSet<SyntaxNode>,
    range: TextRange,
    punct_offset: Option<(SyntaxToken, TextSize)>,
}

impl<'c> Convertor<'c> {
    fn new(
        node: &SyntaxNode,
        global_offset: TextSize,
        censor: &'c FxHashSet<SyntaxNode>,
    ) -> Convertor<'c> {
        let range = node.text_range();
        let mut preorder = node.preorder_with_tokens();
        let first = Self::next_token(&mut preorder, censor);
        Convertor {
            id_alloc: TokenIdAlloc { map: TokenMap::default(), global_offset, next_id: 0 },
            current: first,
            preorder,
            range,
            censor,
            punct_offset: None,
        }
    }
    fn next_token(
        preorder: &mut PreorderWithTokens,
        censor: &FxHashSet<SyntaxNode>,
    ) -> Option<SyntaxToken> {
        while let Some(ev) = preorder.next() {
            let ele = match ev {
                WalkEvent::Enter(ele) => ele,
                _ => continue,
            };
            match ele {
                SyntaxElement::Token(t) => return Some(t),
                SyntaxElement::Node(node) if censor.contains(&node) => preorder.skip_subtree(),
                SyntaxElement::Node(_) => (),
            }
        }
        None
    }
}
#[derive(Debug)]
enum SynToken {
    Ordinary(SyntaxToken),
    Punch(SyntaxToken, TextSize),
}

impl SynToken {
    fn token(&self) -> &SyntaxToken {
        match self {
            SynToken::Ordinary(it) => it,
            SynToken::Punch(it, _) => it,
        }
    }
}

impl<'a> SrcToken<Convertor<'a>> for SynToken {
    fn kind(&self, _ctx: &Convertor<'a>) -> SyntaxKind {
        self.token().kind()
    }
    fn to_char(&self, _ctx: &Convertor<'a>) -> Option<char> {
        match self {
            SynToken::Ordinary(_) => None,
            SynToken::Punch(it, i) => it.text().chars().nth((*i).into()),
        }
    }
    fn to_text(&self, _ctx: &Convertor<'a>) -> SmolStr {
        self.token().text().into()
    }
}
impl TokenConvertor for Convertor<'_> {
    type Token = SynToken;
    fn convert_doc_comment(&self, token: &Self::Token) -> Option<Vec<tt::TokenTree>> {
        convert_doc_comment(token.token())
    }

    fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
        if let Some((punct, offset)) = self.punct_offset.clone() {
            if usize::from(offset) + 1 < punct.text().len() {
                let offset = offset + TextSize::of('.');
                let range = punct.text_range();
                self.punct_offset = Some((punct.clone(), offset));
                let range = TextRange::at(range.start() + offset, TextSize::of('.'));
                return Some((SynToken::Punch(punct, offset), range));
            }
        }

        let curr = self.current.clone()?;
        if !self.range.contains_range(curr.text_range()) {
            return None;
        }
        self.current = Self::next_token(&mut self.preorder, self.censor);
        let token = if curr.kind().is_punct() {
            self.punct_offset = Some((curr.clone(), 0.into()));
            let range = curr.text_range();
            let range = TextRange::at(range.start(), TextSize::of('.'));
            (SynToken::Punch(curr, 0.into()), range)
        } else {
            self.punct_offset = None;
            let range = curr.text_range();
            (SynToken::Ordinary(curr), range)
        };

        Some(token)
    }

    fn peek(&self) -> Option<Self::Token> {
        if let Some((punct, mut offset)) = self.punct_offset.clone() {
            offset += TextSize::of('.');
            if usize::from(offset) < punct.text().len() {
                return Some(SynToken::Punch(punct, offset));
            }
        }

        let curr = self.current.clone()?;
        if !self.range.contains_range(curr.text_range()) {
            return None;
        }

        let token = if curr.kind().is_punct() {
            SynToken::Punch(curr, 0.into())
        } else {
            SynToken::Ordinary(curr)
        };
        Some(token)
    }

    fn id_alloc(&mut self) -> &mut TokenIdAlloc {
        &mut self.id_alloc
    }
}
struct TtTreeSink<'a> {
    buf: String,
    cursor: Cursor<'a>,
    open_delims: FxHashMap<tt::TokenId, TextSize>,
    text_pos: TextSize,
    inner: SyntaxTreeBuilder,
    token_map: TokenMap,

    // Number of roots.
    // Used to detect an ill-formed tree that does not have a single root.
    roots: smallvec::SmallVec<[usize; 1]>,
}

impl<'a> TtTreeSink<'a> {
    fn new(cursor: Cursor<'a>) -> Self {
        TtTreeSink {
            buf: String::new(),
            cursor,
            open_delims: FxHashMap::default(),
            text_pos: 0.into(),
            inner: SyntaxTreeBuilder::default(),
            roots: smallvec::SmallVec::new(),
            token_map: TokenMap::default(),
        }
    }

    fn finish(mut self) -> (Parse<SyntaxNode>, TokenMap) {
        self.token_map.shrink_to_fit();
        (self.inner.finish(), self.token_map)
    }
}
fn delim_to_str(d: tt::DelimiterKind, closing: bool) -> &'static str {
    let texts = match d {
        tt::DelimiterKind::Parenthesis => "()",
        tt::DelimiterKind::Brace => "{}",
        tt::DelimiterKind::Bracket => "[]",
    };

    let idx = closing as usize;
    &texts[idx..texts.len() - (1 - idx)]
}
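
// Illustrative sketch only: `closing as usize` is 0 for an opening delimiter
// and 1 for a closing one, so the slice keeps exactly one of the two
// characters in `texts`. The test name is an assumption made for this example.
#[test]
fn example_delim_to_str() {
    assert_eq!(delim_to_str(tt::DelimiterKind::Brace, false), "{");
    assert_eq!(delim_to_str(tt::DelimiterKind::Brace, true), "}");
}
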
impl<'a> TtTreeSink<'a> {
    fn token(&mut self, kind: SyntaxKind, mut n_tokens: u8) {
        if kind == LIFETIME_IDENT {
            n_tokens = 2;
        }

        let mut last = self.cursor;
        for _ in 0..n_tokens {
            let tmp_str: SmolStr;
            if self.cursor.eof() {
                break;
            }
            last = self.cursor;
            let text: &str = loop {
                break match self.cursor.token_tree() {
                    Some(tt::buffer::TokenTreeRef::Leaf(leaf, _)) => {
                        // Mark the range if needed
                        let (text, id) = match leaf {
                            tt::Leaf::Ident(ident) => (&ident.text, ident.id),
                            tt::Leaf::Punct(punct) => {
                                assert!(punct.char.is_ascii());
                                let char = &(punct.char as u8);
                                tmp_str = SmolStr::new_inline(
                                    std::str::from_utf8(std::slice::from_ref(char)).unwrap(),
                                );
                                (&tmp_str, punct.id)
                            }
                            tt::Leaf::Literal(lit) => (&lit.text, lit.id),
                        };
                        let range = TextRange::at(self.text_pos, TextSize::of(text.as_str()));
                        self.token_map.insert(id, range);
                        self.cursor = self.cursor.bump();
                        text
                    }
                    Some(tt::buffer::TokenTreeRef::Subtree(subtree, _)) => {
                        self.cursor = self.cursor.subtree().unwrap();
                        match subtree.delimiter {
                            Some(d) => {
                                self.open_delims.insert(d.id, self.text_pos);
                                delim_to_str(d.kind, false)
                            }
                            None => continue,
                        }
                    }
                    None => {
                        let parent = self.cursor.end().unwrap();
                        self.cursor = self.cursor.bump();
                        match parent.delimiter {
                            Some(d) => {
                                if let Some(open_delim) = self.open_delims.get(&d.id) {
                                    let open_range = TextRange::at(*open_delim, TextSize::of('('));
                                    let close_range =
                                        TextRange::at(self.text_pos, TextSize::of('('));
                                    self.token_map.insert_delim(d.id, open_range, close_range);
                                }
                                delim_to_str(d.kind, true)
                            }
                            None => continue,
                        }
                    }
                };
            };
            self.buf += text;
            self.text_pos += TextSize::of(text);
        }

        self.inner.token(kind, self.buf.as_str());
        self.buf.clear();
        // Add whitespace between adjacent puncts
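        // (e.g. two `Alone` puncts `+` `=` are emitted as `+ =`, while a
        // `Joint` pair such as `+=` gets no space between its characters)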
        let next = last.bump();
        if let (
            Some(tt::buffer::TokenTreeRef::Leaf(tt::Leaf::Punct(curr), _)),
            Some(tt::buffer::TokenTreeRef::Leaf(tt::Leaf::Punct(_), _)),
        ) = (last.token_tree(), next.token_tree())
        {
            // Note: We assume that a semicolon is always the last token of a
            // statement elsewhere in rust-analyzer, so we don't add whitespace
            // after it here.
            if curr.spacing == tt::Spacing::Alone && curr.char != ';' {
                self.inner.token(WHITESPACE, " ");
                self.text_pos += TextSize::of(' ');
            }
        }
    }
    fn start_node(&mut self, kind: SyntaxKind) {
        self.inner.start_node(kind);

        match self.roots.last_mut() {
            None | Some(0) => self.roots.push(1),
            Some(ref mut n) => **n += 1,
        };
    }

    fn finish_node(&mut self) {
        self.inner.finish_node();
        *self.roots.last_mut().unwrap() -= 1;
    }

    fn error(&mut self, error: String) {
        self.inner.error(error, self.text_pos)
    }
}