git.lizzy.rs Git - rust.git/commitdiff
syntax: Rename `TokenAndSpan` into `Token`
author     Vadim Petrochenkov <vadim.petrochenkov@gmail.com>
           Tue, 4 Jun 2019 15:48:40 +0000 (18:48 +0300)
committer  Vadim Petrochenkov <vadim.petrochenkov@gmail.com>
           Thu, 6 Jun 2019 11:03:15 +0000 (14:03 +0300)
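
The lexer's `TokenAndSpan { tok, sp }` pair becomes `token::Token { kind, span }`,
and a `PartialEq<TokenKind>` impl is added so a whole token can be compared
against a kind directly (`next == token::Eof` instead of `next.tok == token::Eof`).
A minimal self-contained sketch of the new shape follows; the simplified
`TokenKind` and the tuple stand-in for `syntax_pos::Span` are assumptions for
illustration only (the real definitions live in `src/libsyntax/parse/token.rs`):

    #[derive(Clone, Debug, PartialEq)]
    enum TokenKind {
        Eof,
        Whitespace,
        // ... the real enum has many more variants
    }

    // Replaces the old `TokenAndSpan { tok: TokenKind, sp: Span }`.
    #[derive(Clone, Debug)]
    struct Token {
        kind: TokenKind,
        span: (u32, u32), // stand-in for `syntax_pos::Span`
    }

    // Lets callers compare a token against a kind directly.
    impl PartialEq<TokenKind> for Token {
        fn eq(&self, rhs: &TokenKind) -> bool {
            self.kind == *rhs
        }
    }

    fn main() {
        let next = Token { kind: TokenKind::Eof, span: (0, 0) };
        assert!(next == TokenKind::Eof); // was: next.tok == token::Eof
        assert_eq!(next.span, (0, 0));   // was: next.sp
    }

The hunks below update call sites across the tree mechanically along these lines.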
src/librustc_save_analysis/span_utils.rs
src/librustdoc/html/highlight.rs
src/librustdoc/passes/check_code_block_syntax.rs
src/libsyntax/parse/lexer/mod.rs
src/libsyntax/parse/lexer/tokentrees.rs
src/libsyntax/parse/parser.rs
src/libsyntax/parse/token.rs
src/libsyntax/tokenstream.rs

diff --git a/src/librustc_save_analysis/span_utils.rs b/src/librustc_save_analysis/span_utils.rs
index 5527fcb923b6fbf87840de746e54b815104b370d..5831b0bcd8fa37a65e8cdb307dc13d611d227d5d 100644
--- a/src/librustc_save_analysis/span_utils.rs
+++ b/src/librustc_save_analysis/span_utils.rs
@@ -60,11 +60,11 @@ pub fn sub_span_of_token(&self, span: Span, tok: TokenKind) -> Option<Span> {
         let mut toks = self.retokenise_span(span);
         loop {
             let next = toks.real_token();
-            if next.tok == token::Eof {
+            if next == token::Eof {
                 return None;
             }
-            if next.tok == tok {
-                return Some(next.sp);
+            if next == tok {
+                return Some(next.span);
             }
         }
     }
@@ -74,12 +74,12 @@ pub fn sub_span_of_token(&self, span: Span, tok: TokenKind) -> Option<Span> {
     //     let mut toks = self.retokenise_span(span);
     //     loop {
     //         let ts = toks.real_token();
-    //         if ts.tok == token::Eof {
+    //         if ts == token::Eof {
     //             return None;
     //         }
-    //         if ts.tok == token::Not {
+    //         if ts == token::Not {
     //             let ts = toks.real_token();
-    //             if ts.tok.is_ident() {
+    //             if ts.kind.is_ident() {
     //                 return Some(ts.sp);
     //             } else {
     //                 return None;
@@ -93,12 +93,12 @@ pub fn sub_span_of_token(&self, span: Span, tok: TokenKind) -> Option<Span> {
     //     let mut toks = self.retokenise_span(span);
     //     let mut prev = toks.real_token();
     //     loop {
-    //         if prev.tok == token::Eof {
+    //         if prev == token::Eof {
     //             return None;
     //         }
     //         let ts = toks.real_token();
-    //         if ts.tok == token::Not {
-    //             if prev.tok.is_ident() {
+    //         if ts == token::Not {
+    //             if prev.kind.is_ident() {
     //                 return Some(prev.sp);
     //             } else {
     //                 return None;
diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs
index 932419c78f22c1f03d6d09828e53e51d5e39b966..3b9de761828b7387c1eeecebcab26e90005f397e 100644
--- a/src/librustdoc/html/highlight.rs
+++ b/src/librustdoc/html/highlight.rs
@@ -12,8 +12,8 @@
 use std::io::prelude::*;
 
 use syntax::source_map::{SourceMap, FilePathMapping};
-use syntax::parse::lexer::{self, TokenAndSpan};
-use syntax::parse::token;
+use syntax::parse::lexer;
+use syntax::parse::token::{self, Token};
 use syntax::parse;
 use syntax::symbol::{kw, sym};
 use syntax_pos::{Span, FileName};
@@ -186,9 +186,9 @@ fn new(lexer: lexer::StringReader<'a>, source_map: &'a SourceMap) -> Classifier<
     }
 
     /// Gets the next token out of the lexer.
-    fn try_next_token(&mut self) -> Result<TokenAndSpan, HighlightError> {
+    fn try_next_token(&mut self) -> Result<Token, HighlightError> {
         match self.lexer.try_next_token() {
-            Ok(tas) => Ok(tas),
+            Ok(token) => Ok(token),
             Err(_) => Err(HighlightError::LexError),
         }
     }
@@ -205,7 +205,7 @@ fn write_source<W: Writer>(&mut self,
                                    -> Result<(), HighlightError> {
         loop {
             let next = self.try_next_token()?;
-            if next.tok == token::Eof {
+            if next == token::Eof {
                 break;
             }
 
@@ -218,9 +218,9 @@ fn write_source<W: Writer>(&mut self,
     // Handles an individual token from the lexer.
     fn write_token<W: Writer>(&mut self,
                               out: &mut W,
-                              tas: TokenAndSpan)
+                              token: Token)
                               -> Result<(), HighlightError> {
-        let klass = match tas.tok {
+        let klass = match token.kind {
             token::Shebang(s) => {
                 out.string(Escape(&s.as_str()), Class::None)?;
                 return Ok(());
@@ -234,7 +234,7 @@ fn write_token<W: Writer>(&mut self,
             // reference or dereference operator or a reference or pointer type, instead of the
             // bit-and or multiplication operator.
             token::BinOp(token::And) | token::BinOp(token::Star)
-                if self.lexer.peek().tok != token::Whitespace => Class::RefKeyWord,
+                if self.lexer.peek().kind != token::Whitespace => Class::RefKeyWord,
 
             // Consider this as part of a macro invocation if there was a
             // leading identifier.
@@ -257,7 +257,7 @@ fn write_token<W: Writer>(&mut self,
             token::Question => Class::QuestionMark,
 
             token::Dollar => {
-                if self.lexer.peek().tok.is_ident() {
+                if self.lexer.peek().kind.is_ident() {
                     self.in_macro_nonterminal = true;
                     Class::MacroNonTerminal
                 } else {
@@ -280,9 +280,9 @@ fn write_token<W: Writer>(&mut self,
                 // as an attribute.
 
                 // Case 1: #![inner_attribute]
-                if self.lexer.peek().tok == token::Not {
+                if self.lexer.peek() == token::Not {
                     self.try_next_token()?; // NOTE: consumes `!` token!
-                    if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+                    if self.lexer.peek() == token::OpenDelim(token::Bracket) {
                         self.in_attribute = true;
                         out.enter_span(Class::Attribute)?;
                     }
@@ -292,7 +292,7 @@ fn write_token<W: Writer>(&mut self,
                 }
 
                 // Case 2: #[outer_attribute]
-                if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+                if self.lexer.peek() == token::OpenDelim(token::Bracket) {
                     self.in_attribute = true;
                     out.enter_span(Class::Attribute)?;
                 }
@@ -335,13 +335,13 @@ fn write_token<W: Writer>(&mut self,
                     sym::Option | sym::Result => Class::PreludeTy,
                     sym::Some | sym::None | sym::Ok | sym::Err => Class::PreludeVal,
 
-                    _ if tas.tok.is_reserved_ident() => Class::KeyWord,
+                    _ if token.kind.is_reserved_ident() => Class::KeyWord,
 
                     _ => {
                         if self.in_macro_nonterminal {
                             self.in_macro_nonterminal = false;
                             Class::MacroNonTerminal
-                        } else if self.lexer.peek().tok == token::Not {
+                        } else if self.lexer.peek() == token::Not {
                             self.in_macro = true;
                             Class::Macro
                         } else {
@@ -359,7 +359,7 @@ fn write_token<W: Writer>(&mut self,
 
         // Anything that didn't return above is the simple case where we the
         // class just spans a single token, so we can use the `string` method.
-        out.string(Escape(&self.snip(tas.sp)), klass)?;
+        out.string(Escape(&self.snip(token.span)), klass)?;
 
         Ok(())
     }
diff --git a/src/librustdoc/passes/check_code_block_syntax.rs b/src/librustdoc/passes/check_code_block_syntax.rs
index 0556852c54ac2e741aa64b62fad4369b4c804e70..694843ad7f71e1accb2f1ae3ad2d62c8a2ded3c1 100644
--- a/src/librustdoc/passes/check_code_block_syntax.rs
+++ b/src/librustdoc/passes/check_code_block_syntax.rs
@@ -1,5 +1,5 @@
 use errors::Applicability;
-use syntax::parse::lexer::{TokenAndSpan, StringReader as Lexer};
+use syntax::parse::lexer::{StringReader as Lexer};
 use syntax::parse::{ParseSess, token};
 use syntax::source_map::FilePathMapping;
 use syntax_pos::FileName;
@@ -33,8 +33,8 @@ fn check_rust_syntax(&self, item: &clean::Item, dox: &str, code_block: RustCodeB
         );
 
         let errors = Lexer::new_or_buffered_errs(&sess, source_file, None).and_then(|mut lexer| {
-            while let Ok(TokenAndSpan { tok, .. }) = lexer.try_next_token() {
-                if tok == token::Eof {
+            while let Ok(token::Token { kind, .. }) = lexer.try_next_token() {
+                if kind == token::Eof {
                     break;
                 }
             }
diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs
index ca9199975bb7e0e0260ae3abe8c884ddeda14f7a..32d5b16dd714f7d09c3fefb933ea03e487b909b7 100644
--- a/src/libsyntax/parse/lexer/mod.rs
+++ b/src/libsyntax/parse/lexer/mod.rs
@@ -1,6 +1,6 @@
 use crate::ast::{self, Ident};
 use crate::parse::ParseSess;
-use crate::parse::token::{self, TokenKind};
+use crate::parse::token::{self, Token, TokenKind};
 use crate::symbol::{sym, Symbol};
 use crate::parse::unescape;
 use crate::parse::unescape_error_reporting::{emit_unescape_error, push_escaped_char};
 mod tokentrees;
 mod unicode_chars;
 
-#[derive(Clone, Debug)]
-pub struct TokenAndSpan {
-    pub tok: TokenKind,
-    pub sp: Span,
-}
-
-impl Default for TokenAndSpan {
-    fn default() -> Self {
-        TokenAndSpan {
-            tok: token::Whitespace,
-            sp: syntax_pos::DUMMY_SP,
-        }
-    }
-}
-
 #[derive(Clone, Debug)]
 pub struct UnmatchedBrace {
     pub expected_delim: token::DelimToken,
@@ -87,7 +72,7 @@ fn mk_ident(&self, string: &str) -> Ident {
         ident
     }
 
-    fn unwrap_or_abort(&mut self, res: Result<TokenAndSpan, ()>) -> TokenAndSpan {
+    fn unwrap_or_abort(&mut self, res: Result<Token, ()>) -> Token {
         match res {
             Ok(tok) => tok,
             Err(_) => {
@@ -97,17 +82,17 @@ fn unwrap_or_abort(&mut self, res: Result<TokenAndSpan, ()>) -> TokenAndSpan {
         }
     }
 
-    fn next_token(&mut self) -> TokenAndSpan where Self: Sized {
+    fn next_token(&mut self) -> Token where Self: Sized {
         let res = self.try_next_token();
         self.unwrap_or_abort(res)
     }
 
     /// Returns the next token. EFFECT: advances the string_reader.
-    pub fn try_next_token(&mut self) -> Result<TokenAndSpan, ()> {
+    pub fn try_next_token(&mut self) -> Result<Token, ()> {
         assert!(self.fatal_errs.is_empty());
-        let ret_val = TokenAndSpan {
-            tok: replace(&mut self.peek_tok, token::Whitespace),
-            sp: self.peek_span,
+        let ret_val = Token {
+            kind: replace(&mut self.peek_tok, token::Whitespace),
+            span: self.peek_span,
         };
         self.advance_token()?;
         Ok(ret_val)
@@ -135,10 +120,10 @@ fn peek_delimited(&self, from_ch: char, to_ch: char) -> Option<String> {
         return None;
     }
 
-    fn try_real_token(&mut self) -> Result<TokenAndSpan, ()> {
+    fn try_real_token(&mut self) -> Result<Token, ()> {
         let mut t = self.try_next_token()?;
         loop {
-            match t.tok {
+            match t.kind {
                 token::Whitespace | token::Comment | token::Shebang(_) => {
                     t = self.try_next_token()?;
                 }
@@ -149,7 +134,7 @@ fn try_real_token(&mut self) -> Result<TokenAndSpan, ()> {
         Ok(t)
     }
 
-    pub fn real_token(&mut self) -> TokenAndSpan {
+    pub fn real_token(&mut self) -> Token {
         let res = self.try_real_token();
         self.unwrap_or_abort(res)
     }
@@ -194,11 +179,11 @@ pub fn buffer_fatal_errors(&mut self) -> Vec<Diagnostic> {
         buffer
     }
 
-    pub fn peek(&self) -> TokenAndSpan {
+    pub fn peek(&self) -> Token {
         // FIXME(pcwalton): Bad copy!
-        TokenAndSpan {
-            tok: self.peek_tok.clone(),
-            sp: self.peek_span,
+        Token {
+            kind: self.peek_tok.clone(),
+            span: self.peek_span,
         }
     }
 
@@ -341,9 +326,9 @@ fn err_span_char(&self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char) {
     fn advance_token(&mut self) -> Result<(), ()> {
         match self.scan_whitespace_or_comment() {
             Some(comment) => {
-                self.peek_span_src_raw = comment.sp;
-                self.peek_span = comment.sp;
-                self.peek_tok = comment.tok;
+                self.peek_span_src_raw = comment.span;
+                self.peek_span = comment.span;
+                self.peek_tok = comment.kind;
             }
             None => {
                 if self.is_eof() {
@@ -527,7 +512,7 @@ fn scan_optional_raw_name(&mut self) -> Option<ast::Name> {
 
     /// PRECONDITION: self.ch is not whitespace
     /// Eats any kind of comment.
-    fn scan_comment(&mut self) -> Option<TokenAndSpan> {
+    fn scan_comment(&mut self) -> Option<Token> {
         if let Some(c) = self.ch {
             if c.is_whitespace() {
                 let msg = "called consume_any_line_comment, but there was whitespace";
@@ -563,14 +548,14 @@ fn scan_comment(&mut self) -> Option<TokenAndSpan> {
                         self.bump();
                     }
 
-                    let tok = if doc_comment {
+                    let kind = if doc_comment {
                         self.with_str_from(start_bpos, |string| {
                             token::DocComment(Symbol::intern(string))
                         })
                     } else {
                         token::Comment
                     };
-                    Some(TokenAndSpan { tok, sp: self.mk_sp(start_bpos, self.pos) })
+                    Some(Token { kind, span: self.mk_sp(start_bpos, self.pos) })
                 }
                 Some('*') => {
                     self.bump();
@@ -594,9 +579,9 @@ fn scan_comment(&mut self) -> Option<TokenAndSpan> {
                     while !self.ch_is('\n') && !self.is_eof() {
                         self.bump();
                     }
-                    return Some(TokenAndSpan {
-                        tok: token::Shebang(self.name_from(start)),
-                        sp: self.mk_sp(start, self.pos),
+                    return Some(Token {
+                        kind: token::Shebang(self.name_from(start)),
+                        span: self.mk_sp(start, self.pos),
                     });
                 }
             }
@@ -608,7 +593,7 @@ fn scan_comment(&mut self) -> Option<TokenAndSpan> {
 
     /// If there is whitespace, shebang, or a comment, scan it. Otherwise,
     /// return `None`.
-    fn scan_whitespace_or_comment(&mut self) -> Option<TokenAndSpan> {
+    fn scan_whitespace_or_comment(&mut self) -> Option<Token> {
         match self.ch.unwrap_or('\0') {
             // # to handle shebang at start of file -- this is the entry point
             // for skipping over all "junk"
@@ -622,9 +607,9 @@ fn scan_whitespace_or_comment(&mut self) -> Option<TokenAndSpan> {
                 while is_pattern_whitespace(self.ch) {
                     self.bump();
                 }
-                let c = Some(TokenAndSpan {
-                    tok: token::Whitespace,
-                    sp: self.mk_sp(start_bpos, self.pos),
+                let c = Some(Token {
+                    kind: token::Whitespace,
+                    span: self.mk_sp(start_bpos, self.pos),
                 });
                 debug!("scanning whitespace: {:?}", c);
                 c
@@ -634,7 +619,7 @@ fn scan_whitespace_or_comment(&mut self) -> Option<TokenAndSpan> {
     }
 
     /// Might return a sugared-doc-attr
-    fn scan_block_comment(&mut self) -> Option<TokenAndSpan> {
+    fn scan_block_comment(&mut self) -> Option<Token> {
         // block comments starting with "/**" or "/*!" are doc-comments
         let is_doc_comment = self.ch_is('*') || self.ch_is('!');
         let start_bpos = self.pos - BytePos(2);
@@ -671,7 +656,7 @@ fn scan_block_comment(&mut self) -> Option<TokenAndSpan> {
 
         self.with_str_from(start_bpos, |string| {
             // but comments with only "*"s between two "/"s are not
-            let tok = if is_block_doc_comment(string) {
+            let kind = if is_block_doc_comment(string) {
                 let string = if has_cr {
                     self.translate_crlf(start_bpos,
                                         string,
@@ -684,9 +669,9 @@ fn scan_block_comment(&mut self) -> Option<TokenAndSpan> {
                 token::Comment
             };
 
-            Some(TokenAndSpan {
-                tok,
-                sp: self.mk_sp(start_bpos, self.pos),
+            Some(Token {
+                kind,
+                span: self.mk_sp(start_bpos, self.pos),
             })
         })
     }
@@ -1611,26 +1596,26 @@ fn t1() {
                                         "/* my source file */ fn main() { println!(\"zebra\"); }\n"
                                             .to_string());
             let id = Ident::from_str("fn");
-            assert_eq!(string_reader.next_token().tok, token::Comment);
-            assert_eq!(string_reader.next_token().tok, token::Whitespace);
+            assert_eq!(string_reader.next_token().kind, token::Comment);
+            assert_eq!(string_reader.next_token().kind, token::Whitespace);
             let tok1 = string_reader.next_token();
-            let tok2 = TokenAndSpan {
-                tok: token::Ident(id, false),
-                sp: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
+            let tok2 = Token {
+                kind: token::Ident(id, false),
+                span: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
             };
-            assert_eq!(tok1.tok, tok2.tok);
-            assert_eq!(tok1.sp, tok2.sp);
-            assert_eq!(string_reader.next_token().tok, token::Whitespace);
+            assert_eq!(tok1.kind, tok2.kind);
+            assert_eq!(tok1.span, tok2.span);
+            assert_eq!(string_reader.next_token().kind, token::Whitespace);
             // the 'main' id is already read:
             assert_eq!(string_reader.pos.clone(), BytePos(28));
             // read another token:
             let tok3 = string_reader.next_token();
-            let tok4 = TokenAndSpan {
-                tok: mk_ident("main"),
-                sp: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
+            let tok4 = Token {
+                kind: mk_ident("main"),
+                span: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
             };
-            assert_eq!(tok3.tok, tok4.tok);
-            assert_eq!(tok3.sp, tok4.sp);
+            assert_eq!(tok3.kind, tok4.kind);
+            assert_eq!(tok3.span, tok4.span);
             // the lparen is already read:
             assert_eq!(string_reader.pos.clone(), BytePos(29))
         })
@@ -1640,7 +1625,7 @@ fn t1() {
     // of tokens (stop checking after exhausting the expected vec)
     fn check_tokenization(mut string_reader: StringReader<'_>, expected: Vec<TokenKind>) {
         for expected_tok in &expected {
-            assert_eq!(&string_reader.next_token().tok, expected_tok);
+            assert_eq!(&string_reader.next_token().kind, expected_tok);
         }
     }
 
@@ -1698,7 +1683,7 @@ fn character_a() {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().kind,
                        mk_lit(token::Char, "a", None));
         })
     }
@@ -1708,7 +1693,7 @@ fn character_space() {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().kind,
                        mk_lit(token::Char, " ", None));
         })
     }
@@ -1718,7 +1703,7 @@ fn character_escaped() {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().kind,
                        mk_lit(token::Char, "\\n", None));
         })
     }
@@ -1728,7 +1713,7 @@ fn lifetime_name() {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().kind,
                        token::Lifetime(Ident::from_str("'abc")));
         })
     }
@@ -1738,7 +1723,7 @@ fn raw_string() {
         with_default_globals(|| {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
-            assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token().kind,
                        mk_lit(token::StrRaw(3), "\"#a\\b\x00c\"", None));
         })
     }
@@ -1750,10 +1735,10 @@ fn literal_suffixes() {
             let sh = mk_sess(sm.clone());
             macro_rules! test {
                 ($input: expr, $tok_type: ident, $tok_contents: expr) => {{
-                    assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().tok,
+                    assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().kind,
                                mk_lit(token::$tok_type, $tok_contents, Some("suffix")));
                     // with a whitespace separator:
-                    assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().tok,
+                    assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().kind,
                                mk_lit(token::$tok_type, $tok_contents, None));
                 }}
             }
@@ -1768,11 +1753,11 @@ macro_rules! test {
             test!("1.0", Float, "1.0");
             test!("1.0e10", Float, "1.0e10");
 
-            assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().kind,
                        mk_lit(token::Integer, "2", Some("us")));
-            assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().kind,
                        mk_lit(token::StrRaw(3), "raw", Some("suffix")));
-            assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().tok,
+            assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().kind,
                        mk_lit(token::ByteStrRaw(3), "raw", Some("suffix")));
         })
     }
@@ -1790,11 +1775,11 @@ fn nested_block_comments() {
             let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
             let sh = mk_sess(sm.clone());
             let mut lexer = setup(&sm, &sh, "/* /* */ */'a'".to_string());
-            match lexer.next_token().tok {
+            match lexer.next_token().kind {
                 token::Comment => {}
                 _ => panic!("expected a comment!"),
             }
-            assert_eq!(lexer.next_token().tok, mk_lit(token::Char, "a", None));
+            assert_eq!(lexer.next_token().kind, mk_lit(token::Char, "a", None));
         })
     }
 
@@ -1805,10 +1790,10 @@ fn crlf_comments() {
             let sh = mk_sess(sm.clone());
             let mut lexer = setup(&sm, &sh, "// test\r\n/// test\r\n".to_string());
             let comment = lexer.next_token();
-            assert_eq!(comment.tok, token::Comment);
-            assert_eq!((comment.sp.lo(), comment.sp.hi()), (BytePos(0), BytePos(7)));
-            assert_eq!(lexer.next_token().tok, token::Whitespace);
-            assert_eq!(lexer.next_token().tok,
+            assert_eq!(comment.kind, token::Comment);
+            assert_eq!((comment.span.lo(), comment.span.hi()), (BytePos(0), BytePos(7)));
+            assert_eq!(lexer.next_token().kind, token::Whitespace);
+            assert_eq!(lexer.next_token().kind,
                     token::DocComment(Symbol::intern("/// test")));
         })
     }
diff --git a/src/libsyntax/parse/lexer/tokentrees.rs b/src/libsyntax/parse/lexer/tokentrees.rs
index b8cd32883b88c5299596d068850b67b6fe4efe14..767d37016da87cecece3ad65d3fe2d0911463e71 100644
--- a/src/libsyntax/parse/lexer/tokentrees.rs
+++ b/src/libsyntax/parse/lexer/tokentrees.rs
@@ -220,7 +220,7 @@ fn parse_token_tree(&mut self) -> PResult<'a, TreeAndJoint> {
 
     fn real_token(&mut self) {
         let t = self.string_reader.real_token();
-        self.token = t.tok;
-        self.span = t.sp;
+        self.token = t.kind;
+        self.span = t.span;
     }
 }
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index 8fc02dd9259e0c310725ec81d77df6476686b31e..3b7d4e14dbb40fb18282ef53a8cb54952d342dcf 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -36,9 +36,9 @@
 use crate::ext::base::DummyResult;
 use crate::source_map::{self, SourceMap, Spanned, respan};
 use crate::parse::{SeqSep, classify, literal, token};
-use crate::parse::lexer::{TokenAndSpan, UnmatchedBrace};
+use crate::parse::lexer::UnmatchedBrace;
 use crate::parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
-use crate::parse::token::DelimToken;
+use crate::parse::token::{Token, DelimToken};
 use crate::parse::{new_sub_parser_from_file, ParseSess, Directory, DirectoryOwnership};
 use crate::util::parser::{AssocOp, Fixity};
 use crate::print::pprust;
@@ -295,7 +295,7 @@ fn new(sp: DelimSpan, delim: DelimToken, tts: &TokenStream) -> Self {
 }
 
 impl TokenCursor {
-    fn next(&mut self) -> TokenAndSpan {
+    fn next(&mut self) -> Token {
         loop {
             let tree = if !self.frame.open_delim {
                 self.frame.open_delim = true;
@@ -309,7 +309,7 @@ fn next(&mut self) -> TokenAndSpan {
                 self.frame = frame;
                 continue
             } else {
-                return TokenAndSpan { tok: token::Eof, sp: DUMMY_SP }
+                return Token { kind: token::Eof, span: DUMMY_SP }
             };
 
             match self.frame.last_token {
@@ -318,7 +318,7 @@ fn next(&mut self) -> TokenAndSpan {
             }
 
             match tree {
-                TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp },
+                TokenTree::Token(span, kind) => return Token { kind, span },
                 TokenTree::Delimited(sp, delim, tts) => {
                     let frame = TokenCursorFrame::new(sp, delim, &tts);
                     self.stack.push(mem::replace(&mut self.frame, frame));
@@ -327,9 +327,9 @@ fn next(&mut self) -> TokenAndSpan {
         }
     }
 
-    fn next_desugared(&mut self) -> TokenAndSpan {
+    fn next_desugared(&mut self) -> Token {
         let (sp, name) = match self.next() {
-            TokenAndSpan { sp, tok: token::DocComment(name) } => (sp, name),
+            Token { span, kind: token::DocComment(name) } => (span, name),
             tok => return tok,
         };
 
@@ -499,8 +499,8 @@ pub fn new(
         };
 
         let tok = parser.next_tok();
-        parser.token = tok.tok;
-        parser.span = tok.sp;
+        parser.token = tok.kind;
+        parser.span = tok.span;
 
         if let Some(directory) = directory {
             parser.directory = directory;
@@ -515,15 +515,15 @@ pub fn new(
         parser
     }
 
-    fn next_tok(&mut self) -> TokenAndSpan {
+    fn next_tok(&mut self) -> Token {
         let mut next = if self.desugar_doc_comments {
             self.token_cursor.next_desugared()
         } else {
             self.token_cursor.next()
         };
-        if next.sp.is_dummy() {
+        if next.span.is_dummy() {
             // Tweak the location for better diagnostics, but keep syntactic context intact.
-            next.sp = self.prev_span.with_ctxt(next.sp.ctxt());
+            next.span = self.prev_span.with_ctxt(next.span.ctxt());
         }
         next
     }
@@ -1023,8 +1023,8 @@ pub fn bump(&mut self) {
         };
 
         let next = self.next_tok();
-        self.span = next.sp;
-        self.token = next.tok;
+        self.token = next.kind;
+        self.span = next.span;
         self.expected_tokens.clear();
         // check after each token
         self.process_potential_macro_variable();
@@ -1038,8 +1038,8 @@ fn bump_with(&mut self, next: token::TokenKind, span: Span) {
         // fortunately for tokens currently using `bump_with`, the
         // prev_token_kind will be of no use anyway.
         self.prev_token_kind = PrevTokenKind::Other;
-        self.span = span;
         self.token = next;
+        self.span = span;
         self.expected_tokens.clear();
     }
 
diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs
index aa1e8fd060f783ba8585a88777acaa5b25d313ab..3679e4050ff4210ab18bf29f817ce3658f0843a7 100644
--- a/src/libsyntax/parse/token.rs
+++ b/src/libsyntax/parse/token.rs
@@ -235,6 +235,12 @@ pub enum TokenKind {
 #[cfg(target_arch = "x86_64")]
 static_assert_size!(TokenKind, 16);
 
+#[derive(Clone, Debug)]
+pub struct Token {
+    pub kind: TokenKind,
+    pub span: Span,
+}
+
 impl TokenKind {
     /// Recovers a `TokenKind` from an `ast::Ident`. This creates a raw identifier if necessary.
     pub fn from_ast_ident(ident: ast::Ident) -> TokenKind {
@@ -602,6 +608,12 @@ pub fn is_reserved_ident(&self) -> bool {
     }
 }
 
+impl PartialEq<TokenKind> for Token {
+    fn eq(&self, rhs: &TokenKind) -> bool {
+        self.kind == *rhs
+    }
+}
+
 #[derive(Clone, RustcEncodable, RustcDecodable)]
 /// For interpolation during macro expansion.
 pub enum Nonterminal {
diff --git a/src/libsyntax/tokenstream.rs b/src/libsyntax/tokenstream.rs
index 0f50f51f5d35c4adbd676c86d123b7cec95faac6..654c21fd094e907224ac7b7c5f384a03148c95b4 100644
--- a/src/libsyntax/tokenstream.rs
+++ b/src/libsyntax/tokenstream.rs
@@ -580,7 +580,6 @@ mod tests {
     use super::*;
     use crate::syntax::ast::Ident;
     use crate::with_default_globals;
-    use crate::parse::token::TokenKind;
     use crate::util::parser_testing::string_to_stream;
     use syntax_pos::{Span, BytePos, NO_EXPANSION};