use std::io::prelude::*;
use syntax::source_map::{SourceMap, FilePathMapping};
-use syntax::parse::lexer::{self, TokenAndSpan};
-use syntax::parse::token;
+use syntax::parse::lexer;
+use syntax::parse::token::{self, Token};
use syntax::parse;
use syntax::symbol::{kw, sym};
use syntax_pos::{Span, FileName};
}
/// Gets the next token out of the lexer.
+ /// Any underlying lexer failure is collapsed into `HighlightError::LexError`;
+ /// the span now travels inside the returned `Token` (replacing `TokenAndSpan`).
- fn try_next_token(&mut self) -> Result<TokenAndSpan, HighlightError> {
+ fn try_next_token(&mut self) -> Result<Token, HighlightError> {
match self.lexer.try_next_token() {
- Ok(tas) => Ok(tas),
+ Ok(token) => Ok(token),
Err(_) => Err(HighlightError::LexError),
}
}
-> Result<(), HighlightError> {
loop {
let next = self.try_next_token()?;
- if next.tok == token::Eof {
+ if next == token::Eof {
break;
}
// Handles an individual token from the lexer.
fn write_token<W: Writer>(&mut self,
out: &mut W,
- tas: TokenAndSpan)
+ token: Token)
-> Result<(), HighlightError> {
- let klass = match tas.tok {
+ let klass = match token.kind {
token::Shebang(s) => {
out.string(Escape(&s.as_str()), Class::None)?;
return Ok(());
// reference or dereference operator or a reference or pointer type, instead of the
// bit-and or multiplication operator.
token::BinOp(token::And) | token::BinOp(token::Star)
- if self.lexer.peek().tok != token::Whitespace => Class::RefKeyWord,
+ if self.lexer.peek().kind != token::Whitespace => Class::RefKeyWord,
// Consider this as part of a macro invocation if there was a
// leading identifier.
token::Question => Class::QuestionMark,
token::Dollar => {
- if self.lexer.peek().tok.is_ident() {
+ if self.lexer.peek().kind.is_ident() {
self.in_macro_nonterminal = true;
Class::MacroNonTerminal
} else {
// as an attribute.
// Case 1: #![inner_attribute]
- if self.lexer.peek().tok == token::Not {
+ if self.lexer.peek() == token::Not {
self.try_next_token()?; // NOTE: consumes `!` token!
- if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+ if self.lexer.peek() == token::OpenDelim(token::Bracket) {
self.in_attribute = true;
out.enter_span(Class::Attribute)?;
}
}
// Case 2: #[outer_attribute]
- if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+ if self.lexer.peek() == token::OpenDelim(token::Bracket) {
self.in_attribute = true;
out.enter_span(Class::Attribute)?;
}
sym::Option | sym::Result => Class::PreludeTy,
sym::Some | sym::None | sym::Ok | sym::Err => Class::PreludeVal,
- _ if tas.tok.is_reserved_ident() => Class::KeyWord,
+ _ if token.kind.is_reserved_ident() => Class::KeyWord,
_ => {
if self.in_macro_nonterminal {
self.in_macro_nonterminal = false;
Class::MacroNonTerminal
- } else if self.lexer.peek().tok == token::Not {
+ } else if self.lexer.peek() == token::Not {
self.in_macro = true;
Class::Macro
} else {
// Anything that didn't return above is the simple case where we the
// class just spans a single token, so we can use the `string` method.
- out.string(Escape(&self.snip(tas.sp)), klass)?;
+ out.string(Escape(&self.snip(token.span)), klass)?;
Ok(())
}
use crate::ast::{self, Ident};
use crate::parse::ParseSess;
-use crate::parse::token::{self, TokenKind};
+use crate::parse::token::{self, Token, TokenKind};
use crate::symbol::{sym, Symbol};
use crate::parse::unescape;
use crate::parse::unescape_error_reporting::{emit_unescape_error, push_escaped_char};
mod tokentrees;
mod unicode_chars;
-#[derive(Clone, Debug)]
-pub struct TokenAndSpan {
- pub tok: TokenKind,
- pub sp: Span,
-}
-
-impl Default for TokenAndSpan {
- fn default() -> Self {
- TokenAndSpan {
- tok: token::Whitespace,
- sp: syntax_pos::DUMMY_SP,
- }
- }
-}
-
#[derive(Clone, Debug)]
pub struct UnmatchedBrace {
pub expected_delim: token::DelimToken,
ident
}
- fn unwrap_or_abort(&mut self, res: Result<TokenAndSpan, ()>) -> TokenAndSpan {
+ fn unwrap_or_abort(&mut self, res: Result<Token, ()>) -> Token {
match res {
Ok(tok) => tok,
Err(_) => {
}
}
- fn next_token(&mut self) -> TokenAndSpan where Self: Sized {
+ fn next_token(&mut self) -> Token where Self: Sized {
+ // Infallible wrapper around `try_next_token`: on error it aborts via
+ // `unwrap_or_abort` instead of surfacing a `Result` to the caller.
let res = self.try_next_token();
self.unwrap_or_abort(res)
}
/// Returns the next token. EFFECT: advances the string_reader.
- pub fn try_next_token(&mut self) -> Result<TokenAndSpan, ()> {
+ pub fn try_next_token(&mut self) -> Result<Token, ()> {
assert!(self.fatal_errs.is_empty());
- let ret_val = TokenAndSpan {
- tok: replace(&mut self.peek_tok, token::Whitespace),
- sp: self.peek_span,
+ let ret_val = Token {
+ kind: replace(&mut self.peek_tok, token::Whitespace),
+ span: self.peek_span,
};
self.advance_token()?;
Ok(ret_val)
return None;
}
- fn try_real_token(&mut self) -> Result<TokenAndSpan, ()> {
+ fn try_real_token(&mut self) -> Result<Token, ()> {
let mut t = self.try_next_token()?;
loop {
- match t.tok {
+ match t.kind {
token::Whitespace | token::Comment | token::Shebang(_) => {
t = self.try_next_token()?;
}
Ok(t)
}
- pub fn real_token(&mut self) -> TokenAndSpan {
+ pub fn real_token(&mut self) -> Token {
+ // Like `next_token`, but skips trivia (whitespace, comments, shebang —
+ // see `try_real_token`) and aborts on lexer errors.
let res = self.try_real_token();
self.unwrap_or_abort(res)
}
buffer
}
- pub fn peek(&self) -> TokenAndSpan {
+ pub fn peek(&self) -> Token {
+ // Non-consuming look-ahead: clones the buffered kind and copies the
+ // buffered span without advancing the reader.
// FIXME(pcwalton): Bad copy!
- TokenAndSpan {
- tok: self.peek_tok.clone(),
- sp: self.peek_span,
+ Token {
+ kind: self.peek_tok.clone(),
+ span: self.peek_span,
}
}
fn advance_token(&mut self) -> Result<(), ()> {
match self.scan_whitespace_or_comment() {
Some(comment) => {
- self.peek_span_src_raw = comment.sp;
- self.peek_span = comment.sp;
- self.peek_tok = comment.tok;
+ self.peek_span_src_raw = comment.span;
+ self.peek_span = comment.span;
+ self.peek_tok = comment.kind;
}
None => {
if self.is_eof() {
/// PRECONDITION: self.ch is not whitespace
/// Eats any kind of comment.
- fn scan_comment(&mut self) -> Option<TokenAndSpan> {
+ fn scan_comment(&mut self) -> Option<Token> {
if let Some(c) = self.ch {
if c.is_whitespace() {
let msg = "called consume_any_line_comment, but there was whitespace";
self.bump();
}
- let tok = if doc_comment {
+ let kind = if doc_comment {
self.with_str_from(start_bpos, |string| {
token::DocComment(Symbol::intern(string))
})
} else {
token::Comment
};
- Some(TokenAndSpan { tok, sp: self.mk_sp(start_bpos, self.pos) })
+ Some(Token { kind, span: self.mk_sp(start_bpos, self.pos) })
}
Some('*') => {
self.bump();
while !self.ch_is('\n') && !self.is_eof() {
self.bump();
}
- return Some(TokenAndSpan {
- tok: token::Shebang(self.name_from(start)),
- sp: self.mk_sp(start, self.pos),
+ return Some(Token {
+ kind: token::Shebang(self.name_from(start)),
+ span: self.mk_sp(start, self.pos),
});
}
}
/// If there is whitespace, shebang, or a comment, scan it. Otherwise,
/// return `None`.
- fn scan_whitespace_or_comment(&mut self) -> Option<TokenAndSpan> {
+ fn scan_whitespace_or_comment(&mut self) -> Option<Token> {
match self.ch.unwrap_or('\0') {
// # to handle shebang at start of file -- this is the entry point
// for skipping over all "junk"
while is_pattern_whitespace(self.ch) {
self.bump();
}
- let c = Some(TokenAndSpan {
- tok: token::Whitespace,
- sp: self.mk_sp(start_bpos, self.pos),
+ let c = Some(Token {
+ kind: token::Whitespace,
+ span: self.mk_sp(start_bpos, self.pos),
});
debug!("scanning whitespace: {:?}", c);
c
}
/// Might return a sugared-doc-attr
- fn scan_block_comment(&mut self) -> Option<TokenAndSpan> {
+ fn scan_block_comment(&mut self) -> Option<Token> {
// block comments starting with "/**" or "/*!" are doc-comments
let is_doc_comment = self.ch_is('*') || self.ch_is('!');
let start_bpos = self.pos - BytePos(2);
self.with_str_from(start_bpos, |string| {
// but comments with only "*"s between two "/"s are not
- let tok = if is_block_doc_comment(string) {
+ let kind = if is_block_doc_comment(string) {
let string = if has_cr {
self.translate_crlf(start_bpos,
string,
token::Comment
};
- Some(TokenAndSpan {
- tok,
- sp: self.mk_sp(start_bpos, self.pos),
+ Some(Token {
+ kind,
+ span: self.mk_sp(start_bpos, self.pos),
})
})
}
"/* my source file */ fn main() { println!(\"zebra\"); }\n"
.to_string());
let id = Ident::from_str("fn");
- assert_eq!(string_reader.next_token().tok, token::Comment);
- assert_eq!(string_reader.next_token().tok, token::Whitespace);
+ assert_eq!(string_reader.next_token().kind, token::Comment);
+ assert_eq!(string_reader.next_token().kind, token::Whitespace);
let tok1 = string_reader.next_token();
- let tok2 = TokenAndSpan {
- tok: token::Ident(id, false),
- sp: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
+ let tok2 = Token {
+ kind: token::Ident(id, false),
+ span: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
};
- assert_eq!(tok1.tok, tok2.tok);
- assert_eq!(tok1.sp, tok2.sp);
- assert_eq!(string_reader.next_token().tok, token::Whitespace);
+ assert_eq!(tok1.kind, tok2.kind);
+ assert_eq!(tok1.span, tok2.span);
+ assert_eq!(string_reader.next_token().kind, token::Whitespace);
// the 'main' id is already read:
assert_eq!(string_reader.pos.clone(), BytePos(28));
// read another token:
let tok3 = string_reader.next_token();
- let tok4 = TokenAndSpan {
- tok: mk_ident("main"),
- sp: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
+ let tok4 = Token {
+ kind: mk_ident("main"),
+ span: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
};
- assert_eq!(tok3.tok, tok4.tok);
- assert_eq!(tok3.sp, tok4.sp);
+ assert_eq!(tok3.kind, tok4.kind);
+ assert_eq!(tok3.span, tok4.span);
// the lparen is already read:
assert_eq!(string_reader.pos.clone(), BytePos(29))
})
// of tokens (stop checking after exhausting the expected vec)
fn check_tokenization(mut string_reader: StringReader<'_>, expected: Vec<TokenKind>) {
for expected_tok in &expected {
- assert_eq!(&string_reader.next_token().tok, expected_tok);
+ assert_eq!(&string_reader.next_token().kind, expected_tok);
}
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().kind,
mk_lit(token::Char, "a", None));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().kind,
mk_lit(token::Char, " ", None));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().kind,
mk_lit(token::Char, "\\n", None));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().kind,
token::Lifetime(Ident::from_str("'abc")));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token().kind,
mk_lit(token::StrRaw(3), "\"#a\\b\x00c\"", None));
})
}
let sh = mk_sess(sm.clone());
macro_rules! test {
($input: expr, $tok_type: ident, $tok_contents: expr) => {{
- assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().tok,
+ assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().kind,
mk_lit(token::$tok_type, $tok_contents, Some("suffix")));
// with a whitespace separator:
- assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().tok,
+ assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().kind,
mk_lit(token::$tok_type, $tok_contents, None));
}}
}
test!("1.0", Float, "1.0");
test!("1.0e10", Float, "1.0e10");
- assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().kind,
mk_lit(token::Integer, "2", Some("us")));
- assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().kind,
mk_lit(token::StrRaw(3), "raw", Some("suffix")));
- assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().kind,
mk_lit(token::ByteStrRaw(3), "raw", Some("suffix")));
})
}
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
let mut lexer = setup(&sm, &sh, "/* /* */ */'a'".to_string());
- match lexer.next_token().tok {
+ match lexer.next_token().kind {
token::Comment => {}
_ => panic!("expected a comment!"),
}
- assert_eq!(lexer.next_token().tok, mk_lit(token::Char, "a", None));
+ assert_eq!(lexer.next_token().kind, mk_lit(token::Char, "a", None));
})
}
let sh = mk_sess(sm.clone());
let mut lexer = setup(&sm, &sh, "// test\r\n/// test\r\n".to_string());
let comment = lexer.next_token();
- assert_eq!(comment.tok, token::Comment);
- assert_eq!((comment.sp.lo(), comment.sp.hi()), (BytePos(0), BytePos(7)));
- assert_eq!(lexer.next_token().tok, token::Whitespace);
- assert_eq!(lexer.next_token().tok,
+ assert_eq!(comment.kind, token::Comment);
+ assert_eq!((comment.span.lo(), comment.span.hi()), (BytePos(0), BytePos(7)));
+ assert_eq!(lexer.next_token().kind, token::Whitespace);
+ assert_eq!(lexer.next_token().kind,
token::DocComment(Symbol::intern("/// test")));
})
}
use crate::ext::base::DummyResult;
use crate::source_map::{self, SourceMap, Spanned, respan};
use crate::parse::{SeqSep, classify, literal, token};
-use crate::parse::lexer::{TokenAndSpan, UnmatchedBrace};
+use crate::parse::lexer::UnmatchedBrace;
use crate::parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
-use crate::parse::token::DelimToken;
+use crate::parse::token::{Token, DelimToken};
use crate::parse::{new_sub_parser_from_file, ParseSess, Directory, DirectoryOwnership};
use crate::util::parser::{AssocOp, Fixity};
use crate::print::pprust;
}
impl TokenCursor {
- fn next(&mut self) -> TokenAndSpan {
+ fn next(&mut self) -> Token {
loop {
let tree = if !self.frame.open_delim {
self.frame.open_delim = true;
self.frame = frame;
continue
} else {
- return TokenAndSpan { tok: token::Eof, sp: DUMMY_SP }
+ return Token { kind: token::Eof, span: DUMMY_SP }
};
match self.frame.last_token {
}
match tree {
- TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp },
+ TokenTree::Token(span, kind) => return Token { kind, span },
TokenTree::Delimited(sp, delim, tts) => {
let frame = TokenCursorFrame::new(sp, delim, &tts);
self.stack.push(mem::replace(&mut self.frame, frame));
}
}
- fn next_desugared(&mut self) -> TokenAndSpan {
+ fn next_desugared(&mut self) -> Token {
let (sp, name) = match self.next() {
- TokenAndSpan { sp, tok: token::DocComment(name) } => (sp, name),
+ Token { span, kind: token::DocComment(name) } => (span, name),
tok => return tok,
};
};
let tok = parser.next_tok();
- parser.token = tok.tok;
- parser.span = tok.sp;
+ parser.token = tok.kind;
+ parser.span = tok.span;
if let Some(directory) = directory {
parser.directory = directory;
parser
}
- fn next_tok(&mut self) -> TokenAndSpan {
+ fn next_tok(&mut self) -> Token {
+ // Pulls the next token from the cursor; when `desugar_doc_comments` is
+ // set, doc comments are expanded first (see `next_desugared`).
let mut next = if self.desugar_doc_comments {
self.token_cursor.next_desugared()
} else {
self.token_cursor.next()
};
- if next.sp.is_dummy() {
+ if next.span.is_dummy() {
// Tweak the location for better diagnostics, but keep syntactic context intact.
- next.sp = self.prev_span.with_ctxt(next.sp.ctxt());
+ next.span = self.prev_span.with_ctxt(next.span.ctxt());
}
next
}
};
let next = self.next_tok();
- self.span = next.sp;
- self.token = next.tok;
+ self.token = next.kind;
+ self.span = next.span;
self.expected_tokens.clear();
// check after each token
self.process_potential_macro_variable();
// fortunately for tokens currently using `bump_with`, the
// prev_token_kind will be of no use anyway.
self.prev_token_kind = PrevTokenKind::Other;
- self.span = span;
self.token = next;
+ self.span = span;
self.expected_tokens.clear();
}