1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use ast::{self, Ident};
12 use syntax_pos::{self, BytePos, CharPos, Pos, Span, NO_EXPANSION};
13 use codemap::{CodeMap, FilePathMapping};
14 use errors::{FatalError, DiagnosticBuilder};
15 use parse::{token, ParseSess};
18 use std_unicode::property::Pattern_White_Space;
22 use std::mem::replace;
23 use rustc_data_structures::sync::Lrc;
29 #[derive(Clone, PartialEq, Eq, Debug)]
30 pub struct TokenAndSpan {
31 pub tok: token::Token,
35 impl Default for TokenAndSpan {
36 fn default() -> Self {
37 TokenAndSpan { tok: token::Whitespace, sp: syntax_pos::DUMMY_SP }
41 pub struct StringReader<'a> {
42 pub sess: &'a ParseSess,
43 /// The absolute offset within the codemap of the next character to read
44 pub next_pos: BytePos,
45 /// The absolute offset within the codemap of the current character
47 /// The column of the next character to read
49 /// The current character (which has been read from self.pos)
51 pub filemap: Lrc<syntax_pos::FileMap>,
52 /// If Some, stop reading the source at this position (inclusive).
53 pub terminator: Option<BytePos>,
54 /// Whether to record new-lines and multibyte chars in filemap.
55 /// This is only necessary the first time a filemap is lexed.
56 /// If part of a filemap is being re-lexed, this should be set to false.
57 pub save_new_lines_and_multibyte: bool,
59 pub peek_tok: token::Token,
61 pub fatal_errs: Vec<DiagnosticBuilder<'a>>,
62 // cache a direct reference to the source text, so that we don't have to
63 // retrieve it via `self.filemap.src.as_ref().unwrap()` all the time.
64 source_text: Lrc<String>,
65 /// Stack of open delimiters and their spans. Used for error message.
68 open_braces: Vec<(token::DelimToken, Span)>,
69 pub override_span: Option<Span>,
72 impl<'a> StringReader<'a> {
/// Construct a `Span` covering `lo..hi` with no expansion info; a set
/// `override_span` takes precedence over the freshly computed span.
73 fn mk_sp(&self, lo: BytePos, hi: BytePos) -> Span {
74 unwrap_or!(self.override_span, Span::new(lo, hi, NO_EXPANSION))
/// Intern `string` as an `Ident`; when an `override_span` is present the
/// ident inherits that span's syntax context.
// NOTE(review): the tail of this function is elided in this excerpt.
76 fn mk_ident(&self, string: &str) -> Ident {
77 let mut ident = Ident::from_str(string);
78 if let Some(span) = self.override_span {
79 ident.ctxt = span.ctxt();
/// Fetch the next token, delegating failure handling (emission of queued
/// fatal diagnostics) to `unwrap_or_abort`.
84 fn next_token(&mut self) -> TokenAndSpan where Self: Sized {
85 let res = self.try_next_token();
86 self.unwrap_or_abort(res)
/// Unwrap a lexing result; the error path emits every queued fatal
/// diagnostic.
// NOTE(review): interior lines of this function are elided in this excerpt.
88 fn unwrap_or_abort(&mut self, res: Result<TokenAndSpan, ()>) -> TokenAndSpan {
92 self.emit_fatal_errors();
/// Fetch the next non-trivia token: whitespace, comments, and shebang
/// tokens are consumed and skipped. The last-seen token is cached in
/// `self.token`.
// NOTE(review): the surrounding loop/match lines are elided in this excerpt.
97 fn try_real_token(&mut self) -> Result<TokenAndSpan, ()> {
98 let mut t = self.try_next_token()?;
101 token::Whitespace | token::Comment | token::Shebang(_) => {
102 t = self.try_next_token()?;
107 self.token = t.tok.clone();
/// Like `try_real_token`, with failures routed through `unwrap_or_abort`.
111 pub fn real_token(&mut self) -> TokenAndSpan {
112 let res = self.try_real_token();
113 self.unwrap_or_abort(res)
/// True once the reader has run out of input: no current character, or
/// (when a `terminator` is set) the next position lies past it.
// NOTE(review): lines between the `if` and the `match` are elided here.
115 fn is_eof(&self) -> bool {
116 if self.ch.is_none() {
120 match self.terminator {
121 Some(t) => self.next_pos > t,
125 /// Return the next token. EFFECT: advances the string_reader.
126 pub fn try_next_token(&mut self) -> Result<TokenAndSpan, ()> {
127 assert!(self.fatal_errs.is_empty());
// Hand back the previously peeked token (swapping a `Whitespace`
// placeholder into `peek_tok`), then advance so the lookahead is refilled.
128 let ret_val = TokenAndSpan {
129 tok: replace(&mut self.peek_tok, token::Whitespace),
132 self.advance_token()?;
/// Emit a fatal "unterminated raw string" diagnostic at `pos`, noting the
/// closing delimiter (`"` followed by `hash_count` `#`s) that would
/// terminate the literal.
136 fn fail_unterminated_raw_string(&self, pos: BytePos, hash_count: usize) {
137 let mut err = self.struct_span_fatal(pos, pos, "unterminated raw string");
138 err.span_label(self.mk_sp(pos, pos), "unterminated raw string");
140 err.note(&format!("this raw string should be terminated with `\"{}`",
141 "#".repeat(hash_count)));
/// Report a fatal error at the span of the peeked token.
147 fn fatal(&self, m: &str) -> FatalError {
148 self.fatal_span(self.peek_span, m)
/// Flush every queued fatal diagnostic, then empty the queue.
150 pub fn emit_fatal_errors(&mut self) {
151 for err in &mut self.fatal_errs {
154 self.fatal_errs.clear();
/// Return a clone of the upcoming `peek_tok` (and, presumably, its span —
/// the remaining field is elided here) without advancing the reader.
156 pub fn peek(&self) -> TokenAndSpan {
157 // FIXME(pcwalton): Bad copy!
159 tok: self.peek_tok.clone(),
165 impl<'a> StringReader<'a> {
166 /// For comments.rs, which hackily pokes into next_pos and ch
167 pub fn new_raw(sess: &'a ParseSess, filemap: Lrc<syntax_pos::FileMap>) -> Self {
168 let mut sr = StringReader::new_raw_internal(sess, filemap);
173 fn new_raw_internal(sess: &'a ParseSess, filemap: Lrc<syntax_pos::FileMap>) -> Self {
174 if filemap.src.is_none() {
175 sess.span_diagnostic.bug(&format!("Cannot lex filemap without source: {}",
179 let source_text = (*filemap.src.as_ref().unwrap()).clone();
183 next_pos: filemap.start_pos,
184 pos: filemap.start_pos,
189 save_new_lines_and_multibyte: true,
190 // dummy values; not read
191 peek_tok: token::Eof,
192 peek_span: syntax_pos::DUMMY_SP,
194 fatal_errs: Vec::new(),
196 span: syntax_pos::DUMMY_SP,
197 open_braces: Vec::new(),
202 pub fn new(sess: &'a ParseSess, filemap: Lrc<syntax_pos::FileMap>) -> Self {
203 let mut sr = StringReader::new_raw(sess, filemap);
204 if sr.advance_token().is_err() {
205 sr.emit_fatal_errors();
211 pub fn retokenize(sess: &'a ParseSess, mut span: Span) -> Self {
212 let begin = sess.codemap().lookup_byte_offset(span.lo());
213 let end = sess.codemap().lookup_byte_offset(span.hi());
215 // Make the range zero-length if the span is invalid.
216 if span.lo() > span.hi() || begin.fm.start_pos != end.fm.start_pos {
217 span = span.shrink_to_lo();
220 let mut sr = StringReader::new_raw_internal(sess, begin.fm);
222 // Seek the lexer to the right byte range.
223 sr.save_new_lines_and_multibyte = false;
224 sr.next_pos = span.lo();
225 sr.terminator = Some(span.hi());
229 if sr.advance_token().is_err() {
230 sr.emit_fatal_errors();
236 pub fn ch_is(&self, c: char) -> bool {
240 /// Report a fatal lexical error with a given span.
241 pub fn fatal_span(&self, sp: Span, m: &str) -> FatalError {
242 self.sess.span_diagnostic.span_fatal(sp, m)
245 /// Report a lexical error with a given span.
246 pub fn err_span(&self, sp: Span, m: &str) {
247 self.sess.span_diagnostic.span_err(sp, m)
251 /// Report a fatal error spanning [`from_pos`, `to_pos`).
252 fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> FatalError {
253 self.fatal_span(self.mk_sp(from_pos, to_pos), m)
256 /// Report a lexical error spanning [`from_pos`, `to_pos`).
257 fn err_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) {
258 self.err_span(self.mk_sp(from_pos, to_pos), m)
261 /// Pushes a character to a message string for error reporting
262 fn push_escaped_char_for_msg(m: &mut String, c: char) {
264 '\u{20}'...'\u{7e}' => {
265 // Don't escape \, ' or " for user-facing messages
269 for c in c.escape_default() {
276 /// Report a lexical error spanning [`from_pos`, `to_pos`), appending an
277 /// escaped character to the error message
278 fn fatal_span_char(&self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char) -> FatalError {
279 let mut m = m.to_string();
281 Self::push_escaped_char_for_msg(&mut m, c);
282 self.fatal_span_(from_pos, to_pos, &m[..])
285 fn struct_span_fatal(&self,
289 -> DiagnosticBuilder<'a> {
290 self.sess.span_diagnostic.struct_span_fatal(self.mk_sp(from_pos, to_pos), m)
293 fn struct_fatal_span_char(&self,
298 -> DiagnosticBuilder<'a> {
299 let mut m = m.to_string();
301 Self::push_escaped_char_for_msg(&mut m, c);
302 self.sess.span_diagnostic.struct_span_fatal(self.mk_sp(from_pos, to_pos), &m[..])
305 /// Report a lexical error spanning [`from_pos`, `to_pos`), appending an
306 /// escaped character to the error message
307 fn err_span_char(&self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char) {
308 let mut m = m.to_string();
310 Self::push_escaped_char_for_msg(&mut m, c);
311 self.err_span_(from_pos, to_pos, &m[..]);
313 fn struct_err_span_char(&self,
318 -> DiagnosticBuilder<'a> {
319 let mut m = m.to_string();
321 Self::push_escaped_char_for_msg(&mut m, c);
322 self.sess.span_diagnostic.struct_span_err(self.mk_sp(from_pos, to_pos), &m[..])
325 /// Report a lexical error spanning [`from_pos`, `to_pos`), appending the
326 /// offending string to the error message
327 fn fatal_span_verbose(&self, from_pos: BytePos, to_pos: BytePos, mut m: String) -> FatalError {
329 let from = self.byte_offset(from_pos).to_usize();
330 let to = self.byte_offset(to_pos).to_usize();
331 m.push_str(&self.source_text[from..to]);
332 self.fatal_span_(from_pos, to_pos, &m[..])
335 /// Advance peek_tok and peek_span to refer to the next token, and
336 /// possibly update the interner.
337 fn advance_token(&mut self) -> Result<(), ()> {
338 match self.scan_whitespace_or_comment() {
340 self.peek_span = comment.sp;
341 self.peek_tok = comment.tok;
345 self.peek_tok = token::Eof;
346 self.peek_span = self.mk_sp(self.filemap.end_pos, self.filemap.end_pos);
348 let start_bytepos = self.pos;
349 self.peek_tok = self.next_token_inner()?;
350 self.peek_span = self.mk_sp(start_bytepos, self.pos);
/// Convert an absolute codemap position into a byte offset relative to
/// the start of this filemap's source text.
357 fn byte_offset(&self, pos: BytePos) -> BytePos {
358 (pos - self.filemap.start_pos)
361 /// Calls `f` with a string slice of the source text spanning from `start`
362 /// up to but excluding `self.pos`, meaning the slice does not include
363 /// the character `self.ch`.
364 pub fn with_str_from<T, F>(&self, start: BytePos, f: F) -> T
365 where F: FnOnce(&str) -> T
367 self.with_str_from_to(start, self.pos, f)
370 /// Create a Name from a given offset to the current offset, each
371 /// adjusted 1 towards each other (assumes that on either side there is a
372 /// single-byte delimiter).
373 pub fn name_from(&self, start: BytePos) -> ast::Name {
374 debug!("taking an ident from {:?} to {:?}", start, self.pos);
375 self.with_str_from(start, Symbol::intern)
378 /// As name_from, with an explicit endpoint.
379 pub fn name_from_to(&self, start: BytePos, end: BytePos) -> ast::Name {
380 debug!("taking an ident from {:?} to {:?}", start, end);
381 self.with_str_from_to(start, end, Symbol::intern)
384 /// Calls `f` with a string slice of the source text spanning from `start`
385 /// up to but excluding `end`.
386 fn with_str_from_to<T, F>(&self, start: BytePos, end: BytePos, f: F) -> T
387 where F: FnOnce(&str) -> T
389 f(&self.source_text[self.byte_offset(start).to_usize()..self.byte_offset(end).to_usize()])
392 /// Converts CRLF to LF in the given string, raising an error on bare CR.
393 fn translate_crlf<'b>(&self, start: BytePos, s: &'b str, errmsg: &'b str) -> Cow<'b, str> {
396 let ch = char_at(s, i);
397 let next = i + ch.len_utf8();
399 if next < s.len() && char_at(s, next) == '\n' {
400 return translate_crlf_(self, start, s, errmsg, i).into();
402 let pos = start + BytePos(i as u32);
403 let end_pos = start + BytePos(next as u32);
404 self.err_span_(pos, end_pos, errmsg);
410 fn translate_crlf_(rdr: &StringReader,
416 let mut buf = String::with_capacity(s.len());
419 let ch = char_at(s, i);
420 let next = i + ch.len_utf8();
423 buf.push_str(&s[j..i]);
426 if next >= s.len() || char_at(s, next) != '\n' {
427 let pos = start + BytePos(i as u32);
428 let end_pos = start + BytePos(next as u32);
429 rdr.err_span_(pos, end_pos, errmsg);
435 buf.push_str(&s[j..]);
442 /// Advance the StringReader by one character. If a newline is
443 /// discovered, add it to the FileMap's list of line start offsets.
444 pub fn bump(&mut self) {
445 let new_pos = self.next_pos;
446 let new_byte_offset = self.byte_offset(new_pos).to_usize();
447 let end = self.terminator.map_or(self.source_text.len(), |t| {
448 self.byte_offset(t).to_usize()
450 if new_byte_offset < end {
451 let old_ch_is_newline = self.ch.unwrap() == '\n';
452 let new_ch = char_at(&self.source_text, new_byte_offset);
453 let new_ch_len = new_ch.len_utf8();
455 self.ch = Some(new_ch);
457 self.next_pos = new_pos + Pos::from_usize(new_ch_len);
458 if old_ch_is_newline {
459 if self.save_new_lines_and_multibyte {
460 self.filemap.next_line(self.pos);
462 self.col = CharPos(0);
464 self.col = self.col + CharPos(1);
467 if self.save_new_lines_and_multibyte {
468 self.filemap.record_multibyte_char(self.pos, new_ch_len);
471 self.filemap.record_width(self.pos, new_ch);
/// Peek at the character at `next_pos` without advancing the reader.
// NOTE(review): the out-of-bounds arm is elided here; presumably `None`.
478 pub fn nextch(&self) -> Option<char> {
479 let offset = self.byte_offset(self.next_pos).to_usize();
480 if offset < self.source_text.len() {
481 Some(char_at(&self.source_text, offset))
/// True if the lookahead character equals `c`.
487 pub fn nextch_is(&self, c: char) -> bool {
488 self.nextch() == Some(c)
/// Peek two characters ahead of the current one without advancing.
// NOTE(review): the early-return body and the bounds check on `next`
// are elided in this excerpt.
491 pub fn nextnextch(&self) -> Option<char> {
492 let offset = self.byte_offset(self.next_pos).to_usize();
493 let s = &self.source_text[..];
494 if offset >= s.len() {
497 let next = offset + char_at(s, offset).len_utf8();
499 Some(char_at(s, next))
/// True if the character two ahead of the current one equals `c`.
505 pub fn nextnextch_is(&self, c: char) -> bool {
506 self.nextnextch() == Some(c)
509 /// Eats <XID_start><XID_continue>*, if possible.
510 fn scan_optional_raw_name(&mut self) -> Option<ast::Name> {
511 if !ident_start(self.ch) {
514 let start = self.pos;
515 while ident_continue(self.ch) {
519 self.with_str_from(start, |string| {
521 self.sess.span_diagnostic
522 .struct_span_warn(self.mk_sp(start, self.pos),
523 "underscore literal suffix is not allowed")
524 .warn("this was previously accepted by the compiler but is \
525 being phased out; it will become a hard error in \
527 .note("for more information, see issue #42326 \
528 <https://github.com/rust-lang/rust/issues/42326>")
532 Some(Symbol::intern(string))
537 /// PRECONDITION: self.ch is not whitespace
538 /// Eats any kind of comment.
539 fn scan_comment(&mut self) -> Option<TokenAndSpan> {
540 if let Some(c) = self.ch {
541 if c.is_whitespace() {
542 let msg = "called consume_any_line_comment, but there was whitespace";
543 self.sess.span_diagnostic.span_err(self.mk_sp(self.pos, self.pos), msg);
548 match self.nextch() {
553 // line comments starting with "///" or "//!" are doc-comments
554 let doc_comment = (self.ch_is('/') && !self.nextch_is('/')) || self.ch_is('!');
555 let start_bpos = self.pos - BytePos(2);
557 while !self.is_eof() {
558 match self.ch.unwrap() {
561 if self.nextch_is('\n') {
564 } else if doc_comment {
565 self.err_span_(self.pos,
567 "bare CR not allowed in doc-comment");
576 self.with_str_from(start_bpos, |string| {
577 // comments with only more "/"s are not doc comments
578 let tok = if is_doc_comment(string) {
579 token::DocComment(Symbol::intern(string))
586 sp: self.mk_sp(start_bpos, self.pos),
592 sp: self.mk_sp(start_bpos, self.pos),
599 self.scan_block_comment()
603 } else if self.ch_is('#') {
604 if self.nextch_is('!') {
606 // Parse an inner attribute.
607 if self.nextnextch_is('[') {
611 // I guess this is the only way to figure out if
612 // we're at the beginning of the file...
613 let cmap = CodeMap::new(FilePathMapping::empty());
614 cmap.files.borrow_mut().file_maps.push(self.filemap.clone());
615 let loc = cmap.lookup_char_pos_adj(self.pos);
616 debug!("Skipping a shebang");
617 if loc.line == 1 && loc.col == CharPos(0) {
618 // FIXME: Add shebang "token", return it
619 let start = self.pos;
620 while !self.ch_is('\n') && !self.is_eof() {
623 return Some(TokenAndSpan {
624 tok: token::Shebang(self.name_from(start)),
625 sp: self.mk_sp(start, self.pos),
635 /// If there is whitespace, shebang, or a comment, scan it. Otherwise,
637 fn scan_whitespace_or_comment(&mut self) -> Option<TokenAndSpan> {
638 match self.ch.unwrap_or('\0') {
639 // # to handle shebang at start of file -- this is the entry point
640 // for skipping over all "junk"
642 let c = self.scan_comment();
643 debug!("scanning a comment {:?}", c);
646 c if is_pattern_whitespace(Some(c)) => {
647 let start_bpos = self.pos;
648 while is_pattern_whitespace(self.ch) {
651 let c = Some(TokenAndSpan {
652 tok: token::Whitespace,
653 sp: self.mk_sp(start_bpos, self.pos),
655 debug!("scanning whitespace: {:?}", c);
662 /// Might return a sugared-doc-attr
663 fn scan_block_comment(&mut self) -> Option<TokenAndSpan> {
664 // block comments starting with "/**" or "/*!" are doc-comments
665 let is_doc_comment = self.ch_is('*') || self.ch_is('!');
666 let start_bpos = self.pos - BytePos(2);
668 let mut level: isize = 1;
669 let mut has_cr = false;
672 let msg = if is_doc_comment {
673 "unterminated block doc-comment"
675 "unterminated block comment"
677 let last_bpos = self.pos;
678 self.fatal_span_(start_bpos, last_bpos, msg).raise();
680 let n = self.ch.unwrap();
682 '/' if self.nextch_is('*') => {
686 '*' if self.nextch_is('/') => {
698 self.with_str_from(start_bpos, |string| {
699 // but comments with only "*"s between two "/"s are not
700 let tok = if is_block_doc_comment(string) {
701 let string = if has_cr {
702 self.translate_crlf(start_bpos,
704 "bare CR not allowed in block doc-comment")
708 token::DocComment(Symbol::intern(&string[..]))
715 sp: self.mk_sp(start_bpos, self.pos),
720 /// Scan through any digits (base `scan_radix`) or underscores,
721 /// and return how many digits there were.
723 /// `real_radix` represents the true radix of the number we're
724 /// interested in, and errors will be emitted for any digits
725 /// between `real_radix` and `scan_radix`.
726 fn scan_digits(&mut self, real_radix: u32, scan_radix: u32) -> usize {
727 assert!(real_radix <= scan_radix);
732 debug!("skipping a _");
736 match c.and_then(|cc| cc.to_digit(scan_radix)) {
738 debug!("{:?} in scan_digits", c);
739 // check that the hypothetical digit is actually
740 // in range for the true radix
741 if c.unwrap().to_digit(real_radix).is_none() {
742 self.err_span_(self.pos,
744 &format!("invalid digit for a base {} literal", real_radix));
754 /// Lex a LIT_INTEGER or a LIT_FLOAT
755 fn scan_number(&mut self, c: char) -> token::Lit {
758 let start_bpos = self.pos;
763 match self.ch.unwrap_or('\0') {
767 num_digits = self.scan_digits(2, 10);
772 num_digits = self.scan_digits(8, 10);
777 num_digits = self.scan_digits(16, 16);
779 '0'...'9' | '_' | '.' | 'e' | 'E' => {
780 num_digits = self.scan_digits(10, 10) + 1;
784 return token::Integer(self.name_from(start_bpos));
787 } else if c.is_digit(10) {
788 num_digits = self.scan_digits(10, 10) + 1;
794 self.err_span_(start_bpos,
796 "no valid digits found for number");
797 return token::Integer(Symbol::intern("0"));
800 // might be a float, but don't be greedy if this is actually an
801 // integer literal followed by field/method access or a range pattern
802 // (`0..2` and `12.foo()`)
803 if self.ch_is('.') && !self.nextch_is('.') &&
804 !ident_start(self.nextch()) {
805 // might have stuff after the ., and if it does, it needs to start
808 if self.ch.unwrap_or('\0').is_digit(10) {
809 self.scan_digits(10, 10);
810 self.scan_float_exponent();
813 self.check_float_base(start_bpos, pos, base);
814 token::Float(self.name_from(start_bpos))
816 // it might be a float if it has an exponent
817 if self.ch_is('e') || self.ch_is('E') {
818 self.scan_float_exponent();
820 self.check_float_base(start_bpos, pos, base);
821 return token::Float(self.name_from(start_bpos));
823 // but we certainly have an integer!
824 token::Integer(self.name_from(start_bpos))
828 /// Scan over `n_digits` hex digits, stopping at `delim`, reporting an
829 /// error if too many or too few digits are encountered.
830 fn scan_hex_digits(&mut self, n_digits: usize, delim: char, below_0x7f_only: bool) -> bool {
831 debug!("scanning {} digits until {:?}", n_digits, delim);
832 let start_bpos = self.pos;
833 let mut accum_int = 0;
835 let mut valid = true;
836 for _ in 0..n_digits {
838 let last_bpos = self.pos;
839 self.fatal_span_(start_bpos,
841 "unterminated numeric character escape").raise();
843 if self.ch_is(delim) {
844 let last_bpos = self.pos;
845 self.err_span_(start_bpos,
847 "numeric character escape is too short");
851 let c = self.ch.unwrap_or('\x00');
853 accum_int += c.to_digit(16).unwrap_or_else(|| {
854 self.err_span_char(self.pos,
856 "invalid character in numeric character escape",
865 if below_0x7f_only && accum_int >= 0x80 {
866 self.err_span_(start_bpos,
868 "this form of character escape may only be used with characters in \
869 the range [\\x00-\\x7f]");
873 match char::from_u32(accum_int) {
876 let last_bpos = self.pos;
877 self.err_span_(start_bpos, last_bpos, "invalid numeric character escape");
883 /// Scan for a single (possibly escaped) byte or char
884 /// in a byte, (non-raw) byte string, char, or (non-raw) string literal.
885 /// `start` is the position of `first_source_char`, which is already consumed.
887 /// Returns true if there was a valid char/byte, false otherwise.
888 fn scan_char_or_byte(&mut self,
890 first_source_char: char,
894 match first_source_char {
896 // '\X' for some X must be a character constant:
897 let escaped = self.ch;
898 let escaped_pos = self.pos;
901 None => {} // EOF here is an error that will be checked later.
904 'n' | 'r' | 't' | '\\' | '\'' | '"' | '0' => true,
905 'x' => self.scan_byte_escape(delim, !ascii_only),
907 let valid = if self.ch_is('{') {
908 self.scan_unicode_escape(delim) && !ascii_only
910 let span = self.mk_sp(start, self.pos);
911 self.sess.span_diagnostic
912 .struct_span_err(span, "incorrect unicode escape sequence")
914 "format of unicode escape sequences is \
920 self.err_span_(start,
922 "unicode escape sequences cannot be used as a \
923 byte or in a byte string");
928 '\n' if delim == '"' => {
929 self.consume_whitespace();
932 '\r' if delim == '"' && self.ch_is('\n') => {
933 self.consume_whitespace();
938 let mut err = self.struct_err_span_char(escaped_pos,
941 "unknown byte escape"
948 err.span_help(self.mk_sp(escaped_pos, pos),
949 "this is an isolated carriage return; consider \
950 checking your editor and version control \
953 if (e == '{' || e == '}') && !ascii_only {
954 err.span_help(self.mk_sp(escaped_pos, pos),
955 "if used in a formatting string, curly braces \
956 are escaped with `{{` and `}}`");
965 '\t' | '\n' | '\r' | '\'' if delim == '\'' => {
967 self.err_span_char(start,
970 "byte constant must be escaped"
972 "character constant must be escaped"
978 if self.ch_is('\n') {
982 self.err_span_(start,
984 "bare CR not allowed in string, use \\r instead");
989 if ascii_only && first_source_char > '\x7F' {
991 self.err_span_(start,
993 "byte constant must be ASCII. Use a \\xHH escape for a \
1002 /// Scan over a `\u{...}` escape
1004 /// At this point, we have already seen the `\` and the `u`, the `{` is the current character.
1005 /// We will read a hex number (with `_` separators), with 1 to 6 actual digits,
1006 /// and pass over the `}`.
1007 fn scan_unicode_escape(&mut self, delim: char) -> bool {
1008 self.bump(); // past the {
1009 let start_bpos = self.pos;
1010 let mut valid = true;
1012 if let Some('_') = self.ch {
1013 // disallow leading `_`
1014 self.err_span_(self.pos,
1016 "invalid start of unicode escape");
1020 let count = self.scan_digits(16, 16);
1023 self.err_span_(start_bpos,
1025 "overlong unicode escape (must have at most 6 hex digits)");
1031 if valid && count == 0 {
1032 self.err_span_(start_bpos,
1034 "empty unicode escape (must have at least 1 hex digit)");
1037 self.bump(); // past the ending `}`
1042 self.err_span_(self.pos,
1044 "unterminated unicode escape (needed a `}`)");
1048 self.err_span_char(start_bpos,
1050 "invalid character in unicode escape",
1056 self.fatal_span_(start_bpos,
1058 "unterminated unicode escape (found EOF)").raise();
1066 /// Scan over a float exponent.
1067 fn scan_float_exponent(&mut self) {
1068 if self.ch_is('e') || self.ch_is('E') {
1070 if self.ch_is('-') || self.ch_is('+') {
1073 if self.scan_digits(10, 10) == 0 {
1074 self.err_span_(self.pos,
1076 "expected at least one digit in exponent")
1081 /// Check that a base is valid for a floating literal, emitting a nice
1082 /// error if it isn't.
1083 fn check_float_base(&mut self, start_bpos: BytePos, last_bpos: BytePos, base: usize) {
1086 self.err_span_(start_bpos,
1088 "hexadecimal float literal is not supported")
1091 self.err_span_(start_bpos,
1093 "octal float literal is not supported")
1096 self.err_span_(start_bpos,
1098 "binary float literal is not supported")
1104 fn binop(&mut self, op: token::BinOpToken) -> token::Token {
1106 if self.ch_is('=') {
1114 /// Return the next token from the string, advances the input past that
1115 /// token, and updates the interner
1116 fn next_token_inner(&mut self) -> Result<token::Token, ()> {
1118 if ident_start(c) &&
1119 match (c.unwrap(), self.nextch(), self.nextnextch()) {
1120 // Note: r as in r" or r#" is part of a raw string literal,
1121 // b as in b' is part of a byte literal.
1122 // They are not identifiers, and are handled further down.
1123 ('r', Some('"'), _) |
1124 ('r', Some('#'), _) |
1125 ('b', Some('"'), _) |
1126 ('b', Some('\''), _) |
1127 ('b', Some('r'), Some('"')) |
1128 ('b', Some('r'), Some('#')) => false,
1131 let start = self.pos;
1132 while ident_continue(self.ch) {
1136 // FIXME: perform NFKC normalization here. (Issue #2253)
1137 return Ok(self.with_str_from(start, |string| token::Ident(self.mk_ident(string))));
1140 if is_dec_digit(c) {
1141 let num = self.scan_number(c.unwrap());
1142 let suffix = self.scan_optional_raw_name();
1143 debug!("next_token_inner: scanned number {:?}, {:?}", num, suffix);
1144 return Ok(token::Literal(num, suffix));
1147 match c.expect("next_token_inner called at EOF") {
1159 if self.ch_is('.') {
1161 if self.ch_is('.') {
1163 Ok(token::DotDotDot)
1164 } else if self.ch_is('=') {
1176 Ok(token::OpenDelim(token::Paren))
1180 Ok(token::CloseDelim(token::Paren))
1184 Ok(token::OpenDelim(token::Brace))
1188 Ok(token::CloseDelim(token::Brace))
1192 Ok(token::OpenDelim(token::Bracket))
1196 Ok(token::CloseDelim(token::Bracket))
1216 if self.ch_is(':') {
1229 // Multi-byte tokens.
1232 if self.ch_is('=') {
1235 } else if self.ch_is('>') {
1244 if self.ch_is('=') {
1253 match self.ch.unwrap_or('\x00') {
1259 Ok(self.binop(token::Shl))
1263 match self.ch.unwrap_or('\x00') {
1276 match self.ch.unwrap_or('\x00') {
1282 Ok(self.binop(token::Shr))
1290 // Either a character constant 'a' OR a lifetime name 'abc
1291 let start_with_quote = self.pos;
1293 let start = self.pos;
1295 // the eof will be picked up by the final `'` check below
1296 let c2 = self.ch.unwrap_or('\x00');
1299 // If the character is an ident start not followed by another single
1300 // quote, then this is a lifetime name:
1301 if ident_start(Some(c2)) && !self.ch_is('\'') {
1302 while ident_continue(self.ch) {
1305 // lifetimes shouldn't end with a single quote
1306 // if we find one, then this is an invalid character literal
1307 if self.ch_is('\'') {
1308 self.fatal_span_verbose(start_with_quote, self.next_pos,
1309 String::from("character literal may only contain one codepoint"))
1314 // Include the leading `'` in the real identifier, for macro
1315 // expansion purposes. See #12512 for the gory details of why
1316 // this is necessary.
1317 let ident = self.with_str_from(start, |lifetime_name| {
1318 self.mk_ident(&format!("'{}", lifetime_name))
1321 return Ok(token::Lifetime(ident));
1324 let valid = self.scan_char_or_byte(start,
1330 if !self.ch_is('\'') {
1334 if self.ch_is('\'') {
1335 let start = self.byte_offset(start).to_usize();
1336 let end = self.byte_offset(self.pos).to_usize();
1338 let span = self.mk_sp(start_with_quote, self.pos);
1339 self.sess.span_diagnostic
1340 .struct_span_err(span,
1341 "character literal may only contain one codepoint")
1342 .span_suggestion(span,
1343 "if you meant to write a `str` literal, \
1346 &self.source_text[start..end]))
1348 return Ok(token::Literal(token::Str_(Symbol::intern("??")), None))
1350 if self.ch_is('\n') || self.is_eof() || self.ch_is('/') {
1351 // Only attempt to infer single line string literals. If we encounter
1352 // a slash, bail out in order to avoid nonsensical suggestion when
1353 // involving comments.
1357 self.fatal_span_verbose(start_with_quote, pos,
1358 String::from("character literal may only contain one codepoint")).raise();
1362 self.name_from(start)
1366 self.bump(); // advance ch past token
1367 let suffix = self.scan_optional_raw_name();
1368 Ok(token::Literal(token::Char(id), suffix))
1372 let lit = match self.ch {
1373 Some('\'') => self.scan_byte(),
1374 Some('"') => self.scan_byte_string(),
1375 Some('r') => self.scan_raw_byte_string(),
1376 _ => unreachable!(), // Should have been a token::Ident above.
1378 let suffix = self.scan_optional_raw_name();
1379 Ok(token::Literal(lit, suffix))
1382 let start_bpos = self.pos;
1383 let mut valid = true;
1385 while !self.ch_is('"') {
1387 let last_bpos = self.pos;
1388 self.fatal_span_(start_bpos,
1390 "unterminated double quote string").raise();
1393 let ch_start = self.pos;
1394 let ch = self.ch.unwrap();
1396 valid &= self.scan_char_or_byte(ch_start,
1402 // adjust for the ASCII " at the start of the literal
1404 self.name_from(start_bpos + BytePos(1))
1406 Symbol::intern("??")
1409 let suffix = self.scan_optional_raw_name();
1410 Ok(token::Literal(token::Str_(id), suffix))
1413 let start_bpos = self.pos;
1415 let mut hash_count = 0;
1416 while self.ch_is('#') {
1422 self.fail_unterminated_raw_string(start_bpos, hash_count);
1423 } else if !self.ch_is('"') {
1424 let last_bpos = self.pos;
1425 let curr_char = self.ch.unwrap();
1426 self.fatal_span_char(start_bpos,
1428 "found invalid character; only `#` is allowed \
1429 in raw string delimitation",
1433 let content_start_bpos = self.pos;
1434 let mut content_end_bpos;
1435 let mut valid = true;
1438 self.fail_unterminated_raw_string(start_bpos, hash_count);
1440 // if self.ch_is('"') {
1441 // content_end_bpos = self.pos;
1442 // for _ in 0..hash_count {
1444 // if !self.ch_is('#') {
1446 let c = self.ch.unwrap();
1449 content_end_bpos = self.pos;
1450 for _ in 0..hash_count {
1452 if !self.ch_is('#') {
1459 if !self.nextch_is('\n') {
1460 let last_bpos = self.pos;
1461 self.err_span_(start_bpos,
1463 "bare CR not allowed in raw string, use \\r \
1474 self.name_from_to(content_start_bpos, content_end_bpos)
1476 Symbol::intern("??")
1478 let suffix = self.scan_optional_raw_name();
1479 Ok(token::Literal(token::StrRaw(id, hash_count), suffix))
1482 if self.nextch_is('>') {
1487 Ok(self.binop(token::Minus))
1491 if self.nextch_is('&') {
1496 Ok(self.binop(token::And))
1500 match self.nextch() {
1507 Ok(self.binop(token::Or))
1512 Ok(self.binop(token::Plus))
1515 Ok(self.binop(token::Star))
1518 Ok(self.binop(token::Slash))
1521 Ok(self.binop(token::Caret))
1524 Ok(self.binop(token::Percent))
1527 let last_bpos = self.pos;
1528 let bpos = self.next_pos;
1529 let mut err = self.struct_fatal_span_char(last_bpos,
1531 "unknown start of token",
1533 unicode_chars::check_for_substitution(self, c, &mut err);
1534 self.fatal_errs.push(err);
1540 fn consume_whitespace(&mut self) {
1541 while is_pattern_whitespace(self.ch) && !self.is_eof() {
1546 fn read_to_eol(&mut self) -> String {
1547 let mut val = String::new();
1548 while !self.ch_is('\n') && !self.is_eof() {
1549 val.push(self.ch.unwrap());
1552 if self.ch_is('\n') {
1558 fn read_one_line_comment(&mut self) -> String {
1559 let val = self.read_to_eol();
1560 assert!((val.as_bytes()[0] == b'/' && val.as_bytes()[1] == b'/') ||
1561 (val.as_bytes()[0] == b'#' && val.as_bytes()[1] == b'!'));
1565 fn consume_non_eol_whitespace(&mut self) {
1566 while is_pattern_whitespace(self.ch) && !self.ch_is('\n') && !self.is_eof() {
1571 fn peeking_at_comment(&self) -> bool {
1572 (self.ch_is('/') && self.nextch_is('/')) || (self.ch_is('/') && self.nextch_is('*')) ||
1573 // consider shebangs comments, but not inner attributes
1574 (self.ch_is('#') && self.nextch_is('!') && !self.nextnextch_is('['))
// Scans a byte literal `b'…'` after the opening quote, returning the lexed
// literal. Validates the single char-or-byte, then requires a closing `'`.
// NOTE(review): listing fragment — argument continuation lines of the
// `scan_char_or_byte` call and the token construction are not captured.
1577 fn scan_byte(&mut self) -> token::Lit {
1579 let start = self.pos;
1581 // the eof will be picked up by the final `'` check below
1582 let c2 = self.ch.unwrap_or('\x00');
1585 let valid = self.scan_char_or_byte(start,
1590 if !self.ch_is('\'') {
1591 // Byte offsetting here is okay because the
1592 // character before position `start` are an
1593 // ascii single quote and ascii 'b'.
// `start - BytePos(2)` re-anchors the error span at the opening `b'`.
1595 self.fatal_span_verbose(start - BytePos(2),
1597 "unterminated byte constant".to_string()).raise();
1601 self.name_from(start)
1605 self.bump(); // advance ch past token
1609 fn scan_byte_escape(&mut self, delim: char, below_0x7f_only: bool) -> bool {
1610 self.scan_hex_digits(2, delim, below_0x7f_only)
// Scans a byte-string literal `b"…"` after the opening quote. Each char is
// validated with `scan_char_or_byte`; `valid` accumulates whether every
// element was well-formed. On success the literal's text is interned; the
// `"??"` symbol appears to be the placeholder returned for invalid contents
// — TODO(review): confirm against the missing branch lines.
// NOTE(review): listing fragment — loop interior and the final token
// construction are not captured.
1613 fn scan_byte_string(&mut self) -> token::Lit {
1615 let start = self.pos;
1616 let mut valid = true;
1618 while !self.ch_is('"') {
// Reaching EOF before the closing quote is fatal.
1621 self.fatal_span_(start, pos, "unterminated double quote byte string").raise();
1624 let ch_start = self.pos;
1625 let ch = self.ch.unwrap();
1627 valid &= self.scan_char_or_byte(ch_start,
1634 self.name_from(start)
1636 Symbol::intern("??")
// Scans a raw byte-string literal `br##"…"##` after the `br`. Counts the
// leading `#`s, requires a `"`, then searches for a closing `"` followed by
// the same number of `#`s. Raw *byte* strings must contain only ASCII.
// NOTE(review): listing fragment — several control-flow lines (the EOF
// checks, the closing-quote search loop header, and the final braces) are
// not captured.
1642 fn scan_raw_byte_string(&mut self) -> token::Lit {
1643 let start_bpos = self.pos;
// Count the `#`s in the opening delimiter, e.g. 2 for `br##"`.
1645 let mut hash_count = 0;
1646 while self.ch_is('#') {
1652 self.fail_unterminated_raw_string(start_bpos, hash_count);
1653 } else if !self.ch_is('"') {
1655 let ch = self.ch.unwrap();
1656 self.fatal_span_char(start_bpos,
1658 "found invalid character; only `#` is allowed in raw \
1659 string delimitation",
1663 let content_start_bpos = self.pos;
1664 let mut content_end_bpos;
1668 self.fail_unterminated_raw_string(start_bpos, hash_count);
// Candidate end found: a `"`; now require `hash_count` trailing `#`s.
1671 content_end_bpos = self.pos;
1672 for _ in 0..hash_count {
1674 if !self.ch_is('#') {
// Raw byte strings may not contain non-ASCII characters.
1683 self.err_span_char(pos, pos, "raw byte string must be ASCII", c);
1690 token::ByteStrRaw(self.name_from_to(content_start_bpos, content_end_bpos),
1695 // This tests the character for the unicode property 'PATTERN_WHITE_SPACE' which
1696 // is guaranteed to be forward compatible. http://unicode.org/reports/tr31/#R3
1697 pub fn is_pattern_whitespace(c: Option<char>) -> bool {
1698 c.map_or(false, Pattern_White_Space)
/// Returns `true` when `c` is `Some` character in the inclusive range
/// `lo..=hi`; `None` (EOF) is never in range.
///
/// Reconstructed: the listing dropped the `match` header and the `None` arm;
/// the visible `Some` arm is kept verbatim.
fn in_range(c: Option<char>, lo: char, hi: char) -> bool {
    match c {
        Some(c) => lo <= c && c <= hi,
        None => false,
    }
}
1708 fn is_dec_digit(c: Option<char>) -> bool {
1709 in_range(c, '0', '9')
1712 pub fn is_doc_comment(s: &str) -> bool {
1713 let res = (s.starts_with("///") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'/') ||
1714 s.starts_with("//!");
1715 debug!("is {:?} a doc comment? {}", s, res);
1719 pub fn is_block_doc_comment(s: &str) -> bool {
1720 // Prevent `/**/` from being parsed as a doc comment
1721 let res = ((s.starts_with("/**") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'*') ||
1722 s.starts_with("/*!")) && s.len() >= 5;
1723 debug!("is {:?} a doc comment? {}", s, res);
1727 fn ident_start(c: Option<char>) -> bool {
1730 None => return false,
1733 (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || (c > '\x7f' && c.is_xid_start())
1736 fn ident_continue(c: Option<char>) -> bool {
1739 None => return false,
1742 (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' ||
1743 (c > '\x7f' && c.is_xid_continue())
1750 use ast::{Ident, CrateConfig};
1752 use syntax_pos::{BytePos, Span, NO_EXPANSION};
1753 use codemap::CodeMap;
1755 use feature_gate::UnstableFeatures;
1757 use std::cell::RefCell;
1758 use std::collections::HashSet;
1760 use std::path::PathBuf;
1761 use diagnostics::plugin::ErrorMap;
1762 use rustc_data_structures::sync::Lock;
// Builds a ParseSess for the lexer tests whose diagnostics are written to
// `io::sink()`, i.e. silently discarded, so expected lex errors don't spam
// test output.
// NOTE(review): listing fragment — the EmitterWriter argument continuation,
// the `ParseSess {` struct-literal opener, and several fields/braces are not
// captured.
1764 fn mk_sess(cm: Lrc<CodeMap>) -> ParseSess {
1765 let emitter = errors::emitter::EmitterWriter::new(Box::new(io::sink()),
1770 span_diagnostic: errors::Handler::with_emitter(true, false, Box::new(emitter)),
1771 unstable_features: UnstableFeatures::from_environment(),
1772 config: CrateConfig::new(),
1773 included_mod_stack: RefCell::new(Vec::new()),
1775 missing_fragment_specifiers: RefCell::new(HashSet::new()),
1776 registered_diagnostics: Lock::new(ErrorMap::new()),
1777 non_modrs_mods: RefCell::new(vec![]),
1781 // open a string reader for the given string
// Registers `teststr` as a fresh filemap named "zebra.rs" in `cm` and wraps
// it in a StringReader tied to `sess`.
// NOTE(review): listing fragment — the `teststr: String` parameter line and
// the closing brace are not captured.
1782 fn setup<'a>(cm: &CodeMap,
1783 sess: &'a ParseSess,
1785 -> StringReader<'a> {
1786 let fm = cm.new_filemap(PathBuf::from("zebra.rs").into(), teststr);
1787 StringReader::new(sess, fm)
// Smoke test: lexes a tiny source file and checks token kinds, spans, and
// the reader's byte position after each identifier.
// NOTE(review): listing fragment — the `#[test] fn t1()` header and the
// closing braces are not captured.
1793 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1794 let sh = mk_sess(cm.clone());
1795 let mut string_reader = setup(&cm,
1797 "/* my source file */ fn main() { println!(\"zebra\"); }\n"
1799 let id = Ident::from_str("fn");
// The leading block comment and following space lex as distinct tokens.
1800 assert_eq!(string_reader.next_token().tok, token::Comment);
1801 assert_eq!(string_reader.next_token().tok, token::Whitespace);
1802 let tok1 = string_reader.next_token();
// `fn` occupies bytes [21, 23) of the test source.
1803 let tok2 = TokenAndSpan {
1804 tok: token::Ident(id),
1805 sp: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
1807 assert_eq!(tok1, tok2);
1808 assert_eq!(string_reader.next_token().tok, token::Whitespace);
1809 // the 'main' id is already read:
1810 assert_eq!(string_reader.pos.clone(), BytePos(28));
1811 // read another token:
1812 let tok3 = string_reader.next_token();
1813 let tok4 = TokenAndSpan {
1814 tok: token::Ident(Ident::from_str("main")),
1815 sp: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
1817 assert_eq!(tok3, tok4);
1818 // the lparen is already read:
1819 assert_eq!(string_reader.pos.clone(), BytePos(29))
1823 // check that the given reader produces the desired stream
1824 // of tokens (stop checking after exhausting the expected vec)
1825 fn check_tokenization(mut string_reader: StringReader, expected: Vec<token::Token>) {
1826 for expected_tok in &expected {
1827 assert_eq!(&string_reader.next_token().tok, expected_tok);
1831 // make the identifier by looking up the string in the interner
1832 fn mk_ident(id: &str) -> token::Token {
1833 token::Ident(Ident::from_str(id))
// Four variants exercising `::` tokenization with different surrounding
// whitespace, plus the start of a char-literal test.
// NOTE(review): listing fragment — `#[test]` attributes, the other three
// `fn …()` headers, and closing braces are not captured; each paragraph
// below is the body of a separate test function.
1837 fn doublecolonparsing() {
1839 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1840 let sh = mk_sess(cm.clone());
// "a b" -> ident, whitespace, ident.
1841 check_tokenization(setup(&cm, &sh, "a b".to_string()),
1842 vec![mk_ident("a"), token::Whitespace, mk_ident("b")]);
// "a::b" -> ident, ModSep, ident (no whitespace).
1849 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1850 let sh = mk_sess(cm.clone());
1851 check_tokenization(setup(&cm, &sh, "a::b".to_string()),
1852 vec![mk_ident("a"), token::ModSep, mk_ident("b")]);
// "a ::b" -> whitespace precedes the ModSep.
1859 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1860 let sh = mk_sess(cm.clone());
1861 check_tokenization(setup(&cm, &sh, "a ::b".to_string()),
1862 vec![mk_ident("a"), token::Whitespace, token::ModSep, mk_ident("b")]);
// "a:: b" -> whitespace follows the ModSep.
1869 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1870 let sh = mk_sess(cm.clone());
1871 check_tokenization(setup(&cm, &sh, "a:: b".to_string()),
1872 vec![mk_ident("a"), token::ModSep, token::Whitespace, mk_ident("b")]);
// 'a' lexes to a Char literal with no suffix.
1879 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1880 let sh = mk_sess(cm.clone());
1881 assert_eq!(setup(&cm, &sh, "'a'".to_string()).next_token().tok,
1882 token::Literal(token::Char(Symbol::intern("a")), None));
// Char-literal, lifetime, and raw-string lexing tests.
// NOTE(review): listing fragment — `#[test]` attributes, one `fn` header,
// intermediate `.next_token().tok` lines, and closing braces are not
// captured.
1887 fn character_space() {
1889 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1890 let sh = mk_sess(cm.clone());
// A space is a valid char literal.
1891 assert_eq!(setup(&cm, &sh, "' '".to_string()).next_token().tok,
1892 token::Literal(token::Char(Symbol::intern(" ")), None));
1897 fn character_escaped() {
1899 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1900 let sh = mk_sess(cm.clone());
// The escape sequence is kept unprocessed in the interned text ("\\n").
1901 assert_eq!(setup(&cm, &sh, "'\\n'".to_string()).next_token().tok,
1902 token::Literal(token::Char(Symbol::intern("\\n")), None));
1907 fn lifetime_name() {
1909 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1910 let sh = mk_sess(cm.clone());
// An unclosed 'abc lexes as a lifetime whose ident includes the quote.
1911 assert_eq!(setup(&cm, &sh, "'abc".to_string()).next_token().tok,
1912 token::Lifetime(Ident::from_str("'abc")));
// Raw string r###"…"###: the content is interned verbatim with hash_count 3.
1919 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1920 let sh = mk_sess(cm.clone());
1921 assert_eq!(setup(&cm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string())
1924 token::Literal(token::StrRaw(Symbol::intern("\"#a\\b\x00c\""), 3), None));
// Checks that literal suffixes (e.g. `1234suffix`) attach to the preceding
// literal token, and that a space detaches them. A local macro generates the
// paired assertions for each literal kind.
// NOTE(review): listing fragment — the `macro_rules! test {` opener, the
// `None));` arm of the second assertion, and closing braces are not captured.
1929 fn literal_suffixes() {
1931 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1932 let sh = mk_sess(cm.clone());
1934 ($input: expr, $tok_type: ident, $tok_contents: expr) => {{
// Adjacent suffix becomes part of the literal token.
1935 assert_eq!(setup(&cm, &sh, format!("{}suffix", $input)).next_token().tok,
1936 token::Literal(token::$tok_type(Symbol::intern($tok_contents)),
1937 Some(Symbol::intern("suffix"))));
1938 // with a whitespace separator:
1939 assert_eq!(setup(&cm, &sh, format!("{} suffix", $input)).next_token().tok,
1940 token::Literal(token::$tok_type(Symbol::intern($tok_contents)),
1945 test!("'a'", Char, "a");
1946 test!("b'a'", Byte, "a");
1947 test!("\"a\"", Str_, "a");
1948 test!("b\"a\"", ByteStr, "a");
1949 test!("1234", Integer, "1234");
1950 test!("0b101", Integer, "0b101");
1951 test!("0xABC", Integer, "0xABC");
1952 test!("1.0", Float, "1.0");
1953 test!("1.0e10", Float, "1.0e10");
// Suffix rules also apply to the "2us" legacy form and to raw strings.
1955 assert_eq!(setup(&cm, &sh, "2us".to_string()).next_token().tok,
1956 token::Literal(token::Integer(Symbol::intern("2")),
1957 Some(Symbol::intern("us"))));
1958 assert_eq!(setup(&cm, &sh, "r###\"raw\"###suffix".to_string()).next_token().tok,
1959 token::Literal(token::StrRaw(Symbol::intern("raw"), 3),
1960 Some(Symbol::intern("suffix"))));
1961 assert_eq!(setup(&cm, &sh, "br###\"raw\"###suffix".to_string()).next_token().tok,
1962 token::Literal(token::ByteStrRaw(Symbol::intern("raw"), 3),
1963 Some(Symbol::intern("suffix"))));
// Exercises the `is_doc_comment` classifier: `///` and `/// blah` are doc
// comments; `////` (four slashes) is an ordinary comment.
//
// Reconstructed: the listing dropped the `#[test]` attribute and closing
// brace; the visible assertions are kept verbatim.
#[test]
fn line_doc_comments() {
    assert!(is_doc_comment("///"));
    assert!(is_doc_comment("/// blah"));
    assert!(!is_doc_comment("////"));
}
// Verifies that nested block comments `/* /* */ */` lex as a single Comment
// token and that lexing resumes correctly afterwards.
// NOTE(review): listing fragment — the `#[test]` attribute, the match's
// closing brace, and the function's closing braces are not captured.
1975 fn nested_block_comments() {
1977 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1978 let sh = mk_sess(cm.clone());
1979 let mut lexer = setup(&cm, &sh, "/* /* */ */'a'".to_string());
1980 match lexer.next_token().tok {
1981 token::Comment => {}
1982 _ => panic!("expected a comment!"),
// The char literal after the comment must lex normally.
1984 assert_eq!(lexer.next_token().tok,
1985 token::Literal(token::Char(Symbol::intern("a")), None));
1990 fn crlf_comments() {
1992 let cm = Lrc::new(CodeMap::new(FilePathMapping::empty()));
1993 let sh = mk_sess(cm.clone());
1994 let mut lexer = setup(&cm, &sh, "// test\r\n/// test\r\n".to_string());
1995 let comment = lexer.next_token();
1996 assert_eq!(comment.tok, token::Comment);
1997 assert_eq!((comment.sp.lo(), comment.sp.hi()), (BytePos(0), BytePos(7)));
1998 assert_eq!(lexer.next_token().tok, token::Whitespace);
1999 assert_eq!(lexer.next_token().tok,
2000 token::DocComment(Symbol::intern("/// test")));