use rustc_ast::token::{self, Token, TokenKind};
use rustc_ast::util::comments;
use rustc_data_structures::sync::Lrc;
use rustc_errors::{error_code, Applicability, DiagnosticBuilder, FatalError};
use rustc_lexer::Base;
use rustc_lexer::{unescape, RawStrError};
use rustc_session::parse::ParseSess;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{BytePos, Pos, Span};
mod unescape_error_reporting;
mod unicode_chars;

use rustc_lexer::unescape::Mode;
use unescape_error_reporting::{emit_unescape_error, push_escaped_char};
#[derive(Clone, Debug)]
pub struct UnmatchedBrace {
    pub expected_delim: token::DelimToken,
    pub found_delim: Option<token::DelimToken>,
    pub unclosed_span: Option<Span>,
    pub candidate_span: Option<Span>,
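    // Illustrative note (not from the original source): for input like
    // `fn f() { (]`, the parser records an `UnmatchedBrace` with
    // `expected_delim: Paren` and `found_delim: Some(Bracket)`, so later
    // diagnostics can point at both delimiters.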
pub struct StringReader<'a> {
    /// Initial position, read-only.
    /// The absolute offset within the source_map of the current character.
    /// Stop reading src at this index.
    /// Source text to tokenize.
    override_span: Option<Span>,
impl<'a> StringReader<'a> {
    pub fn new(
        sess: &'a ParseSess,
        source_file: Lrc<rustc_span::SourceFile>,
        override_span: Option<Span>,
    ) -> Self {
        // Make sure the external source is loaded first, before accessing it.
        // While this can't show up during normal parsing, `retokenize` may
        // be called with a source file from an external crate.
        sess.source_map().ensure_source_file_source_present(Lrc::clone(&source_file));

        let src = if let Some(src) = &source_file.src {
        } else if let Some(src) = source_file.external_src.borrow().get_source() {
                .bug(&format!("cannot lex `source_file` without source: {}", source_file.name));

            start_pos: source_file.start_pos,
            pos: source_file.start_pos,
            end_src_index: src.len(),
    pub fn retokenize(sess: &'a ParseSess, mut span: Span) -> Self {
        let begin = sess.source_map().lookup_byte_offset(span.lo());
        let end = sess.source_map().lookup_byte_offset(span.hi());

        // Make the range zero-length if the span is invalid.
        if begin.sf.start_pos != end.sf.start_pos {
            span = span.shrink_to_lo();

        let mut sr = StringReader::new(sess, begin.sf, None);

        // Seek the lexer to the right byte range.
        sr.end_src_index = sr.src_index(span.hi());
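        // From here on, the reader lexes only the bytes covered by `span`;
        // this is how callers re-lex an arbitrary range of an
        // already-loaded source file.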
    fn mk_sp(&self, lo: BytePos, hi: BytePos) -> Span {
        self.override_span.unwrap_or_else(|| Span::with_root_ctxt(lo, hi))
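    // Note (added for clarity): when `override_span` is set, every token
    // this reader produces carries that single span instead of its real
    // source position.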
    /// Returns the next token, including trivia like whitespace or comments.
    pub fn next_token(&mut self) -> Token {
        let start_src_index = self.src_index(self.pos);
        let text: &str = &self.src[start_src_index..self.end_src_index];

            let span = self.mk_sp(self.pos, self.pos);
            return Token::new(token::Eof, span);
        let is_beginning_of_file = self.pos == self.start_pos;
        if is_beginning_of_file {
            if let Some(shebang_len) = rustc_lexer::strip_shebang(text) {
                let start = self.pos;
                self.pos = self.pos + BytePos::from_usize(shebang_len);

                let sym = self.symbol_from(start + BytePos::from_usize("#!".len()));
                let kind = token::Shebang(sym);

                let span = self.mk_sp(start, self.pos);
                return Token::new(kind, span);
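                // Illustrative example (not from the original source): a
                // file starting with `#!/usr/bin/env run-script` yields
                // `Shebang("/usr/bin/env run-script")` spanning the whole
                // first line; the interned symbol omits the leading `#!`.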
        let token = rustc_lexer::first_token(text);

        let start = self.pos;
        self.pos = self.pos + BytePos::from_usize(token.len);

        debug!("next_token: {:?}({:?})", token.kind, self.str_from(start));

        let kind = self.cook_lexer_token(token.kind, start);
        let span = self.mk_sp(start, self.pos);
        Token::new(kind, span)
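    // A minimal usage sketch (hypothetical, not part of the original file):
    // a caller drains the reader until it sees `Eof`:
    //
    //     let mut tokens = Vec::new();
    //     loop {
    //         let token = reader.next_token();
    //         if token.kind == token::Eof {
    //             break;
    //         }
    //         tokens.push(token);
    //     }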
    /// Report a fatal lexical error with a given span.
    fn fatal_span(&self, sp: Span, m: &str) -> FatalError {
        self.sess.span_diagnostic.span_fatal(sp, m)

    /// Report a lexical error with a given span.
    fn err_span(&self, sp: Span, m: &str) {
        self.sess.span_diagnostic.struct_span_err(sp, m).emit();

    /// Report a fatal error spanning [`from_pos`, `to_pos`).
    fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> FatalError {
        self.fatal_span(self.mk_sp(from_pos, to_pos), m)

    /// Report a lexical error spanning [`from_pos`, `to_pos`).
    fn err_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) {
        self.err_span(self.mk_sp(from_pos, to_pos), m)
    fn struct_fatal_span_char(
        &self,
        from_pos: BytePos,
        to_pos: BytePos,
        m: &str,
        c: char,
    ) -> DiagnosticBuilder<'a> {
        let mut m = m.to_string();
        m.push_str(": ");
        push_escaped_char(&mut m, c);
        self.sess.span_diagnostic.struct_span_fatal(self.mk_sp(from_pos, to_pos), &m[..])
    /// Turns a simple `rustc_lexer::TokenKind` into a rich
    /// `rustc_ast::TokenKind`. This turns strings into interned symbols
    /// and runs additional validation.
    fn cook_lexer_token(&self, token: rustc_lexer::TokenKind, start: BytePos) -> TokenKind {
            rustc_lexer::TokenKind::LineComment => {
                let string = self.str_from(start);
                // Comments consisting of nothing but more `/`s (e.g. `////`)
                // are not doc comments.
                if comments::is_line_doc_comment(string) {
                    self.forbid_bare_cr(start, string, "bare CR not allowed in doc-comment");
                    token::DocComment(Symbol::intern(string))
            rustc_lexer::TokenKind::BlockComment { terminated } => {
                let string = self.str_from(start);
                // Block comments starting with `/**` or `/*!` are doc comments,
                // but comments with only `*`s between two `/`s are not.
                let is_doc_comment = comments::is_block_doc_comment(string);

                    let msg = if is_doc_comment {
                        "unterminated block doc-comment"
                        "unterminated block comment"
                    let last_bpos = self.pos;
                        .struct_span_fatal_with_code(
                            self.mk_sp(start, last_bpos),

                    self.forbid_bare_cr(start, string, "bare CR not allowed in block doc-comment");
                    token::DocComment(Symbol::intern(string))
            rustc_lexer::TokenKind::Whitespace => token::Whitespace,
            rustc_lexer::TokenKind::Ident | rustc_lexer::TokenKind::RawIdent => {
                let is_raw_ident = token == rustc_lexer::TokenKind::RawIdent;
                let mut ident_start = start;
                    ident_start = ident_start + BytePos(2);
                let sym = nfc_normalize(self.str_from(ident_start));
                let span = self.mk_sp(start, self.pos);
                self.sess.symbol_gallery.insert(sym, span);
                    if !sym.can_be_raw() {
                        self.err_span(span, &format!("`{}` cannot be a raw identifier", sym));
                    self.sess.raw_identifier_spans.borrow_mut().push(span);
                token::Ident(sym, is_raw_ident)
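                // Illustrative examples (not from the original source):
                // `r#match` lexes as a raw identifier `match`, while
                // `r#crate` is rejected above because `crate` cannot be
                // used as a raw identifier.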
            rustc_lexer::TokenKind::Literal { kind, suffix_start } => {
                let suffix_start = start + BytePos(suffix_start as u32);
                let (kind, symbol) = self.cook_lexer_literal(start, suffix_start, kind);
                let suffix = if suffix_start < self.pos {
                    let string = self.str_from(suffix_start);
                                self.mk_sp(suffix_start, self.pos),
                                "underscore literal suffix is not allowed",
                                "this was previously accepted by the compiler but is \
                                 being phased out; it will become a hard error in \
                                 <https://github.com/rust-lang/rust/issues/42326> \
                                 for more information",
                        Some(Symbol::intern(string))
                token::Literal(token::Lit { kind, symbol, suffix })
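                // For example, `1u8` cooks into
                // `Lit { kind: Integer, symbol: "1", suffix: Some("u8") }`.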
            rustc_lexer::TokenKind::Lifetime { starts_with_number } => {
                // Include the leading `'` in the real identifier, for macro
                // expansion purposes. See #12512 for the gory details of why
                // this is necessary.
                let lifetime_name = self.str_from(start);
                if starts_with_number {
                    self.err_span_(start, self.pos, "lifetimes cannot start with a number");
                let ident = Symbol::intern(lifetime_name);
                token::Lifetime(ident)
            rustc_lexer::TokenKind::Semi => token::Semi,
            rustc_lexer::TokenKind::Comma => token::Comma,
            rustc_lexer::TokenKind::Dot => token::Dot,
            rustc_lexer::TokenKind::OpenParen => token::OpenDelim(token::Paren),
            rustc_lexer::TokenKind::CloseParen => token::CloseDelim(token::Paren),
            rustc_lexer::TokenKind::OpenBrace => token::OpenDelim(token::Brace),
            rustc_lexer::TokenKind::CloseBrace => token::CloseDelim(token::Brace),
            rustc_lexer::TokenKind::OpenBracket => token::OpenDelim(token::Bracket),
            rustc_lexer::TokenKind::CloseBracket => token::CloseDelim(token::Bracket),
            rustc_lexer::TokenKind::At => token::At,
            rustc_lexer::TokenKind::Pound => token::Pound,
            rustc_lexer::TokenKind::Tilde => token::Tilde,
            rustc_lexer::TokenKind::Question => token::Question,
            rustc_lexer::TokenKind::Colon => token::Colon,
            rustc_lexer::TokenKind::Dollar => token::Dollar,
            rustc_lexer::TokenKind::Eq => token::Eq,
            rustc_lexer::TokenKind::Not => token::Not,
            rustc_lexer::TokenKind::Lt => token::Lt,
            rustc_lexer::TokenKind::Gt => token::Gt,
            rustc_lexer::TokenKind::Minus => token::BinOp(token::Minus),
            rustc_lexer::TokenKind::And => token::BinOp(token::And),
            rustc_lexer::TokenKind::Or => token::BinOp(token::Or),
            rustc_lexer::TokenKind::Plus => token::BinOp(token::Plus),
            rustc_lexer::TokenKind::Star => token::BinOp(token::Star),
            rustc_lexer::TokenKind::Slash => token::BinOp(token::Slash),
            rustc_lexer::TokenKind::Caret => token::BinOp(token::Caret),
            rustc_lexer::TokenKind::Percent => token::BinOp(token::Percent),
            rustc_lexer::TokenKind::Unknown => {
                let c = self.str_from(start).chars().next().unwrap();
                let mut err =
                    self.struct_fatal_span_char(start, self.pos, "unknown start of token", c);
                // FIXME: the lexer could be used to turn the ASCII version of unicode
                // homoglyphs into the token, instead of keeping a table in
                // `check_for_substitution`. Ideally, this should be inside
                // `rustc_lexer`. However, we should first remove compound tokens like
                // `<<` from `rustc_lexer`, and then add fancier error recovery to it,
                // as there will be less overall work to do this way.
                let token = unicode_chars::check_for_substitution(self, start, c, &mut err)
                    .unwrap_or_else(|| token::Unknown(self.symbol_from(start)));
    fn cook_lexer_literal(
        &self,
        start: BytePos,
        suffix_start: BytePos,
        kind: rustc_lexer::LiteralKind,
    ) -> (token::LitKind, Symbol) {
        // The prefix is the opening delimiter: `"`, `br"`, `r###"`, and so on.
        let (lit_kind, mode, prefix_len, postfix_len) = match kind {
            rustc_lexer::LiteralKind::Char { terminated } => {
                        .struct_span_fatal_with_code(
                            self.mk_sp(start, suffix_start),
                            "unterminated character literal",
                (token::Char, Mode::Char, 1, 1) // ' '
            rustc_lexer::LiteralKind::Byte { terminated } => {
                        .struct_span_fatal_with_code(
                            self.mk_sp(start + BytePos(1), suffix_start),
                            "unterminated byte constant",
                (token::Byte, Mode::Byte, 2, 1) // b' '
            rustc_lexer::LiteralKind::Str { terminated } => {
                        .struct_span_fatal_with_code(
                            self.mk_sp(start, suffix_start),
                            "unterminated double quote string",
                (token::Str, Mode::Str, 1, 1) // " "
            rustc_lexer::LiteralKind::ByteStr { terminated } => {
                        .struct_span_fatal_with_code(
                            self.mk_sp(start + BytePos(1), suffix_start),
                            "unterminated double quote byte string",
                (token::ByteStr, Mode::ByteStr, 2, 1) // b" "
            rustc_lexer::LiteralKind::RawStr { n_hashes, err } => {
                self.report_raw_str_error(start, err);
                let n = u32::from(n_hashes);
                (token::StrRaw(n_hashes), Mode::RawStr, 2 + n, 1 + n) // r##" "##
            rustc_lexer::LiteralKind::RawByteStr { n_hashes, err } => {
                self.report_raw_str_error(start, err);
                let n = u32::from(n_hashes);
                (token::ByteStrRaw(n_hashes), Mode::RawByteStr, 3 + n, 1 + n) // br##" "##
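                // Worked example (added for clarity): for `br##"abc"##`,
                // `n_hashes` is 2, so the prefix `br##"` is 3 + 2 = 5 bytes,
                // the postfix `"##` is 1 + 2 = 3 bytes, and the contents
                // `abc` are what remain between them.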
            rustc_lexer::LiteralKind::Int { base, empty_int } => {
                return if empty_int {
                        .struct_span_err_with_code(
                            self.mk_sp(start, suffix_start),
                            "no valid digits found for number",
                    (token::Integer, sym::integer(0))
                    self.validate_int_literal(base, start, suffix_start);
                    (token::Integer, self.symbol_from_to(start, suffix_start))
            rustc_lexer::LiteralKind::Float { base, empty_exponent } => {
                    self.err_span_(start, self.pos, "expected at least one digit in exponent");
                    Base::Hexadecimal => self.err_span_(
                        start,
                        suffix_start,
                        "hexadecimal float literal is not supported",
                        self.err_span_(start, suffix_start, "octal float literal is not supported")
                        self.err_span_(start, suffix_start, "binary float literal is not supported")
                let id = self.symbol_from_to(start, suffix_start);
                return (token::Float, id);
        let content_start = start + BytePos(prefix_len);
        let content_end = suffix_start - BytePos(postfix_len);
        let id = self.symbol_from_to(content_start, content_end);
        self.validate_literal_escape(mode, content_start, content_end);
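        // Worked example (added for clarity): for `r#"hi"#`, `prefix_len`
        // is 3 (`r#"`) and `postfix_len` is 2 (`"#`), so `content_start`
        // and `content_end` bracket exactly the contents `hi`.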
    pub fn pos(&self) -> BytePos {
        self.pos

    fn src_index(&self, pos: BytePos) -> usize {
        (pos - self.start_pos).to_usize()
    /// Slice of the source text from `start` up to but excluding `self.pos`,
    /// meaning the slice does not include the character at `self.pos`.
    fn str_from(&self, start: BytePos) -> &str {
        self.str_from_to(start, self.pos)
    /// Creates a Symbol from a given offset to the current offset.
    fn symbol_from(&self, start: BytePos) -> Symbol {
        debug!("taking a symbol from {:?} to {:?}", start, self.pos);
        Symbol::intern(self.str_from(start))

    /// As `symbol_from`, with an explicit endpoint.
    fn symbol_from_to(&self, start: BytePos, end: BytePos) -> Symbol {
        debug!("taking a symbol from {:?} to {:?}", start, end);
        Symbol::intern(self.str_from_to(start, end))
    /// Slice of the source text spanning from `start` up to but excluding `end`.
    fn str_from_to(&self, start: BytePos, end: BytePos) -> &str {
        &self.src[self.src_index(start)..self.src_index(end)]
    fn forbid_bare_cr(&self, start: BytePos, s: &str, errmsg: &str) {
        let mut idx = 0;
        loop {
            idx = match s[idx..].find('\r') {
                None => break,
                Some(it) => idx + it + 1,
            };
            self.err_span_(start + BytePos(idx as u32 - 1), start + BytePos(idx as u32), errmsg);
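            // E.g. in a comment containing "a\rb", this reports one error
            // spanning exactly the `\r` byte; multiple bare CRs each get
            // their own error.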
    fn report_raw_str_error(&self, start: BytePos, opt_err: Option<RawStrError>) {
        match opt_err {
            Some(RawStrError::InvalidStarter { bad_char }) => {
                self.report_non_started_raw_string(start, bad_char)
            Some(RawStrError::NoTerminator { expected, found, possible_terminator_offset }) => self
                .report_unterminated_raw_string(start, expected, possible_terminator_offset, found),
            Some(RawStrError::TooManyDelimiters { found }) => {
                self.report_too_many_hashes(start, found)
    fn report_non_started_raw_string(&self, start: BytePos, bad_char: char) -> ! {
        self.struct_fatal_span_char(
            "found invalid character; only `#` is allowed in raw string delimitation",
    fn report_unterminated_raw_string(
        &self,
        start: BytePos,
        n_hashes: usize,
        possible_offset: Option<usize>,
        found_terminators: usize,
    ) -> ! {
        let mut err = self.sess.span_diagnostic.struct_span_fatal_with_code(
            self.mk_sp(start, start),
            "unterminated raw string",

        err.span_label(self.mk_sp(start, start), "unterminated raw string");

                "this raw string should be terminated with `\"{}`",

        if let Some(possible_offset) = possible_offset {
            let lo = start + BytePos(possible_offset as u32);
            let hi = lo + BytePos(found_terminators as u32);
            let span = self.mk_sp(lo, hi);
                "consider terminating the string here",
                "#".repeat(n_hashes),
                Applicability::MaybeIncorrect,
    /// Note: It was decided to not add a test case, because it would be too big.
    /// <https://github.com/rust-lang/rust/pull/50296#issuecomment-392135180>
    fn report_too_many_hashes(&self, start: BytePos, found: usize) -> ! {
                "too many `#` symbols: raw strings may be delimited \
                 by up to 65535 `#` symbols, but found {}",
    fn validate_literal_escape(&self, mode: Mode, content_start: BytePos, content_end: BytePos) {
        let lit_content = self.str_from_to(content_start, content_end);
        unescape::unescape_literal(lit_content, mode, &mut |range, result| {
            // Here we only check for errors. The actual unescaping is done later.
            if let Err(err) = result {
                let span_with_quotes =
                    self.mk_sp(content_start - BytePos(1), content_end + BytePos(1));
                    &self.sess.span_diagnostic,
    fn validate_int_literal(&self, base: Base, content_start: BytePos, content_end: BytePos) {
        let base = match base {
            Base::Binary => 2,
            Base::Octal => 8,
            _ => return,
        };
        let s = self.str_from_to(content_start + BytePos(2), content_end);
        for (idx, c) in s.char_indices() {
            let idx = idx as u32;
            if c != '_' && c.to_digit(base).is_none() {
                let lo = content_start + BytePos(2 + idx);
                let hi = content_start + BytePos(2 + idx + c.len_utf8() as u32);
                self.err_span_(lo, hi, &format!("invalid digit for a base {} literal", base));
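                // E.g. `0b0123` reports this error twice, once for `2` and
                // once for `3`; underscores are skipped as digit separators.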
pub fn nfc_normalize(string: &str) -> Symbol {
    use unicode_normalization::{is_nfc_quick, IsNormalized, UnicodeNormalization};
    match is_nfc_quick(string.chars()) {
        IsNormalized::Yes => Symbol::intern(string),
        _ => {
            let normalized_str: String = string.chars().nfc().collect();
            Symbol::intern(&normalized_str)
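
// A minimal sketch (hypothetical test, not part of the original file) of the
// invariant `nfc_normalize` relies on: a decomposed identifier such as
// "e\u{301}" NFC-normalizes to the precomposed "\u{e9}", so both spellings of
// an identifier intern to the same `Symbol`. Interning requires the span
// session globals, so only the normalization step is checked here.
#[cfg(test)]
mod nfc_normalize_sketch {
    #[test]
    fn decomposed_normalizes_to_precomposed() {
        use unicode_normalization::UnicodeNormalization;
        let normalized: String = "e\u{301}".chars().nfc().collect();
        assert_eq!(normalized, "\u{e9}");
    }
}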