//! Low-level Rust lexer.
//!
//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax;
//! for that, see `librustc_parse::lexer`, which converts this basic token stream
//! into wide tokens used by the actual parser.
//!
//! The purpose of this crate is to convert raw sources into a labeled sequence
//! of well-known token types, so that building an actual Rust token stream will
//! be easier.
//!
//! The main entity of this crate is the [`TokenKind`] enum, which represents common
//! lexeme types.
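//!
//! A hedged usage sketch (not part of the original docs; it assumes this crate
//! is available as `rustc_lexer` and that `Token` exposes public `kind` and
//! `len` fields):
//!
//! ```ignore
//! for token in rustc_lexer::tokenize("let x = 5;") {
//!     println!("{:?}, length {}", token.kind, token.len);
//! }
//! ```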
// We want to be able to build this crate with a stable compiler, so no
// `#![feature]` attributes should be added.

use self::LiteralKind::*;
use self::TokenKind::*;
use crate::cursor::{Cursor, EOF_CHAR};
use std::convert::TryInto;
/// Parsed token.
/// It doesn't contain information about data that has been parsed,
/// only the type of the token and its size.
fn new(kind: TokenKind, len: usize) -> Token {
/// Enum representing common lexeme types.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
    /// "/* block comment */"
    /// Block comments can be recursive, so a sequence like "/* /* */"
    /// will not be considered terminated and will result in a parsing error.
    BlockComment { terminated: bool },
    /// Any whitespace character sequence.
    Whitespace,
    /// "ident" or "continue"
    /// At this step keywords are also considered identifiers.
    Ident,
    /// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details.
    Literal { kind: LiteralKind, suffix_start: usize },
    Lifetime { starts_with_number: bool },
    /// Unknown token, not expected by the lexer, e.g. "№"
    Unknown,
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum LiteralKind {
    /// "12_u8", "0o100", "0b120i99"
    Int { base: Base, empty_int: bool },
    /// "12.34f32", "0b100.100"
    Float { base: Base, empty_exponent: bool },
    /// "'a'", "'\\'", "'''", "';"
    Char { terminated: bool },
    /// "b'a'", "b'\\'", "b'''", "b';"
    Byte { terminated: bool },
    Str { terminated: bool },
    /// "b"abc"", "b"abc"
    ByteStr { terminated: bool },
    /// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a"
    RawStr(UnvalidatedRawStr),
    /// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a"
    RawByteStr(UnvalidatedRawStr),
}
/// Represents something that looks like a raw string, but may have some
/// problems. Use `.validate()` to convert it into something useful,
/// such as a `ValidatedRawStr`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct UnvalidatedRawStr {
    /// The prefix (`r###"`) is valid.
    valid_start: bool,
    /// The postfix (`"###`) is valid.
    valid_end: bool,
    /// The number of leading `#`s.
    n_start_hashes: usize,
    /// The number of trailing `#`s. `n_end_hashes` <= `n_start_hashes`.
    n_end_hashes: usize,
    /// The offset starting at `r` or `br` where the user may have intended to end the string.
    /// Currently, it is the longest sequence of the pattern `"#+"`.
    possible_terminator_offset: Option<usize>,
}
/// Error produced when validating a raw string. Represents cases like:
/// - `r##~"abcde"##`: `LexRawStrError::InvalidStarter`
/// - `r###"abcde"##`: `LexRawStrError::NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11) }`
/// - Too many `#`s (>65535): `TooManyDelimiters`
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum LexRawStrError {
    /// Non-`#` characters exist between `r` and `"`, e.g. `r#~"..`
    InvalidStarter,
    /// The string was never terminated. `possible_terminator_offset` is the number of characters
    /// after `r` or `br` where the author may have intended to terminate it.
    NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
    /// More than 65535 `#`s exist.
    TooManyDelimiters,
}
/// A raw string that contains a valid prefix (`#+"`) and postfix (`"#+`), where
/// there is a matching number of `#` characters in both. Note that this will
/// not consume extra trailing `#` characters: `r###"abcde"####` is lexed as a
/// `ValidatedRawStr { n_hashes: 3 }` followed by a `#` token.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct ValidatedRawStr {
    n_hashes: u16,
}

impl ValidatedRawStr {
    pub fn num_hashes(&self) -> u16 {
        self.n_hashes
    }
}
impl UnvalidatedRawStr {
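    /// Validates the raw string's delimiters.
    ///
    /// A hedged sketch of the intended use (not a doctest from the original
    /// source; it assumes the `first_token` function defined below and that
    /// `Token` exposes a public `kind` field):
    ///
    /// ```ignore
    /// // Lex the raw string literal r###"abc"## (only two closing hashes).
    /// let token = first_token(r####"r###"abc"##"####);
    /// if let TokenKind::Literal { kind: LiteralKind::RawStr(raw), .. } = token.kind {
    ///     // Validation reports the missing terminator instead of a usable value.
    ///     assert!(matches!(raw.validate(), Err(LexRawStrError::NoTerminator { .. })));
    /// }
    /// ```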
    pub fn validate(self) -> Result<ValidatedRawStr, LexRawStrError> {
        if !self.valid_start {
            return Err(LexRawStrError::InvalidStarter);
        }

        // Only up to 65535 `#`s are allowed in raw strings.
        let n_start_safe: u16 =
            self.n_start_hashes.try_into().map_err(|_| LexRawStrError::TooManyDelimiters)?;

        if self.n_start_hashes > self.n_end_hashes || !self.valid_end {
            Err(LexRawStrError::NoTerminator {
                expected: self.n_start_hashes,
                found: self.n_end_hashes,
                possible_terminator_offset: self.possible_terminator_offset,
            })
        } else {
            // Since the lexer should never produce a literal with n_end > n_start,
            // if n_start <= n_end they must be equal.
            debug_assert_eq!(self.n_start_hashes, self.n_end_hashes);
            Ok(ValidatedRawStr { n_hashes: n_start_safe })
        }
    }
}
/// Base of numeric literal encoding according to its prefix.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Base {
    /// Literal starts with "0b".
    Binary,
    /// Literal starts with "0o".
    Octal,
    /// Literal starts with "0x".
    Hexadecimal,
    /// Literal doesn't contain a prefix.
    Decimal,
}
/// `rustc` allows files to have a shebang, e.g. "#!/usr/bin/rustrun",
/// but a shebang isn't part of Rust syntax, so this function
/// skips the line if it starts with a shebang ("#!").
/// The line won't be skipped if it is valid Rust syntax
/// (e.g. "#![deny(missing_docs)]").
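///
/// A hedged sketch of the expected behaviour (not a doctest from the original
/// source):
///
/// ```ignore
/// // The shebang occupies the 18 bytes before the newline.
/// assert_eq!(strip_shebang("#!/usr/bin/rustrun\nfn main() {}"), Some(18));
/// // An inner attribute is valid Rust syntax, so nothing is stripped.
/// assert_eq!(strip_shebang("#![deny(missing_docs)]"), None);
/// ```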
pub fn strip_shebang(input: &str) -> Option<usize> {
    debug_assert!(!input.is_empty());
    let s: &str = &remove_whitespace(input);
    if !s.starts_with("#!") || s.starts_with("#![") {
        return None;
    }
    Some(input.find('\n').unwrap_or(input.len()))
}
fn remove_whitespace(s: &str) -> String {
    s.chars().filter(|c| !c.is_whitespace()).collect()
}
/// Parses the first token from the provided input string.
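///
/// A hedged sketch of how a caller might drive the lexer (not a doctest from
/// the original source; it assumes `Token` exposes public `kind` and `len`
/// fields):
///
/// ```ignore
/// let mut src = "0x1Fu32 + foo";
/// while !src.is_empty() {
///     let token = first_token(src);
///     let (text, rest) = src.split_at(token.len);
///     // E.g. the first token covers "0x1Fu32": a hexadecimal integer literal
///     // whose suffix starts at "u32".
///     println!("{:?}: {:?}", token.kind, text);
///     src = rest;
/// }
/// ```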
pub fn first_token(input: &str) -> Token {
    debug_assert!(!input.is_empty());
    Cursor::new(input).advance_token()
}
/// Creates an iterator that produces tokens from the input string.
pub fn tokenize(mut input: &str) -> impl Iterator<Item = Token> + '_ {
    std::iter::from_fn(move || {
        if input.is_empty() {
            return None;
        }
        let token = first_token(input);
        input = &input[token.len..];
        Some(token)
    })
}
/// True if `c` is considered whitespace according to the Rust language definition.
/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
/// for definitions of these classes.
pub fn is_whitespace(c: char) -> bool {
    // This is Pattern_White_Space.
    //
    // Note that this set is stable (i.e., it doesn't change with different
    // Unicode versions), so it's ok to just hard-code the values.

    match c {
        // Usual ASCII suspects
        | '\u{0009}' // \t
        | '\u{000A}' // \n
        | '\u{000B}' // vertical tab
        | '\u{000C}' // form feed
        | '\u{000D}' // \r
        | '\u{0020}' // space

        // NEXT LINE from latin1
        | '\u{0085}'

        // Bidi markers
        | '\u{200E}' // LEFT-TO-RIGHT MARK
        | '\u{200F}' // RIGHT-TO-LEFT MARK

        // Dedicated whitespace characters from Unicode
        | '\u{2028}' // LINE SEPARATOR
        | '\u{2029}' // PARAGRAPH SEPARATOR
        => true,
        _ => false,
    }
}
/// True if `c` is valid as a first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier names.
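///
/// A hedged illustration (not a doctest from the original source):
///
/// ```ignore
/// assert!(is_id_start('_') && is_id_start('x'));
/// assert!(!is_id_start('1') && !is_id_start('-'));
/// ```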
pub fn is_id_start(c: char) -> bool {
    // This is XID_Start OR '_' (which formally is not a XID_Start).
    // We also add a fast path for ASCII idents.
    ('a' <= c && c <= 'z')
        || ('A' <= c && c <= 'Z')
        || c == '_'
        || (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
/// True if `c` is valid as a non-first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of valid identifier names.
pub fn is_id_continue(c: char) -> bool {
    // This is exactly XID_Continue.
    // We also add a fast path for ASCII idents.
    ('a' <= c && c <= 'z')
        || ('A' <= c && c <= 'Z')
        || ('0' <= c && c <= '9')
        || c == '_'
        || (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
    /// Parses a token from the input string.
    fn advance_token(&mut self) -> Token {
        let first_char = self.bump().unwrap();
        let token_kind = match first_char {
            // Slash, comment or block comment.
            '/' => match self.first() {
                '/' => self.line_comment(),
                '*' => self.block_comment(),

            // Whitespace sequence.
            c if is_whitespace(c) => self.whitespace(),

            // Raw identifier, raw string literal or identifier.
            'r' => match (self.first(), self.second()) {
                ('#', c1) if is_id_start(c1) => self.raw_ident(),
                ('#', _) | ('"', _) => {
                    let raw_str_i = self.raw_double_quoted_string(1);
                    let suffix_start = self.len_consumed();
                    if raw_str_i.n_end_hashes == raw_str_i.n_start_hashes {
                        self.eat_literal_suffix();
                    }
                    let kind = RawStr(raw_str_i);
                    Literal { kind, suffix_start }
            // Byte literal, byte string literal, raw byte string literal or identifier.
            'b' => match (self.first(), self.second()) {
                    let terminated = self.single_quoted_string();
                    let suffix_start = self.len_consumed();
                    self.eat_literal_suffix();
                    let kind = Byte { terminated };
                    Literal { kind, suffix_start }

                    let terminated = self.double_quoted_string();
                    let suffix_start = self.len_consumed();
                    self.eat_literal_suffix();
                    let kind = ByteStr { terminated };
                    Literal { kind, suffix_start }

                ('r', '"') | ('r', '#') => {
                    let raw_str_i = self.raw_double_quoted_string(2);
                    let suffix_start = self.len_consumed();
                    let terminated = raw_str_i.n_start_hashes == raw_str_i.n_end_hashes;
                    self.eat_literal_suffix();
                    let kind = RawByteStr(raw_str_i);
                    Literal { kind, suffix_start }
            // Identifier (this should be checked after other variants that can
            // start as an identifier).
            c if is_id_start(c) => self.ident(),

                let literal_kind = self.number(c);
                let suffix_start = self.len_consumed();
                self.eat_literal_suffix();
                TokenKind::Literal { kind: literal_kind, suffix_start }

            // One-symbol tokens.

            // Lifetime or character literal.
            '\'' => self.lifetime_or_char(),

                let terminated = self.double_quoted_string();
                let suffix_start = self.len_consumed();
                self.eat_literal_suffix();
                let kind = Str { terminated };
                Literal { kind, suffix_start }

        Token::new(token_kind, self.len_consumed())
    fn line_comment(&mut self) -> TokenKind {
        debug_assert!(self.prev() == '/' && self.first() == '/');
        self.eat_while(|c| c != '\n');
    fn block_comment(&mut self) -> TokenKind {
        debug_assert!(self.prev() == '/' && self.first() == '*');
        let mut depth = 1usize;
        while let Some(c) = self.bump() {
                '/' if self.first() == '*' => {
                '*' if self.first() == '/' => {
                        // This block comment is closed, so for a construction like "/* */ */"
                        // there will be a successfully parsed block comment "/* */"
                        // and " */" will be processed separately.

        BlockComment { terminated: depth == 0 }
    fn whitespace(&mut self) -> TokenKind {
        debug_assert!(is_whitespace(self.prev()));
        self.eat_while(is_whitespace);
    fn raw_ident(&mut self) -> TokenKind {
        debug_assert!(self.prev() == 'r' && self.first() == '#' && is_id_start(self.second()));
        // Eat the identifier part of RawIdent.
        self.eat_identifier();
    fn ident(&mut self) -> TokenKind {
        debug_assert!(is_id_start(self.prev()));
        // Start is already eaten, eat the rest of identifier.
        self.eat_while(is_id_continue);
    fn number(&mut self, first_digit: char) -> LiteralKind {
        debug_assert!('0' <= self.prev() && self.prev() <= '9');
        let mut base = Base::Decimal;
        if first_digit == '0' {
            // Attempt to parse encoding base.
            let has_digits = match self.first() {
                    self.eat_decimal_digits()
                    self.eat_decimal_digits()
                    base = Base::Hexadecimal;
                    self.eat_hexadecimal_digits()
                // Not a base prefix.
                '0'..='9' | '_' | '.' | 'e' | 'E' => {
                    self.eat_decimal_digits();
                _ => return Int { base, empty_int: false },
            // Base prefix was provided, but there were no digits
            // after it, e.g. "0x".
                return Int { base, empty_int: true };
            // No base prefix, parse number in the usual way.
            self.eat_decimal_digits();

            // Don't be greedy if this is actually an
            // integer literal followed by field/method access or a range pattern
            // (`0..2` and `12.foo()`)
            '.' if self.second() != '.' && !is_id_start(self.second()) => {
                // might have stuff after the ., and if it does, it needs to start
                // with a number
                let mut empty_exponent = false;
                if self.first().is_digit(10) {
                    self.eat_decimal_digits();
                        empty_exponent = !self.eat_float_exponent();
                Float { base, empty_exponent }
                let empty_exponent = !self.eat_float_exponent();
                Float { base, empty_exponent }
            _ => Int { base, empty_int: false },
    fn lifetime_or_char(&mut self) -> TokenKind {
        debug_assert!(self.prev() == '\'');

        let can_be_a_lifetime = if self.second() == '\'' {
            // It's surely not a lifetime.
            // If the first symbol is valid for an identifier, it can be a lifetime.
            // Also check if it's a number, for better error reporting (so '0 will
            // be reported as an invalid lifetime and not as an unterminated char literal).
            is_id_start(self.first()) || self.first().is_digit(10)

        if !can_be_a_lifetime {
            let terminated = self.single_quoted_string();
            let suffix_start = self.len_consumed();
            self.eat_literal_suffix();
            let kind = Char { terminated };
            return Literal { kind, suffix_start };
        }

        // Either a lifetime or a character literal with
        // length greater than 1.

        let starts_with_number = self.first().is_digit(10);

        // Skip the literal contents.
        // First symbol can be a number (which isn't a valid identifier start),
        // so skip it without any checks.
        self.eat_while(is_id_continue);

        // Check if after skipping literal contents we've met a closing
        // single quote (which means that the user attempted to create a
        // string with single quotes).
        if self.first() == '\'' {
            let kind = Char { terminated: true };
            Literal { kind, suffix_start: self.len_consumed() }

        Lifetime { starts_with_number }
    fn single_quoted_string(&mut self) -> bool {
        debug_assert!(self.prev() == '\'');
        // Check if it's a one-symbol literal.
        if self.second() == '\'' && self.first() != '\\' {

        // Literal has more than one symbol.

        // Parse until either quotes are terminated or an error is detected.
            // Quotes are terminated, finish parsing.

            // Probably the beginning of a comment, which we don't want to include
            // in the error report.

            // Newline without a following '\'' means an unclosed quote, stop parsing.
            '\n' if self.second() != '\'' => break,
            // End of file, stop parsing.
            EOF_CHAR if self.is_eof() => break,
            // Escaped slash is considered one character, so bump twice.

            // Skip the character.

        // String was not terminated.
    /// Eats a double-quoted string and returns true
    /// if the string is terminated.
    fn double_quoted_string(&mut self) -> bool {
        debug_assert!(self.prev() == '"');
        while let Some(c) = self.bump() {
                '\\' if self.first() == '\\' || self.first() == '"' => {
                    // Bump again to skip the escaped character.
        // End of file reached.
    /// Eats the double-quoted string and returns an `UnvalidatedRawStr`.
    fn raw_double_quoted_string(&mut self, prefix_len: usize) -> UnvalidatedRawStr {
        debug_assert!(self.prev() == 'r');
        let mut valid_start: bool = false;
        let start_pos = self.len_consumed();
        let (mut possible_terminator_offset, mut max_hashes) = (None, 0);

        // Count opening '#' symbols.
        let n_start_hashes = self.eat_while(|c| c == '#');

        // Check that the string is started.
            Some('"') => valid_start = true,
                return UnvalidatedRawStr {
                    possible_terminator_offset,

        // Skip the string contents and, on each '#' character met, check whether this is
        // a raw string termination.
            self.eat_while(|c| c != '"');

                return UnvalidatedRawStr {
                    n_end_hashes: max_hashes,
                    possible_terminator_offset,

            // Eat the closing double quote.

            // Check that the number of closing '#' symbols
            // is equal to the number of opening ones.
            let mut hashes_left = n_start_hashes;
            let is_closing_hash = |c| {
                if c == '#' && hashes_left != 0 {

            let n_end_hashes = self.eat_while(is_closing_hash);

            if n_end_hashes == n_start_hashes {
                return UnvalidatedRawStr {
                    possible_terminator_offset: None,
            } else if n_end_hashes > max_hashes {
                // Keep track of possible terminators to give a hint about where there might be
                // a missing terminator.
                possible_terminator_offset =
                    Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len);
                max_hashes = n_end_hashes;
    fn eat_decimal_digits(&mut self) -> bool {
        let mut has_digits = false;
    fn eat_hexadecimal_digits(&mut self) -> bool {
        let mut has_digits = false;
                '0'..='9' | 'a'..='f' | 'A'..='F' => {
    /// Eats the float exponent. Returns true if at least one digit was met,
    /// and returns false otherwise.
    fn eat_float_exponent(&mut self) -> bool {
        debug_assert!(self.prev() == 'e' || self.prev() == 'E');
        if self.first() == '-' || self.first() == '+' {
            self.bump();
        }
        self.eat_decimal_digits()
    }
    // Eats the suffix of the literal, e.g. "_u8".
    fn eat_literal_suffix(&mut self) {
        self.eat_identifier();
    }

    // Eats the identifier.
    fn eat_identifier(&mut self) {
        if !is_id_start(self.first()) {
            return;
        }
        self.bump();

        self.eat_while(is_id_continue);
    }
    /// Eats symbols while the predicate returns true, or until the end of file is reached.
    /// Returns the number of eaten symbols.
    fn eat_while<F>(&mut self, mut predicate: F) -> usize
    where
        F: FnMut(char) -> bool,
    {
        let mut eaten: usize = 0;
        while predicate(self.first()) && !self.is_eof() {