};
use syntax::{
algo::{self, find_node_at_offset, SyntaxRewriter},
- AstNode, SourceFile, SyntaxElement, SyntaxKind, SyntaxToken, TextRange, TextSize,
+ AstNode, AstToken, SourceFile, SyntaxElement, SyntaxKind, SyntaxToken, TextRange, TextSize,
TokenAtOffset,
};
use text_edit::{TextEdit, TextEditBuilder};
/// Tokens at the cursor offset in the parsed source file; see
/// `TokenAtOffset` for how positions on a token boundary are represented.
pub(crate) fn token_at_offset(&self) -> TokenAtOffset<SyntaxToken> {
self.source_file.syntax().token_at_offset(self.offset())
}
- pub(crate) fn find_token_at_offset(&self, kind: SyntaxKind) -> Option<SyntaxToken> {
+ pub(crate) fn find_token_syntax_at_offset(&self, kind: SyntaxKind) -> Option<SyntaxToken> {
self.token_at_offset().find(|it| it.kind() == kind)
}
+ pub(crate) fn find_token_at_offset<T: AstToken>(&self) -> Option<T> {
+ self.token_at_offset().find_map(T::cast)
+ }
/// Finds a node of type `N` at the cursor offset
/// (delegates to `algo::find_node_at_offset`).
pub(crate) fn find_node_at_offset<N: AstNode>(&self) -> Option<N> {
find_node_at_offset(self.source_file.syntax(), self.offset())
}
// }
// ```
pub(crate) fn add_turbo_fish(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let ident = ctx.find_token_at_offset(SyntaxKind::IDENT).or_else(|| {
+ let ident = ctx.find_token_syntax_at_offset(SyntaxKind::IDENT).or_else(|| {
let arg_list = ctx.find_node_at_offset::<ast::ArgList>()?;
if arg_list.args().count() > 0 {
return None;
// fn qux(bar: Bar, baz: Baz) {}
// ```
pub(crate) fn expand_glob_import(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let star = ctx.find_token_at_offset(T![*])?;
+ let star = ctx.find_token_syntax_at_offset(T![*])?;
let (parent, mod_path) = find_parent_and_path(&star)?;
let target_module = match ctx.sema.resolve_path(&mod_path)? {
PathResolution::Def(ModuleDef::Module(it)) => it,
// }
// ```
pub(crate) fn flip_comma(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let comma = ctx.find_token_at_offset(T![,])?;
+ let comma = ctx.find_token_syntax_at_offset(T![,])?;
let prev = non_trivia_sibling(comma.clone().into(), Direction::Prev)?;
let next = non_trivia_sibling(comma.clone().into(), Direction::Next)?;
pub(crate) fn flip_trait_bound(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
// We want to replicate the behavior of `flip_binexpr` by only suggesting
// the assist when the cursor is on a `+`
- let plus = ctx.find_token_at_offset(T![+])?;
+ let plus = ctx.find_token_syntax_at_offset(T![+])?;
// Make sure we're in a `TypeBoundList`
if ast::TypeBoundList::cast(plus.parent()).is_none() {
// FIXME: should also add support for the case fun(f: &Foo) -> &<|>Foo
pub(crate) fn introduce_named_lifetime(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
let lifetime_token = ctx
- .find_token_at_offset(SyntaxKind::LIFETIME)
+ .find_token_syntax_at_offset(SyntaxKind::LIFETIME)
.filter(|lifetime| lifetime.text() == "'_")?;
if let Some(fn_def) = lifetime_token.ancestors().find_map(ast::Fn::cast) {
generate_fn_def_assist(acc, &fn_def, lifetime_token.text_range())
// ```
pub(crate) fn invert_if(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let if_keyword = ctx.find_token_at_offset(T![if])?;
+ let if_keyword = ctx.find_token_syntax_at_offset(T![if])?;
let expr = ast::IfExpr::cast(if_keyword.parent())?;
let if_range = if_keyword.text_range();
let cursor_in_range = if_range.contains_range(ctx.frange.range);
use syntax::{
ast::{self, HasQuotes, HasStringValue},
- AstToken,
- SyntaxKind::{RAW_STRING, STRING},
- TextRange, TextSize,
+ AstToken, TextRange, TextSize,
};
use test_utils::mark;
// }
// ```
pub(crate) fn make_raw_string(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let token = ctx.find_token_at_offset(STRING).and_then(ast::String::cast)?;
+ let token = ctx.find_token_at_offset::<ast::String>()?;
+ if token.is_raw() {
+ return None;
+ }
let value = token.value()?;
let target = token.syntax().text_range();
acc.add(
// }
// ```
pub(crate) fn make_usual_string(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let token = ctx.find_token_at_offset(RAW_STRING).and_then(ast::RawString::cast)?;
+ let token = ctx.find_token_at_offset::<ast::String>()?;
+ if !token.is_raw() {
+ return None;
+ }
let value = token.value()?;
let target = token.syntax().text_range();
acc.add(
// }
// ```
pub(crate) fn add_hash(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let token = ctx.find_token_at_offset(RAW_STRING)?;
- let target = token.text_range();
+ let token = ctx.find_token_at_offset::<ast::String>()?;
+ if !token.is_raw() {
+ return None;
+ }
+ let text_range = token.syntax().text_range();
+ let target = text_range;
acc.add(AssistId("add_hash", AssistKind::Refactor), "Add #", target, |edit| {
- edit.insert(token.text_range().start() + TextSize::of('r'), "#");
- edit.insert(token.text_range().end(), "#");
+ edit.insert(text_range.start() + TextSize::of('r'), "#");
+ edit.insert(text_range.end(), "#");
})
}
// }
// ```
pub(crate) fn remove_hash(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let token = ctx.find_token_at_offset(RAW_STRING).and_then(ast::RawString::cast)?;
+ let token = ctx.find_token_at_offset::<ast::String>()?;
+ if !token.is_raw() {
+ return None;
+ }
let text = token.text().as_str();
if !text.starts_with("r#") && text.ends_with('#') {
// }
// ```
pub(crate) fn remove_mut(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let mut_token = ctx.find_token_at_offset(T![mut])?;
+ let mut_token = ctx.find_token_syntax_at_offset(T![mut])?;
let delete_from = mut_token.text_range().start();
let delete_to = match mut_token.next_token() {
Some(it) if it.kind() == SyntaxKind::WHITESPACE => it.text_range().end(),
// fn compute() -> Option<i32> { None }
// ```
pub(crate) fn replace_let_with_if_let(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let let_kw = ctx.find_token_at_offset(T![let])?;
+ let let_kw = ctx.find_token_syntax_at_offset(T![let])?;
let let_stmt = let_kw.ancestors().find_map(ast::LetStmt::cast)?;
let init = let_stmt.initializer()?;
let original_pat = let_stmt.pat()?;
// }
// ```
pub(crate) fn replace_string_with_char(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let token = ctx.find_token_at_offset(STRING).and_then(ast::String::cast)?;
+ let token = ctx.find_token_syntax_at_offset(STRING).and_then(ast::String::cast)?;
let value = token.value()?;
let target = token.syntax().text_range();
// use std::{collections::HashMap};
// ```
pub(crate) fn split_import(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
- let colon_colon = ctx.find_token_at_offset(T![::])?;
+ let colon_colon = ctx.find_token_syntax_at_offset(T![::])?;
let path = ast::Path::cast(colon_colon.parent())?.qualifier()?;
let top_path = successors(Some(path.clone()), |it| it.parent_path()).last()?;
let assist_id = AssistId("unwrap_block", AssistKind::RefactorRewrite);
let assist_label = "Unwrap block";
- let l_curly_token = ctx.find_token_at_offset(T!['{'])?;
+ let l_curly_token = ctx.find_token_syntax_at_offset(T!['{'])?;
let mut block = ast::BlockExpr::cast(l_curly_token.parent())?;
let mut parent = block.syntax().parent()?;
if ast::MatchArm::can_cast(parent.kind()) {
) -> Option<TextRange> {
let range = frange.range;
- let string_kinds = [COMMENT, STRING, RAW_STRING, BYTE_STRING, RAW_BYTE_STRING];
+ let string_kinds = [COMMENT, STRING, BYTE_STRING];
let list_kinds = [
RECORD_PAT_FIELD_LIST,
MATCH_ARM_LIST,
element.clone()
};
- if let Some(token) = element.as_token().cloned().and_then(ast::RawString::cast) {
- let expanded = element_to_highlight.as_token().unwrap().clone();
- if injection::highlight_injection(&mut stack, &sema, token, expanded).is_some() {
- continue;
+ if let Some(token) = element.as_token().cloned().and_then(ast::String::cast) {
+ if token.is_raw() {
+ let expanded = element_to_highlight.as_token().unwrap().clone();
+ if injection::highlight_injection(&mut stack, &sema, token, expanded).is_some() {
+ continue;
+ }
}
}
}
stack.pop_and_inject(None);
}
- } else if let Some(string) =
- element_to_highlight.as_token().cloned().and_then(ast::RawString::cast)
- {
- format_string_highlighter.highlight_format_string(&mut stack, &string, range);
}
}
}
None => h.into(),
}
}
- STRING | RAW_STRING | RAW_BYTE_STRING | BYTE_STRING => HighlightTag::StringLiteral.into(),
+ STRING | BYTE_STRING => HighlightTag::StringLiteral.into(),
ATTR => HighlightTag::Attribute.into(),
INT_NUMBER | FLOAT_NUMBER => HighlightTag::NumericLiteral.into(),
BYTE => HighlightTag::ByteLiteral.into(),
.children_with_tokens()
.filter(|t| t.kind() != SyntaxKind::WHITESPACE)
.nth(1)
- .filter(|e| {
- ast::String::can_cast(e.kind()) || ast::RawString::can_cast(e.kind())
- })
+ .filter(|e| ast::String::can_cast(e.kind()))
}
_ => {}
}
pub(super) fn highlight_injection(
acc: &mut HighlightedRangeStack,
sema: &Semantics<RootDatabase>,
- literal: ast::RawString,
+ literal: ast::String,
expanded: SyntaxToken,
) -> Option<()> {
let active_parameter = ActiveParameter::at_token(&sema, expanded)?;
use ide_db::base_db::{FileId, SourceDatabase};
use ide_db::RootDatabase;
use syntax::{
- algo, AstNode, NodeOrToken, SourceFile,
- SyntaxKind::{RAW_STRING, STRING},
- SyntaxToken, TextRange, TextSize,
+ algo, AstNode, NodeOrToken, SourceFile, SyntaxKind::STRING, SyntaxToken, TextRange, TextSize,
};
// Feature: Show Syntax Tree
// we'll attempt parsing it as rust syntax
// to provide the syntax tree of the contents of the string
match token.kind() {
- STRING | RAW_STRING => syntax_tree_for_token(token, text_range),
+ STRING => syntax_tree_for_token(token, text_range),
_ => None,
}
}
assert!(p.at(T![extern]));
let abi = p.start();
p.bump(T![extern]);
- match p.current() {
- STRING | RAW_STRING => p.bump_any(),
- _ => (),
- }
+ p.eat(STRING);
abi.complete(p, ABI);
}
// let _ = b"e";
// let _ = br"f";
// }
-pub(crate) const LITERAL_FIRST: TokenSet = TokenSet::new(&[
- TRUE_KW,
- FALSE_KW,
- INT_NUMBER,
- FLOAT_NUMBER,
- BYTE,
- CHAR,
- STRING,
- RAW_STRING,
- BYTE_STRING,
- RAW_BYTE_STRING,
-]);
+pub(crate) const LITERAL_FIRST: TokenSet =
+ TokenSet::new(&[TRUE_KW, FALSE_KW, INT_NUMBER, FLOAT_NUMBER, BYTE, CHAR, STRING, BYTE_STRING]);
pub(crate) fn literal(p: &mut Parser) -> Option<CompletedMarker> {
if !p.at_ts(LITERAL_FIRST) {
T![static] => consts::static_(p, m),
// test extern_block
// extern {}
- T![extern]
- if la == T!['{'] || ((la == STRING || la == RAW_STRING) && p.nth(2) == T!['{']) =>
- {
+ T![extern] if la == T!['{'] || (la == STRING && p.nth(2) == T!['{']) => {
abi(p);
extern_item_list(p);
m.complete(p, EXTERN_BLOCK);
CHAR,
BYTE,
STRING,
- RAW_STRING,
BYTE_STRING,
- RAW_BYTE_STRING,
ERROR,
IDENT,
WHITESPACE,
}
pub fn is_literal(self) -> bool {
match self {
- INT_NUMBER | FLOAT_NUMBER | CHAR | BYTE | STRING | RAW_STRING | BYTE_STRING
- | RAW_BYTE_STRING => true,
+ INT_NUMBER | FLOAT_NUMBER | CHAR | BYTE | STRING | BYTE_STRING => true,
_ => false,
}
}
ast::IntNumber::cast(self.token())
}
+ pub fn as_string(&self) -> Option<ast::String> {
+ ast::String::cast(self.token())
+ }
+ pub fn as_byte_string(&self) -> Option<ast::ByteString> {
+ ast::ByteString::cast(self.token())
+ }
+
fn find_suffix(text: &str, possible_suffixes: &[&str]) -> Option<SmolStr> {
possible_suffixes
.iter()
suffix: Self::find_suffix(&text, &ast::FloatNumber::SUFFIXES),
}
}
- STRING | RAW_STRING => LiteralKind::String,
+ STRING => LiteralKind::String,
T![true] => LiteralKind::Bool(true),
T![false] => LiteralKind::Bool(false),
- BYTE_STRING | RAW_BYTE_STRING => LiteralKind::ByteString,
+ BYTE_STRING => LiteralKind::ByteString,
CHAR => LiteralKind::Char,
BYTE => LiteralKind::Byte,
_ => unreachable!(),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-pub struct RawString {
+pub struct ByteString {
pub(crate) syntax: SyntaxToken,
}
-impl std::fmt::Display for RawString {
+impl std::fmt::Display for ByteString {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(&self.syntax, f)
}
}
-impl AstToken for RawString {
- fn can_cast(kind: SyntaxKind) -> bool { kind == RAW_STRING }
+impl AstToken for ByteString {
+ fn can_cast(kind: SyntaxKind) -> bool { kind == BYTE_STRING }
fn cast(syntax: SyntaxToken) -> Option<Self> {
if Self::can_cast(syntax.kind()) {
Some(Self { syntax })
let key = self.simple_name()?;
let value_token = lit.syntax().first_token()?;
- let value: SmolStr = if let Some(s) = ast::String::cast(value_token.clone()) {
- s.value()?.into()
- } else if let Some(s) = ast::RawString::cast(value_token) {
- s.value()?.into()
- } else {
- return None;
- };
+ let value: SmolStr = ast::String::cast(value_token.clone())?.value()?.into();
Some((key, value))
}
}
impl HasQuotes for ast::String {}
-impl HasQuotes for ast::RawString {}
pub trait HasStringValue: HasQuotes {
fn value(&self) -> Option<Cow<'_, str>>;
}
+impl ast::String {
+ pub fn is_raw(&self) -> bool {
+ self.text().starts_with('r')
+ }
+ pub fn map_range_up(&self, range: TextRange) -> Option<TextRange> {
+ let contents_range = self.text_range_between_quotes()?;
+ assert!(TextRange::up_to(contents_range.len()).contains_range(range));
+ Some(range + contents_range.start())
+ }
+}
+
impl HasStringValue for ast::String {
fn value(&self) -> Option<Cow<'_, str>> {
+ if self.is_raw() {
+ let text = self.text().as_str();
+ let text =
+ &text[self.text_range_between_quotes()? - self.syntax().text_range().start()];
+ return Some(Cow::Borrowed(text));
+ }
+
let text = self.text().as_str();
let text = &text[self.text_range_between_quotes()? - self.syntax().text_range().start()];
}
}
-// FIXME: merge `ast::RawString` and `ast::String`.
-impl HasStringValue for ast::RawString {
- fn value(&self) -> Option<Cow<'_, str>> {
- let text = self.text().as_str();
- let text = &text[self.text_range_between_quotes()? - self.syntax().text_range().start()];
- Some(Cow::Borrowed(text))
- }
-}
-
-impl ast::RawString {
- pub fn map_range_up(&self, range: TextRange) -> Option<TextRange> {
- let contents_range = self.text_range_between_quotes()?;
- assert!(TextRange::up_to(contents_range.len()).contains_range(range));
- Some(range + contents_range.start())
+impl ast::ByteString {
+ pub fn is_raw(&self) -> bool {
+ self.text().starts_with("br")
}
}
}
}
-impl HasFormatSpecifier for ast::RawString {
- fn char_ranges(
- &self,
- ) -> Option<Vec<(TextRange, Result<char, rustc_lexer::unescape::EscapeError>)>> {
- let text = self.text().as_str();
- let text = &text[self.text_range_between_quotes()? - self.syntax().text_range().start()];
- let offset = self.text_range_between_quotes()?.start() - self.syntax().text_range().start();
-
- let mut res = Vec::with_capacity(text.len());
- for (idx, c) in text.char_indices() {
- res.push((TextRange::at(idx.try_into().unwrap(), TextSize::of(c)) + offset, Ok(c)));
- }
- Some(res)
- }
-}
-
impl ast::IntNumber {
#[rustfmt::skip]
pub(crate) const SUFFIXES: &'static [&'static str] = &[
RawStrError::TooManyDelimiters { .. } => "Too many `#` symbols: raw strings may be delimited by up to 65535 `#` symbols",
};
};
- RAW_STRING
+ STRING
}
rustc_lexer::LiteralKind::RawByteStr { err: raw_str_err, .. } => {
if let Some(raw_str_err) = raw_str_err {
};
};
- RAW_BYTE_STRING
+ BYTE_STRING
}
};
let prev_token = algo::find_covering_element(root, edit.delete).as_token()?.clone();
let prev_token_kind = prev_token.kind();
match prev_token_kind {
- WHITESPACE | COMMENT | IDENT | STRING | RAW_STRING => {
+ WHITESPACE | COMMENT | IDENT | STRING => {
if prev_token_kind == WHITESPACE || prev_token_kind == COMMENT {
// removing a new line may extends previous token
let deleted_range = edit.delete - prev_token.text_range().start();
use crate::{
algo, ast, match_ast, AstNode, SyntaxError,
- SyntaxKind::{BYTE, BYTE_STRING, CHAR, CONST, FN, INT_NUMBER, STRING, TYPE_ALIAS},
+ SyntaxKind::{BYTE, CHAR, CONST, FN, INT_NUMBER, TYPE_ALIAS},
SyntaxNode, SyntaxToken, TextSize, T,
};
use rowan::Direction;
acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off));
};
- match token.kind() {
- BYTE => {
- if let Some(Err(e)) = unquote(text, 2, '\'').map(unescape_byte) {
- push_err(2, e);
- }
- }
- CHAR => {
- if let Some(Err(e)) = unquote(text, 1, '\'').map(unescape_char) {
- push_err(1, e);
+ if let Some(s) = literal.as_string() {
+ if !s.is_raw() {
+ if let Some(without_quotes) = unquote(text, 1, '"') {
+ unescape_literal(without_quotes, Mode::Str, &mut |range, char| {
+ if let Err(err) = char {
+ push_err(1, (range.start, err));
+ }
+ })
}
}
- BYTE_STRING => {
+ }
+ if let Some(s) = literal.as_byte_string() {
+ if !s.is_raw() {
if let Some(without_quotes) = unquote(text, 2, '"') {
unescape_byte_literal(without_quotes, Mode::ByteStr, &mut |range, char| {
if let Err(err) = char {
})
}
}
- STRING => {
- if let Some(without_quotes) = unquote(text, 1, '"') {
- unescape_literal(without_quotes, Mode::Str, &mut |range, char| {
- if let Err(err) = char {
- push_err(1, (range.start, err));
- }
- })
+ }
+
+ match token.kind() {
+ BYTE => {
+ if let Some(Err(e)) = unquote(text, 2, '\'').map(unescape_byte) {
+ push_err(2, e);
+ }
+ }
+ CHAR => {
+ if let Some(Err(e)) = unquote(text, 1, '\'').map(unescape_char) {
+ push_err(1, e);
}
}
_ => (),
-RAW_STRING 4 "r##\""
+STRING 4 "r##\""
> error0..4 token("r##\"") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)
-RAW_STRING 8 "r##\"🦀"
+STRING 8 "r##\"🦀"
> error0..8 token("r##\"🦀") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)
-RAW_STRING 8 "r##\"\\x7f"
+STRING 8 "r##\"\\x7f"
> error0..8 token("r##\"\\x7f") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)
-RAW_STRING 12 "r##\"\\u{20AA}"
+STRING 12 "r##\"\\u{20AA}"
> error0..12 token("r##\"\\u{20AA}") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)
-RAW_STRING 5 "r##\" "
+STRING 5 "r##\" "
> error0..5 token("r##\" ") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)
-RAW_STRING 5 "r##\"\\"
+STRING 5 "r##\"\\"
> error0..5 token("r##\"\\") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)
-RAW_STRING 6 "r##\"\\n"
+STRING 6 "r##\"\\n"
> error0..6 token("r##\"\\n") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)
-RAW_BYTE_STRING 5 "br##\""
+BYTE_STRING 5 "br##\""
> error0..5 token("br##\"") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)
-RAW_BYTE_STRING 9 "br##\"🦀"
+BYTE_STRING 9 "br##\"🦀"
> error0..9 token("br##\"🦀") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)
-RAW_BYTE_STRING 9 "br##\"\\x7f"
+BYTE_STRING 9 "br##\"\\x7f"
> error0..9 token("br##\"\\x7f") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)
-RAW_BYTE_STRING 13 "br##\"\\u{20AA}"
+BYTE_STRING 13 "br##\"\\u{20AA}"
> error0..13 token("br##\"\\u{20AA}") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)
-RAW_BYTE_STRING 6 "br##\" "
+BYTE_STRING 6 "br##\" "
> error0..6 token("br##\" ") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)
-RAW_BYTE_STRING 6 "br##\"\\"
+BYTE_STRING 6 "br##\"\\"
> error0..6 token("br##\"\\") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)
-RAW_BYTE_STRING 7 "br##\"\\n"
+BYTE_STRING 7 "br##\"\\n"
> error0..7 token("br##\"\\n") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)
-RAW_STRING 3 "r##"
+STRING 3 "r##"
> error0..3 token("r##") msg(Missing `"` symbol after `#` symbols to begin the raw string literal)
-RAW_BYTE_STRING 4 "br##"
+BYTE_STRING 4 "br##"
> error0..4 token("br##") msg(Missing `"` symbol after `#` symbols to begin the raw byte string literal)
-RAW_STRING 4 "r## "
+STRING 4 "r## "
IDENT 1 "I"
WHITESPACE 1 " "
IDENT 4 "lack"
-RAW_BYTE_STRING 5 "br## "
+BYTE_STRING 5 "br## "
IDENT 1 "I"
WHITESPACE 1 " "
IDENT 4 "lack"
WHITESPACE 1 " "
BYTE_STRING 6 "b\"foo\""
WHITESPACE 1 " "
-RAW_BYTE_STRING 4 "br\"\""
+BYTE_STRING 4 "br\"\""
WHITESPACE 1 "\n"
BYTE 6 "b\'\'suf"
WHITESPACE 1 " "
BYTE_STRING 5 "b\"\"ix"
WHITESPACE 1 " "
-RAW_BYTE_STRING 6 "br\"\"br"
+BYTE_STRING 6 "br\"\"br"
WHITESPACE 1 "\n"
BYTE 5 "b\'\\n\'"
WHITESPACE 1 " "
STRING 7 "\"hello\""
WHITESPACE 1 " "
-RAW_STRING 8 "r\"world\""
+STRING 8 "r\"world\""
WHITESPACE 1 " "
STRING 17 "\"\\n\\\"\\\\no escape\""
WHITESPACE 1 " "
-RAW_STRING 36 "r###\"this is a r##\"raw\"## string\"###"
+STRING 36 "r###\"this is a r##\"raw\"## string\"###"
WHITESPACE 1 "\n"
EQ@142..143 "="
WHITESPACE@143..144 " "
LITERAL@144..148
- RAW_STRING@144..148 "r\"d\""
+ STRING@144..148 "r\"d\""
SEMICOLON@148..149 ";"
WHITESPACE@149..154 "\n "
LET_STMT@154..167
EQ@178..179 "="
WHITESPACE@179..180 " "
LITERAL@180..185
- RAW_BYTE_STRING@180..185 "br\"f\""
+ BYTE_STRING@180..185 "br\"f\""
SEMICOLON@185..186 ";"
WHITESPACE@186..187 "\n"
R_CURLY@187..188 "}"
"trait", "true", "try", "type", "unsafe", "use", "where", "while",
],
contextual_keywords: &["auto", "default", "existential", "union", "raw"],
- literals: &[
- "INT_NUMBER",
- "FLOAT_NUMBER",
- "CHAR",
- "BYTE",
- "STRING",
- "RAW_STRING",
- "BYTE_STRING",
- "RAW_BYTE_STRING",
- ],
+ literals: &["INT_NUMBER", "FLOAT_NUMBER", "CHAR", "BYTE", "STRING", "BYTE_STRING"],
tokens: &[
"ERROR",
"IDENT",
fn lower(grammar: &Grammar) -> AstSrc {
let mut res = AstSrc::default();
- res.tokens = "Whitespace Comment String RawString IntNumber FloatNumber"
+ res.tokens = "Whitespace Comment String ByteString IntNumber FloatNumber"
.split_ascii_whitespace()
.map(|it| it.to_string())
.collect::<Vec<_>>();