.collect::<Vec<SmolStr>>();
let has_more_derives = !new_attr_input.is_empty();
let new_attr_input = new_attr_input.iter().sep_by(", ").surround_with("(", ")").to_string();
- let new_attr_input_len = new_attr_input.len();
let mut buf = String::new();
buf.push_str("\n\nimpl ");
buf.push_str(" {\n");
let cursor_delta = if has_more_derives {
+ let delta = input.syntax().text_range().len() - TextSize::of(&new_attr_input);
edit.replace(input.syntax().text_range(), new_attr_input);
- input.syntax().text_range().len() - TextSize::from_usize(new_attr_input_len)
+ delta
} else {
let attr_range = attr.syntax().text_range();
edit.delete(attr_range);
let fn_def = indent_once.increase_indent(fn_def);
let fn_def = ast::make::add_trailing_newlines(1, fn_def);
let fn_def = indent.increase_indent(fn_def);
- (fn_def, it.syntax().text_range().start() + TextSize::from_usize(1))
+ (fn_def, it.syntax().text_range().start() + TextSize::of('{'))
}
};
.text_range()
.end();
- Some((start, TextSize::from_usize(1)))
+ Some((start, TextSize::of("\n")))
})
.unwrap_or_else(|| {
buf = generate_impl_text(&strukt, &buf);
let start = strukt.syntax().text_range().end();
- (start, TextSize::from_usize(3))
+ (start, TextSize::of("\n}\n"))
});
edit.set_cursor(start_offset + TextSize::of(&buf) - end_offset);
edit.target(current_text_range);
edit.set_cursor(match cursor_pos {
- CursorPos::InExpr(back_offset) => start + TextSize::from_usize(arm.len()) - back_offset,
+ CursorPos::InExpr(back_offset) => start + TextSize::of(&arm) - back_offset,
CursorPos::InPat(offset) => offset,
});
edit.replace(TextRange::new(start, end), arm);
//! `LineIndex` maps flat `TextSize` offsets into `(Line, Column)`
//! representation.
+use std::iter;
+
use ra_syntax::{TextRange, TextSize};
use rustc_hash::FxHashMap;
-use std::iter;
use superslice::Ext;
#[derive(Clone, Debug, PartialEq, Eq)]
res
}
- fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextSize {
- let mut col: TextSize = col.into();
+ fn utf16_to_utf8_col(&self, line: u32, mut col: u32) -> TextSize {
if let Some(utf16_chars) = self.utf16_lines.get(&line) {
for c in utf16_chars {
- if col >= c.start {
- col += c.len() - TextSize::from_usize(1);
+ if col >= u32::from(c.start) {
+ col += u32::from(c.len()) - 1;
} else {
// From here on, all utf16 characters come *after* the character we are mapping,
// so we don't need to take them into account
}
}
- col
+ col.into()
}
}
#[cfg(test)]
-mod test_line_index {
+mod tests {
use super::*;
#[test]
assert!(col_index.utf8_to_utf16_col(2, 15.into()) == 15);
// UTF-16 to UTF-8
- assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from_usize(15));
+ assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from(15));
- assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextSize::from_usize(20));
- assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from_usize(23));
+ assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextSize::from(20));
+ assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from(23));
- assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextSize::from_usize(15));
+ assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextSize::from(15));
}
#[test]
//! Code in this module applies this "to (Line, Column) after edit"
//! transformation.
+use std::convert::TryInto;
+
use ra_syntax::{TextRange, TextSize};
use ra_text_edit::{AtomTextEdit, TextEdit};
.text
.char_indices()
.filter_map(|(i, c)| {
+ let i: TextSize = i.try_into().unwrap();
+ let char_len = TextSize::of(c);
if c == '\n' {
- let next_offset = self.offset + TextSize::from_usize(i + 1);
+ let next_offset = self.offset + i + char_len;
let next = Step::Newline(next_offset);
Some((next, next_offset))
} else {
- let char_len = TextSize::of(c);
- if char_len > TextSize::from_usize(1) {
- let start = self.offset + TextSize::from_usize(i);
+ if !c.is_ascii() {
+ let start = self.offset + i;
let end = start + char_len;
let next = Step::Utf16Char(TextRange::new(start, end));
let next_offset = end;
//! get a super-set of matches. Then, we confirm each match using precise
//! name resolution.
-use std::mem;
+use std::{convert::TryInto, mem};
use hir::{DefWithBody, HasSource, Module, ModuleSource, Semantics, Visibility};
use once_cell::unsync::Lazy;
let tree = Lazy::new(|| sema.parse(file_id).syntax().clone());
for (idx, _) in text.match_indices(pat) {
- let offset = TextSize::from_usize(idx);
+ let offset: TextSize = idx.try_into().unwrap();
if !search_range.contains_inclusive(offset) {
tested_by!(search_filters_by_range; force);
continue;
fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
if let Some((punct, offset)) = self.punct_offset.clone() {
if usize::from(offset) + 1 < punct.text().len() {
- let offset = offset + TextSize::from_usize(1);
+ let offset = offset + TextSize::of('.');
let range = punct.text_range();
self.punct_offset = Some((punct.clone(), offset));
let range = TextRange::at(range.start() + offset, TextSize::of('.'));
let token = if curr.kind().is_punct() {
let range = curr.text_range();
- let range = TextRange::at(range.start(), TextSize::from_usize(1));
- self.punct_offset = Some((curr.clone(), TextSize::from_usize(0)));
- (SynToken::Punch(curr, TextSize::from_usize(0)), range)
+ let range = TextRange::at(range.start(), TextSize::of('.'));
+ self.punct_offset = Some((curr.clone(), 0.into()));
+ (SynToken::Punch(curr, 0.into()), range)
} else {
self.punct_offset = None;
let range = curr.text_range();
fn peek(&self) -> Option<Self::Token> {
if let Some((punct, mut offset)) = self.punct_offset.clone() {
- offset = offset + TextSize::from_usize(1);
+ offset = offset + TextSize::of('.');
if usize::from(offset) < punct.text().len() {
return Some(SynToken::Punch(punct, offset));
}
}
let token = if curr.kind().is_punct() {
- SynToken::Punch(curr, TextSize::from_usize(0))
+ SynToken::Punch(curr, 0.into())
} else {
SynToken::Ordiniary(curr)
};
//! There are many AstNodes, but only a few tokens, so we hand-write them here.
+use std::convert::{TryFrom, TryInto};
+
use crate::{
ast::{AstToken, Comment, RawString, String, Whitespace},
TextRange, TextSize,
}
let start = TextSize::from(0);
- let left_quote = TextSize::from_usize(left_quote) + TextSize::of('"');
- let right_quote = TextSize::from_usize(right_quote);
+ let left_quote = TextSize::try_from(left_quote).unwrap() + TextSize::of('"');
+ let right_quote = TextSize::try_from(right_quote).unwrap();
let end = TextSize::of(literal);
let res = QuoteOffsets {
let mut res = Vec::with_capacity(text.len());
rustc_lexer::unescape::unescape_str(text, &mut |range, unescaped_char| {
res.push((
- TextRange::new(TextSize::from_usize(range.start), TextSize::from_usize(range.end))
+ TextRange::new(range.start.try_into().unwrap(), range.end.try_into().unwrap())
+ offset,
unescaped_char,
))
let mut res = Vec::with_capacity(text.len());
for (idx, c) in text.char_indices() {
- res.push((
- TextRange::new(TextSize::from_usize(idx), TextSize::from_usize(idx + c.len_utf8()))
- + offset,
- Ok(c),
- ));
+ res.push((TextRange::at(idx.try_into().unwrap(), TextSize::of(c)) + offset, Ok(c)));
}
Some(res)
}
//! FIXME: write short doc here
-use crate::{validation, AstNode, SourceFile, TextRange, TextSize};
+use std::{
+ convert::TryInto,
+ str::{self, FromStr},
+};
+
use ra_text_edit::AtomTextEdit;
-use std::str::{self, FromStr};
+
+use crate::{validation, AstNode, SourceFile, TextRange};
fn check_file_invariants(file: &SourceFile) {
let root = file.syntax();
let text = format!("{}{}{}", PREFIX, text, SUFFIX);
text.get(delete_start..delete_start.checked_add(delete_len)?)?; // make sure delete is a valid range
let delete =
- TextRange::at(TextSize::from_usize(delete_start), TextSize::from_usize(delete_len));
+ TextRange::at(delete_start.try_into().unwrap(), delete_len.try_into().unwrap());
let edited_text =
format!("{}{}{}", &text[..delete_start], &insert, &text[delete_start + delete_len..]);
let edit = AtomTextEdit { delete, insert };
//! Lexer analyzes raw input string and produces lexemes (tokens).
//! It is just a bridge to `rustc_lexer`.
+use std::convert::TryInto;
+
use crate::{
SyntaxError,
SyntaxKind::{self, *},
let mut tokens = Vec::new();
let mut errors = Vec::new();
- let mut offset: usize = rustc_lexer::strip_shebang(text)
- .map(|shebang_len| {
- tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) });
+ let mut offset = match rustc_lexer::strip_shebang(text) {
+ Some(shebang_len) => {
+ tokens.push(Token { kind: SHEBANG, len: shebang_len.try_into().unwrap() });
shebang_len
- })
- .unwrap_or(0);
+ }
+ None => 0,
+ };
let text_without_shebang = &text[offset..];
for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
- let token_len = TextSize::from_usize(rustc_token.len);
- let token_range = TextRange::at(TextSize::from_usize(offset), token_len);
+ let token_len: TextSize = rustc_token.len.try_into().unwrap();
+ let token_range = TextRange::at(offset.try_into().unwrap(), token_len);
let (syntax_kind, err_message) =
rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
let rustc_token = rustc_lexer::first_token(text);
let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);
- let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) };
- let optional_error = err_message.map(|err_message| {
- SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text)))
- });
+ let token = Token { kind: syntax_kind, len: rustc_token.len.try_into().unwrap() };
+ let optional_error = err_message
+ .map(|err_message| SyntaxError::new(err_message, TextRange::up_to(TextSize::of(text))));
Some((token, optional_error))
}
fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
let mut acc = String::new();
- let mut offset = TextSize::from_usize(0);
+ let mut offset: TextSize = 0.into();
for token in tokens {
let token_len = token.len;
let token_text = &text[TextRange::at(offset, token.len)];
mod block;
+use std::convert::TryFrom;
+
use rustc_lexer::unescape;
use crate::{
// FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205)
let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| {
- let off = token.text_range().start() + TextSize::from_usize(off + prefix_len);
+ let off = token.text_range().start() + TextSize::try_from(off + prefix_len).unwrap();
acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off));
};