Remove some TextUnit->usize escapees

CAD97 2020-03-12 22:29:44 -04:00
parent 2f9f409538
commit 88c944f96b
7 changed files with 31 additions and 27 deletions
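Every hunk below follows the same pattern: arithmetic and comparisons that used to round-trip through usize (via .to_usize() / from_usize()) now stay on the TextUnit / TextRange types themselves. For orientation, here is a minimal sketch of what such a u32-backed offset newtype looks like; this is a simplified stand-in written for this note, not the real TextUnit type the codebase re-exports.

    // Simplified stand-in for a TextUnit-style offset newtype (illustration only).
    use std::ops::{Add, AddAssign, Sub};

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    struct TextUnit(u32);

    impl TextUnit {
        fn of_str(s: &str) -> TextUnit { TextUnit(s.len() as u32) }       // length of a &str
        fn of_char(c: char) -> TextUnit { TextUnit(c.len_utf8() as u32) } // UTF-8 length of a char
        fn from_usize(n: usize) -> TextUnit { TextUnit(n as u32) }
        fn to_usize(self) -> usize { self.0 as usize }
    }

    impl Add for TextUnit { type Output = TextUnit; fn add(self, rhs: TextUnit) -> TextUnit { TextUnit(self.0 + rhs.0) } }
    impl Sub for TextUnit { type Output = TextUnit; fn sub(self, rhs: TextUnit) -> TextUnit { TextUnit(self.0 - rhs.0) } }
    impl AddAssign for TextUnit { fn add_assign(&mut self, rhs: TextUnit) { self.0 += rhs.0 } }

    fn main() {
        let len = TextUnit::of_str("hello");
        assert_eq!(len + TextUnit::from_usize(1), TextUnit::from_usize(6));
        assert_eq!(len.to_usize(), 5);

        let mut offset = TextUnit::from_usize(0);
        offset += TextUnit::of_char('é'); // 'é' is 2 bytes in UTF-8
        assert_eq!(offset.to_usize(), 2);
    }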


@@ -235,7 +235,7 @@ mod helpers {
(Some(assist), ExpectedResult::Target(target)) => {
let action = assist.0[0].action.clone().unwrap();
let range = action.target.expect("expected target on action");
-assert_eq_text!(&before[range.start().to_usize()..range.end().to_usize()], target);
+assert_eq_text!(&before[range], target);
}
(Some(_), ExpectedResult::NotApplicable) => panic!("assist should not be applicable!"),
(None, ExpectedResult::After(_)) | (None, ExpectedResult::Target(_)) => {

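The change above works because the range type can be used directly as a string index, so the two .to_usize() calls are unnecessary. A minimal sketch of how a crate-local range type can index str (the orphan rules allow this because the index type is local); the type below is a hypothetical stand-in, not the real TextRange:

    use std::ops::Index;

    // Hypothetical stand-in for a TextRange-like type (illustration only).
    #[derive(Clone, Copy)]
    struct TextRange { start: u32, end: u32 }

    // Allowed despite `str` being a foreign type, because the index type is local.
    impl Index<TextRange> for str {
        type Output = str;
        fn index(&self, r: TextRange) -> &str {
            &self[r.start as usize..r.end as usize]
        }
    }

    fn main() {
        let before = "fn main() {}";
        let range = TextRange { start: 3, end: 7 };
        assert_eq!(&before[range], "main");
    }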

@@ -5,7 +5,7 @@ use ra_ide_db::RootDatabase;
use ra_syntax::{
algo, AstNode, NodeOrToken, SourceFile,
SyntaxKind::{RAW_STRING, STRING},
-SyntaxToken, TextRange,
+SyntaxToken, TextRange, TextUnit,
};
pub use ra_db::FileId;
@@ -56,19 +56,23 @@ fn syntax_tree_for_token(node: &SyntaxToken, text_range: TextRange) -> Option<St
let start = text_range.start() - node_range.start();
// how many characters we have selected
-let len = text_range.len().to_usize();
+let len = text_range.len();
-let node_len = node_range.len().to_usize();
+let node_len = node_range.len();
-let start = start.to_usize();
+let start = start;
// We want to cap our length
let len = len.min(node_len);
// Ensure our slice is inside the actual string
-let end = if start + len < text.len() { start + len } else { text.len() - start };
+let end = if start + len < TextUnit::of_str(&text) {
+    start + len
+} else {
+    TextUnit::of_str(&text) - start
+};
-let text = &text[start..end];
+let text = &text[TextRange::from_to(start, end)];
// Remove possible extra string quotes from the start
// and the end of the string

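The hunk above keeps the clamping logic but measures the text with TextUnit::of_str instead of str::len and slices with TextRange::from_to instead of a usize range. A deliberately simplified version of that bounds check, with plain u32 offsets standing in for TextUnit (the real function also subtracts the node start and caps by the node length, and keeps its original fallback arithmetic):

    // Simplified bounds check: cap the selected slice at the end of the text.
    // Plain u32 offsets stand in for TextUnit; this is an illustration, not the
    // real syntax_tree_for_token.
    fn clamped_slice(text: &str, start: u32, len: u32) -> &str {
        let text_len = text.len() as u32;      // analogue of TextUnit::of_str(&text)
        let end = (start + len).min(text_len); // keep the slice inside the string
        &text[start as usize..end as usize]
    }

    fn main() {
        // Asking for 100 bytes starting at offset 1 is capped at the text end.
        assert_eq!(clamped_slice("\"raw string\"", 1, 100), "raw string\"");
    }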

@@ -59,7 +59,7 @@ impl LineIndex {
}
let char_len = TextUnit::of_char(c);
-if char_len.to_usize() > 1 {
+if char_len > TextUnit::from_usize(1) {
utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + char_len });
}
@@ -101,12 +101,12 @@ impl LineIndex {
.filter(|it| !it.is_empty())
}
-fn utf8_to_utf16_col(&self, line: u32, mut col: TextUnit) -> usize {
+fn utf8_to_utf16_col(&self, line: u32, col: TextUnit) -> usize {
if let Some(utf16_chars) = self.utf16_lines.get(&line) {
-let mut correction = TextUnit::from_usize(0);
+let mut correction = 0;
for c in utf16_chars {
if col >= c.end {
-correction += c.len() - TextUnit::from_usize(1);
+correction += c.len().to_usize() - 1;
} else {
// From here on, all utf16 characters come *after* the character we are mapping,
// so we don't need to take them into account
@@ -114,10 +114,10 @@ impl LineIndex {
}
}
-col -= correction;
+col.to_usize() - correction
+} else {
+col.to_usize()
}
-col.to_usize()
}
fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextUnit {

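The three LineIndex hunks above keep col as a TextUnit for the whole loop and convert once per return path, accumulating the UTF-16 correction as a plain usize. For reference, the value being computed is the UTF-16 column for a given UTF-8 byte column; a self-contained way to compute the same thing straight from the line text (illustration only; the real LineIndex uses precomputed Utf16Char ranges so it never rescans the line):

    // Convert a UTF-8 byte column within one line to a UTF-16 code-unit column.
    // Standalone illustration; LineIndex::utf8_to_utf16_col reaches the same
    // answer from precomputed wide-character ranges instead of rescanning.
    fn utf8_to_utf16_col(line: &str, utf8_col: usize) -> usize {
        line[..utf8_col].chars().map(char::len_utf16).sum()
    }

    fn main() {
        // 'é' is 2 bytes in UTF-8 but only 1 code unit in UTF-16.
        let line = "é = 1";
        assert_eq!(utf8_to_utf16_col(line, 2), 1); // column just after 'é'
        assert_eq!(utf8_to_utf16_col(line, 4), 3); // column just after '='
    }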

@@ -145,7 +145,7 @@ impl Iterator for OffsetStepIter<'_> {
Some((next, next_offset))
} else {
let char_len = TextUnit::of_char(c);
-if char_len.to_usize() > 1 {
+if char_len > TextUnit::from_usize(1) {
let start = self.offset + TextUnit::from_usize(i);
let end = start + char_len;
let next = Step::Utf16Char(TextRange::from_to(start, end));


@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
/// Beware that unescape errors are not checked at tokenization time.
pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
lex_first_token(text)
-.filter(|(token, _)| token.len.to_usize() == text.len())
+.filter(|(token, _)| token.len == TextUnit::of_str(text))
.map(|(token, error)| (token.kind, error))
}
@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
/// Beware that unescape errors are not checked at tokenization time.
pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
lex_first_token(text)
-.filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
+.filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
.map(|(token, _error)| token.kind)
}

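Both lexer hunks compare the first token's length against TextUnit::of_str(text) so the whole check stays in TextUnit. The shape of the check is "lex one token, keep it only if it spans the entire input"; a toy standalone version, where lex_first is a made-up stand-in for lex_first_token:

    // Toy stand-in for lex_first_token: recognise a leading alphanumeric run.
    fn lex_first(text: &str) -> Option<(&'static str, usize)> {
        let len: usize = text.chars()
            .take_while(|c| c.is_alphanumeric())
            .map(char::len_utf8)
            .sum();
        if len == 0 { None } else { Some(("IDENT", len)) }
    }

    // Keep the first token's kind only if that token covers the whole input,
    // mirroring the filter in lex_single_syntax_kind (there the comparison is
    // `token.len == TextUnit::of_str(text)`).
    fn single_kind(text: &str) -> Option<&'static str> {
        lex_first(text)
            .filter(|&(_kind, len)| len == text.len())
            .map(|(kind, _len)| kind)
    }

    fn main() {
        assert_eq!(single_kind("foo"), Some("IDENT"));
        assert_eq!(single_kind("foo bar"), None);
    }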

@@ -5,7 +5,7 @@ use std::{
use test_utils::{collect_tests, dir_tests, project_dir, read_text};
-use crate::{fuzz, tokenize, SourceFile, SyntaxError, Token};
+use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextUnit, Token};
#[test]
fn lexer_tests() {
@@ -120,11 +120,11 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
let mut acc = String::new();
-let mut offset = 0;
+let mut offset = TextUnit::from_usize(0);
for token in tokens {
-let token_len = token.len.to_usize();
-let token_text = &text[offset..offset + token_len];
-offset += token_len;
+let token_len = token.len;
+let token_text = &text[TextRange::offset_len(offset, token.len)];
+offset += token.len;
writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap();
}
for err in errors {

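In the dump above, the running offset stays a TextUnit and each slice is taken through TextRange::offset_len(offset, token.len) rather than a usize range. The loop shape, with plain usize offsets for illustration and a (kind, len) pair standing in for Token:

    use std::fmt::Write;

    // Standalone sketch of dump_tokens_and_errors' main loop; (kind, len) pairs
    // stand in for Token and plain usize offsets stand in for TextUnit.
    fn dump_tokens(tokens: &[(&str, usize)], text: &str) -> String {
        let mut acc = String::new();
        let mut offset = 0;
        for &(kind, len) in tokens {
            let token_text = &text[offset..offset + len]; // the diff slices via TextRange::offset_len
            offset += len;
            writeln!(acc, "{} {} {:?}", kind, len, token_text).unwrap();
        }
        acc
    }

    fn main() {
        let out = dump_tokens(&[("IDENT", 3), ("WHITESPACE", 1), ("IDENT", 3)], "foo bar");
        assert_eq!(out, "IDENT 3 \"foo\"\nWHITESPACE 1 \" \"\nIDENT 3 \"bar\"\n");
    }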

@@ -63,12 +63,12 @@ impl TextEdit {
}
pub fn apply(&self, text: &str) -> String {
-let mut total_len = text.len();
+let mut total_len = TextUnit::of_str(text);
for atom in self.atoms.iter() {
-total_len += atom.insert.len();
-total_len -= (atom.delete.end() - atom.delete.start()).to_usize();
+total_len += TextUnit::of_str(&atom.insert);
+total_len -= atom.delete.end() - atom.delete.start();
}
-let mut buf = String::with_capacity(total_len);
+let mut buf = String::with_capacity(total_len.to_usize());
let mut prev = 0;
for atom in self.atoms.iter() {
let start = atom.delete.start().to_usize();
@@ -80,7 +80,7 @@ impl TextEdit {
prev = end;
}
buf.push_str(&text[prev..text.len()]);
-assert_eq!(buf.len(), total_len);
+assert_eq!(TextUnit::of_str(&buf), total_len);
buf
}
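
With these last two hunks, TextEdit::apply carries total_len as a TextUnit from start to finish, converting only for String::with_capacity, and the final sanity check compares TextUnit values. A rough standalone sketch of the same bookkeeping, using plain usize and (start, end, replacement) triples in place of the real AtomTextEdit values:

    // Rough sketch of TextEdit::apply's length bookkeeping (illustration only).
    // Atoms are assumed sorted and non-overlapping.
    fn apply(text: &str, atoms: &[(usize, usize, &str)]) -> String {
        let mut total_len = text.len();
        for &(start, end, insert) in atoms {
            total_len += insert.len(); // bytes added by the replacement
            total_len -= end - start;  // bytes removed with the deleted range
        }
        let mut buf = String::with_capacity(total_len);
        let mut prev = 0;
        for &(start, end, insert) in atoms {
            buf.push_str(&text[prev..start]); // untouched text before this edit
            buf.push_str(insert);
            prev = end;
        }
        buf.push_str(&text[prev..]);
        assert_eq!(buf.len(), total_len); // the diff checks this with TextUnit::of_str(&buf)
        buf
    }

    fn main() {
        assert_eq!(apply("hello world", &[(0, 5, "goodbye")]), "goodbye world");
    }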