Speed up line index calculation via SSE2

Lukas Wirth 2023-06-27 15:21:58 +02:00
parent a9c29a9ada
commit 97748b0fc6
5 changed files with 303 additions and 114 deletions

Cargo.lock generated

@@ -730,7 +730,7 @@ dependencies = [
"indexmap 2.0.0",
"itertools",
"limit",
"line-index 0.1.0-pre.1 (registry+https://github.com/rust-lang/crates.io-index)",
"line-index 0.1.0-pre.1",
"memchr",
"nohash-hasher",
"once_cell",
@@ -947,6 +947,8 @@ version = "0.0.0"
[[package]]
name = "line-index"
version = "0.1.0-pre.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cad96769710c1745e11d4f940a8ff36000ade4bbada4285b001cb8aa2f745ce"
dependencies = [
"nohash-hasher",
"text-size",
@@ -954,9 +956,7 @@ dependencies = [
[[package]]
name = "line-index"
version = "0.1.0-pre.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cad96769710c1745e11d4f940a8ff36000ade4bbada4285b001cb8aa2f745ce"
version = "0.1.0"
dependencies = [
"nohash-hasher",
"text-size",


@@ -1,6 +1,6 @@
[package]
name = "line-index"
version = "0.1.0-pre.1"
version = "0.1.0"
description = "Maps flat `TextSize` offsets to/from `(line, column)` representation."
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/rust-analyzer/tree/master/lib/line-index"


@@ -94,44 +94,7 @@ pub struct LineIndex {
impl LineIndex {
    /// Returns a `LineIndex` for the `text`.
    pub fn new(text: &str) -> LineIndex {
        let mut newlines = Vec::<TextSize>::with_capacity(16);
        let mut line_wide_chars = IntMap::<u32, Box<[WideChar]>>::default();

        let mut wide_chars = Vec::<WideChar>::new();
        let mut cur_row = TextSize::from(0);
        let mut cur_col = TextSize::from(0);
        let mut line = 0u32;
        for c in text.chars() {
            let c_len = TextSize::of(c);
            cur_row += c_len;
            if c == '\n' {
                newlines.push(cur_row);

                // Save any wide characters seen in the previous line
                if !wide_chars.is_empty() {
                    let cs = std::mem::take(&mut wide_chars).into_boxed_slice();
                    line_wide_chars.insert(line, cs);
                }

                // Prepare for processing the next line
                cur_col = TextSize::from(0);
                line += 1;
                continue;
            }

            if !c.is_ascii() {
                wide_chars.push(WideChar { start: cur_col, end: cur_col + c_len });
            }

            cur_col += c_len;
        }

        // Save any wide characters seen in the last line
        if !wide_chars.is_empty() {
            line_wide_chars.insert(line, wide_chars.into_boxed_slice());
        }

        let (newlines, line_wide_chars) = analyze_source_file(text);

        LineIndex {
            newlines: newlines.into_boxed_slice(),
            line_wide_chars,
@@ -235,3 +198,182 @@ impl LineIndex {
        self.len
    }
}

/// This is adapted from the rustc_span crate, https://github.com/rust-lang/rust/blob/master/compiler/rustc_span/src/analyze_source_file.rs
fn analyze_source_file(src: &str) -> (Vec<TextSize>, IntMap<u32, Box<[WideChar]>>) {
    assert!(src.len() < !0u32 as usize);

    let mut lines = vec![];
    let mut line_wide_chars = IntMap::<u32, Vec<WideChar>>::default();

    // Calls the right implementation, depending on hardware support available.
    analyze_source_file_dispatch(src, &mut lines, &mut line_wide_chars);

    (lines, line_wide_chars.into_iter().map(|(k, v)| (k, v.into_boxed_slice())).collect())
}
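For orientation, here is a small sketch (hypothetical test, not part of the commit) of what the new analysis yields, mirroring the `newlines_short` and `multi_byte_char_short` cases in the tests added below:

// Illustrative sketch only; like the new tests below, it assumes same-crate
// access to the `newlines` and `line_wide_chars` fields.
#[test]
fn sketch_what_the_analysis_produces() {
    // `newlines` stores the offset one past each '\n'.
    let ascii = LineIndex::new("a\nc");
    assert_eq!(ascii.newlines.to_vec(), vec![TextSize::from(2)]);
    assert!(ascii.line_wide_chars.is_empty());

    // `line_wide_chars` maps a line number to the non-ASCII characters on it;
    // 'β' occupies bytes 1..3 of line 0.
    let wide = LineIndex::new("aβc");
    assert!(wide.newlines.is_empty());
    assert_eq!(
        wide.line_wide_chars[&0u32].to_vec(),
        vec![WideChar { start: TextSize::from(1), end: TextSize::from(3) }]
    );
}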
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn analyze_source_file_dispatch(
    src: &str,
    lines: &mut Vec<TextSize>,
    multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
) {
    if is_x86_feature_detected!("sse2") {
        // SAFETY: SSE2 support was checked
        unsafe {
            analyze_source_file_sse2(src, lines, multi_byte_chars);
        }
    } else {
        analyze_source_file_generic(src, src.len(), TextSize::from(0), lines, multi_byte_chars);
    }
}

/// Checks 16-byte chunks of text at a time. If a chunk contains anything other
/// than ASCII, the function falls back to the generic implementation for that
/// chunk. Otherwise it uses SSE2 intrinsics to quickly find all newlines.
#[target_feature(enable = "sse2")]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
unsafe fn analyze_source_file_sse2(
    src: &str,
    lines: &mut Vec<TextSize>,
    multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
) {
    #[cfg(target_arch = "x86")]
    use std::arch::x86::*;
    #[cfg(target_arch = "x86_64")]
    use std::arch::x86_64::*;

    const CHUNK_SIZE: usize = 16;

    let src_bytes = src.as_bytes();
    let chunk_count = src.len() / CHUNK_SIZE;

    // This variable keeps track of where we should start decoding a
    // chunk. If a multi-byte character spans across chunk boundaries,
    // we need to skip that part in the next chunk because we already
    // handled it.
    let mut intra_chunk_offset = 0;

    for chunk_index in 0..chunk_count {
        let ptr = src_bytes.as_ptr() as *const __m128i;
        // We don't know if the pointer is aligned to 16 bytes, so we
        // use `loadu`, which supports unaligned loading.
        let chunk = _mm_loadu_si128(ptr.add(chunk_index));

        // For each character in the chunk, see if its byte value is < 0, which
        // indicates that it's part of a multi-byte UTF-8 character.
        let multibyte_test = _mm_cmplt_epi8(chunk, _mm_set1_epi8(0));
        // Create a bit mask from the comparison results.
        let multibyte_mask = _mm_movemask_epi8(multibyte_test);

        // If the bit mask is all zero, we only have ASCII chars here:
        if multibyte_mask == 0 {
            assert!(intra_chunk_offset == 0);

            // Check for newlines in the chunk
            let newlines_test = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(b'\n' as i8));
            let newlines_mask = _mm_movemask_epi8(newlines_test);

            if newlines_mask != 0 {
                // Newlines were found in this chunk; record them.
                let mut newlines_mask = 0xFFFF0000 | newlines_mask as u32;
                let output_offset = TextSize::from((chunk_index * CHUNK_SIZE + 1) as u32);

                loop {
                    let index = newlines_mask.trailing_zeros();

                    if index >= CHUNK_SIZE as u32 {
                        // We have arrived at the end of the chunk.
                        break;
                    }

                    lines.push(TextSize::from(index) + output_offset);

                    // Clear the bit, so we can find the next one.
                    newlines_mask &= (!1) << index;
                }
            }
            continue;
        }

        // The slow path: the chunk contains multi-byte characters,
        // fall back to generic decoding.
        let scan_start = chunk_index * CHUNK_SIZE + intra_chunk_offset;
        intra_chunk_offset = analyze_source_file_generic(
            &src[scan_start..],
            CHUNK_SIZE - intra_chunk_offset,
            TextSize::from(scan_start as u32),
            lines,
            multi_byte_chars,
        );
    }

    // There might still be a tail left to analyze
    let tail_start = chunk_count * CHUNK_SIZE + intra_chunk_offset;
    if tail_start < src.len() {
        analyze_source_file_generic(
            &src[tail_start..],
            src.len() - tail_start,
            TextSize::from(tail_start as u32),
            lines,
            multi_byte_chars,
        );
    }
}
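The bit-mask loop above is easy to check without intrinsics. The standalone sketch below (hypothetical helper and test, not part of the commit) performs the same iteration: OR-ing `0xFFFF_0000` into the 16-bit movemask makes `trailing_zeros` return 16 once the real bits are exhausted, and `(!1) << index` clears the bit that was just handled. In the real function each recovered position is then shifted by `chunk_index * CHUNK_SIZE + 1`, so the stored value is the offset one past the newline.

// Hypothetical helper for illustration: extract the set-bit positions from a
// 16-lane movemask, mirroring the loop in `analyze_source_file_sse2`.
fn newline_positions_in_chunk(movemask: u16) -> Vec<u32> {
    let mut positions = Vec::new();
    // Set the upper 16 bits as a sentinel so `trailing_zeros` hits 16 at the end.
    let mut mask = 0xFFFF_0000u32 | movemask as u32;
    loop {
        let index = mask.trailing_zeros();
        if index >= 16 {
            break;
        }
        positions.push(index);
        // Clear the bit we just handled so the next lowest one surfaces.
        mask &= (!1) << index;
    }
    positions
}

#[test]
fn movemask_iteration_yields_newline_positions() {
    // A chunk like b"ab\ncd\nxxxxxxxxxx" sets bits 2 and 5 of the movemask.
    assert_eq!(newline_positions_in_chunk(0b0010_0100), vec![2, 5]);
}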
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
// The target (or compiler version) does not support SSE2 ...
fn analyze_source_file_dispatch(
    src: &str,
    lines: &mut Vec<TextSize>,
    multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
) {
    analyze_source_file_generic(src, src.len(), TextSize::from(0), lines, multi_byte_chars);
}

// `scan_len` determines the number of bytes in `src` to scan. Note that the
// function can read past `scan_len` if a multi-byte character starts within the
// range but extends past it. The overflow is returned by the function.
fn analyze_source_file_generic(
    src: &str,
    scan_len: usize,
    output_offset: TextSize,
    lines: &mut Vec<TextSize>,
    multi_byte_chars: &mut IntMap<u32, Vec<WideChar>>,
) -> usize {
    assert!(src.len() >= scan_len);

    let mut i = 0;
    let src_bytes = src.as_bytes();

    while i < scan_len {
        let byte = unsafe {
            // We verified that i < scan_len <= src.len()
            *src_bytes.get_unchecked(i)
        };

        // How much to advance in order to get to the next UTF-8 char in the
        // string.
        let mut char_len = 1;

        if byte == b'\n' {
            lines.push(TextSize::from(i as u32 + 1) + output_offset);
        } else if byte >= 127 {
            // The slow path: Just decode to `char`.
            let c = src[i..].chars().next().unwrap();
            char_len = c.len_utf8();

            let pos = TextSize::from(i as u32) + output_offset;

            if char_len > 1 {
                assert!((2..=4).contains(&char_len));
                let mbc = WideChar { start: pos, end: pos + TextSize::from(char_len as u32) };
                multi_byte_chars.entry(lines.len() as u32).or_default().push(mbc);
            }
        }

        i += char_len;
    }

    i - scan_len
}
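The return value of `analyze_source_file_generic` is the least obvious part of the contract: if the last decoded character starts before `scan_len` but extends past it, the cursor `i` ends up beyond `scan_len`, and `i - scan_len` tells the SSE2 caller how many bytes of the next chunk were already consumed. A hypothetical test (not part of the commit) pinning that down:

// Hypothetical test illustrating the overflow contract described above.
#[test]
fn generic_scan_reports_chunk_overflow() {
    let mut lines = Vec::new();
    let mut wide = IntMap::<u32, Vec<WideChar>>::default();

    // 'Δ' is 2 bytes long and starts at byte 3, so scanning only the first 4
    // bytes still decodes it fully; the cursor stops at byte 5, one past the limit.
    let overflow =
        analyze_source_file_generic("abcΔe", 4, TextSize::from(0), &mut lines, &mut wide);

    assert_eq!(overflow, 1);
    assert!(lines.is_empty());
    assert_eq!(
        wide[&0u32],
        vec![WideChar { start: TextSize::from(3), end: TextSize::from(5) }]
    );
}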


@@ -1,11 +1,120 @@
use super::LineIndex;
use crate::{LineIndex, TextSize, WideChar};

#[test]
fn test_empty_index() {
    let col_index = LineIndex::new(
        "
const C: char = 'x';
",
    );
    assert_eq!(col_index.line_wide_chars.len(), 0);
}
macro_rules! test {
    (
        case: $test_name:ident,
        text: $text:expr,
        lines: $lines:expr,
        multi_byte_chars: $multi_byte_chars:expr,
    ) => {
        #[test]
        fn $test_name() {
            let line_index = LineIndex::new($text);

            let expected_lines: Vec<TextSize> =
                $lines.into_iter().map(<TextSize as From<u32>>::from).collect();
            assert_eq!(&*line_index.newlines, &*expected_lines);

            let expected_mbcs: Vec<_> = $multi_byte_chars
                .into_iter()
                .map(|(line, (pos, end)): (u32, (u32, u32))| {
                    (line, WideChar { start: TextSize::from(pos), end: TextSize::from(end) })
                })
                .collect();
            assert_eq!(
                line_index
                    .line_wide_chars
                    .iter()
                    .flat_map(|(line, val)| std::iter::repeat(*line).zip(val.iter().copied()))
                    .collect::<Vec<_>>(),
                expected_mbcs
            );
        }
    };
}
test!(
    case: empty_text,
    text: "",
    lines: vec![],
    multi_byte_chars: vec![],
);

test!(
    case: newlines_short,
    text: "a\nc",
    lines: vec![2],
    multi_byte_chars: vec![],
);

test!(
    case: newlines_long,
    text: "012345678\nabcdef012345678\na",
    lines: vec![10, 26],
    multi_byte_chars: vec![],
);

test!(
    case: newline_and_multi_byte_char_in_same_chunk,
    text: "01234β789\nbcdef0123456789abcdef",
    lines: vec![11],
    multi_byte_chars: vec![(0, (5, 7))],
);

test!(
    case: newline_and_control_char_in_same_chunk,
    text: "01234\u{07}6789\nbcdef0123456789abcdef",
    lines: vec![11],
    multi_byte_chars: vec![],
);

test!(
    case: multi_byte_char_short,
    text: "aβc",
    lines: vec![],
    multi_byte_chars: vec![(0, (1, 3))],
);

test!(
    case: multi_byte_char_long,
    text: "0123456789abcΔf012345β",
    lines: vec![],
    multi_byte_chars: vec![(0, (13, 15)), (0, (22, 24))],
);

test!(
    case: multi_byte_char_across_chunk_boundary,
    text: "0123456789abcdeΔ123456789abcdef01234",
    lines: vec![],
    multi_byte_chars: vec![(0, (15, 17))],
);

test!(
    case: multi_byte_char_across_chunk_boundary_tail,
    text: "0123456789abcdeΔ....",
    lines: vec![],
    multi_byte_chars: vec![(0, (15, 17))],
);

test!(
    case: multi_byte_with_new_lines,
    text: "01\t345\n789abcΔf01234567\u{07}9\nbcΔf",
    lines: vec![7, 27],
    multi_byte_chars: vec![(1, (13, 15)), (2, (29, 31))],
);

test!(
    case: trailing_newline,
    text: "0123456789\n",
    lines: vec![11],
    multi_byte_chars: vec![],
);

test!(
    case: trailing_newline_chunk_boundary,
    text: "0123456789abcde\n",
    lines: vec![16],
    multi_byte_chars: vec![],
);


@@ -1,62 +0,0 @@
use line_index::{LineCol, LineIndex, TextRange};

#[test]
fn test_line_index() {
    let text = "hello\nworld";
    let table = [
        (00, 0, 0),
        (01, 0, 1),
        (05, 0, 5),
        (06, 1, 0),
        (07, 1, 1),
        (08, 1, 2),
        (10, 1, 4),
        (11, 1, 5),
    ];

    let index = LineIndex::new(text);
    for (offset, line, col) in table {
        assert_eq!(index.line_col(offset.into()), LineCol { line, col });
    }

    let text = "\nhello\nworld";
    let table = [(0, 0, 0), (1, 1, 0), (2, 1, 1), (6, 1, 5), (7, 2, 0)];
    let index = LineIndex::new(text);
    for (offset, line, col) in table {
        assert_eq!(index.line_col(offset.into()), LineCol { line, col });
    }
}

#[test]
fn test_char_len() {
    assert_eq!('メ'.len_utf8(), 3);
    assert_eq!('メ'.len_utf16(), 1);
}

#[test]
fn test_splitlines() {
    fn r(lo: u32, hi: u32) -> TextRange {
        TextRange::new(lo.into(), hi.into())
    }

    let text = "a\nbb\nccc\n";
    let line_index = LineIndex::new(text);
    let actual = line_index.lines(r(0, 9)).collect::<Vec<_>>();
    let expected = vec![r(0, 2), r(2, 5), r(5, 9)];
    assert_eq!(actual, expected);

    let text = "";
    let line_index = LineIndex::new(text);
    let actual = line_index.lines(r(0, 0)).collect::<Vec<_>>();
    let expected = vec![];
    assert_eq!(actual, expected);

    let text = "\n";
    let line_index = LineIndex::new(text);
    let actual = line_index.lines(r(0, 1)).collect::<Vec<_>>();
    let expected = vec![r(0, 1)];
    assert_eq!(actual, expected)
}