//! Assorted testing utilities.
//!
//! Most notable things are:
//!
//! * Rich text comparison, which outputs a diff.
//! * Extracting markup (mainly, `$0` markers) out of fixture strings.
//! * marks (see the eponymous module).

#![allow(clippy::print_stderr)]

mod assert_linear;
pub mod bench_fixture;
mod fixture;

use std::{
    collections::BTreeMap,
    env, fs,
    path::{Path, PathBuf},
};

use paths::Utf8PathBuf;
use profile::StopWatch;
use stdx::is_ci;
use text_size::{TextRange, TextSize};

pub use dissimilar::diff as __diff;
pub use rustc_hash::FxHashMap;

pub use crate::{
    assert_linear::AssertLinear,
    fixture::{Fixture, FixtureWithProjectMeta, MiniCore},
};

pub const CURSOR_MARKER: &str = "$0";
pub const ESCAPED_CURSOR_MARKER: &str = "\\$0";

/// Asserts that two strings are equal, otherwise displays a rich diff between them.
///
/// The diff shows changes from the "original" left string to the "actual" right string.
///
/// All arguments starting from and including the 3rd one are passed to
/// the `eprintln!()` macro in case of text inequality.
#[macro_export]
macro_rules! assert_eq_text {
    ($left:expr, $right:expr) => {
        assert_eq_text!($left, $right,)
    };
    ($left:expr, $right:expr, $($tt:tt)*) => {{
        let left = $left;
        let right = $right;
        if left != right {
            if left.trim() == right.trim() {
                std::eprintln!("Left:\n{:?}\n\nRight:\n{:?}\n\nWhitespace difference\n", left, right);
            } else {
                let diff = $crate::__diff(left, right);
                std::eprintln!("Left:\n{}\n\nRight:\n{}\n\nDiff:\n{}\n", left, right, $crate::format_diff(diff));
            }
            std::eprintln!($($tt)*);
            panic!("text differs");
        }
    }};
}
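
// Illustrative usage sketch (not part of the original test suite): equal inputs pass
// silently; on mismatch, the optional trailing arguments are printed as extra context.
#[test]
fn assert_eq_text_usage_example() {
    let expected = "fn main() {}";
    let actual = "fn main() {}";
    assert_eq_text!(expected, actual, "context for test case {}", 1);
}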

/// Infallible version of `try_extract_offset()`.
pub fn extract_offset(text: &str) -> (TextSize, String) {
    match try_extract_offset(text) {
        None => panic!("text should contain cursor marker"),
        Some(result) => result,
    }
}

/// Returns the offset of the first occurrence of the `$0` marker and the copy of `text`
/// without the marker.
fn try_extract_offset(text: &str) -> Option<(TextSize, String)> {
    let cursor_pos = text.find(CURSOR_MARKER)?;
    let mut new_text = String::with_capacity(text.len() - CURSOR_MARKER.len());
    new_text.push_str(&text[..cursor_pos]);
    new_text.push_str(&text[cursor_pos + CURSOR_MARKER.len()..]);
    let cursor_pos = TextSize::from(cursor_pos as u32);
    Some((cursor_pos, new_text))
}
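
// Illustrative sketch (not part of the original test suite): `extract_offset` returns
// the position of the `$0` marker and the text with the marker removed.
#[test]
fn test_extract_offset_example() {
    let (offset, text) = extract_offset("fn $0main() {}");
    assert_eq!(offset, TextSize::from(3));
    assert_eq!(text, "fn main() {}");
}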

/// Infallible version of `try_extract_range()`.
pub fn extract_range(text: &str) -> (TextRange, String) {
    match try_extract_range(text) {
        None => panic!("text should contain cursor marker"),
        Some(result) => result,
    }
}

/// Returns the `TextRange` between the first two `$0...$0` markers and the copy
/// of `text` without both of these markers.
fn try_extract_range(text: &str) -> Option<(TextRange, String)> {
    let (start, text) = try_extract_offset(text)?;
    let (end, text) = try_extract_offset(&text)?;
    Some((TextRange::new(start, end), text))
}
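
// Illustrative sketch (not part of the original test suite): `extract_range` returns
// the range between two `$0` markers, with both markers removed from the text.
#[test]
fn test_extract_range_example() {
    let (range, text) = extract_range("fn $0main$0() {}");
    assert_eq!(range, TextRange::new(TextSize::from(3), TextSize::from(7)));
    assert_eq!(&text[range], "main");
    assert_eq!(text, "fn main() {}");
}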

#[derive(Clone, Copy, Debug)]
pub enum RangeOrOffset {
    Range(TextRange),
    Offset(TextSize),
}

impl RangeOrOffset {
    pub fn expect_offset(self) -> TextSize {
        match self {
            RangeOrOffset::Offset(it) => it,
            RangeOrOffset::Range(_) => panic!("expected an offset but got a range instead"),
        }
    }
    pub fn expect_range(self) -> TextRange {
        match self {
            RangeOrOffset::Range(it) => it,
            RangeOrOffset::Offset(_) => panic!("expected a range but got an offset"),
        }
    }
    pub fn range_or_empty(self) -> TextRange {
        match self {
            RangeOrOffset::Range(range) => range,
            RangeOrOffset::Offset(offset) => TextRange::empty(offset),
        }
    }
}

impl From<RangeOrOffset> for TextRange {
    fn from(selection: RangeOrOffset) -> Self {
        match selection {
            RangeOrOffset::Range(it) => it,
            RangeOrOffset::Offset(it) => TextRange::empty(it),
        }
    }
}

/// Extracts a `TextRange` or `TextSize` depending on the number of `$0` markers
/// found in `text`.
///
/// # Panics
/// Panics if no `$0` marker is present in `text`.
pub fn extract_range_or_offset(text: &str) -> (RangeOrOffset, String) {
    if let Some((range, text)) = try_extract_range(text) {
        return (RangeOrOffset::Range(range), text);
    }
    let (offset, text) = extract_offset(text);
    (RangeOrOffset::Offset(offset), text)
}
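
// Illustrative sketch (not part of the original test suite): a single `$0` marker
// produces an offset, a pair of markers produces a range.
#[test]
fn test_extract_range_or_offset_example() {
    let (sel, text) = extract_range_or_offset("const $0X$0: i32 = 92;");
    assert_eq!(&text[sel.expect_range()], "X");
    let (sel, text) = extract_range_or_offset("const X: i32 = $092;");
    assert_eq!(sel.expect_offset(), TextSize::from(15));
    assert_eq!(text, "const X: i32 = 92;");
}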

/// Extracts ranges, marked with `<tag> </tag>` pairs, from `text`.
pub fn extract_tags(mut text: &str, tag: &str) -> (Vec<(TextRange, Option<String>)>, String) {
    let open = format!("<{tag}");
    let close = format!("</{tag}>");
    let mut ranges = Vec::new();
    let mut res = String::new();
    let mut stack = Vec::new();
    loop {
        match text.find('<') {
            None => {
                res.push_str(text);
                break;
            }
            Some(i) => {
                res.push_str(&text[..i]);
                text = &text[i..];
                if text.starts_with(&open) {
                    let close_open = text.find('>').unwrap();
                    let attr = text[open.len()..close_open].trim();
                    let attr = if attr.is_empty() { None } else { Some(attr.to_owned()) };
                    text = &text[close_open + '>'.len_utf8()..];
                    let from = TextSize::of(&res);
                    stack.push((from, attr));
                } else if text.starts_with(&close) {
                    text = &text[close.len()..];
                    let (from, attr) = stack.pop().unwrap_or_else(|| panic!("unmatched </{tag}>"));
                    let to = TextSize::of(&res);
                    ranges.push((TextRange::new(from, to), attr));
                } else {
                    res.push('<');
                    text = &text['<'.len_utf8()..];
                }
            }
        }
    }
    assert!(stack.is_empty(), "unmatched <{tag}>");
    ranges.sort_by_key(|r| (r.0.start(), r.0.end()));
    (ranges, res)
}

#[test]
fn test_extract_tags() {
    let (tags, text) = extract_tags(r#"<tag fn>fn <tag>main</tag>() {}</tag>"#, "tag");
    let actual = tags.into_iter().map(|(range, attr)| (&text[range], attr)).collect::<Vec<_>>();
    assert_eq!(actual, vec![("fn main() {}", Some("fn".into())), ("main", None)]);
}

/// Inserts a `$0` marker into `text` at `offset`.
pub fn add_cursor(text: &str, offset: TextSize) -> String {
    let offset: usize = offset.into();
    let mut res = String::new();
    res.push_str(&text[..offset]);
    res.push_str("$0");
    res.push_str(&text[offset..]);
    res
}
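
// Illustrative sketch (not part of the original test suite): `add_cursor` is the
// inverse of `extract_offset`.
#[test]
fn test_add_cursor_example() {
    let (offset, text) = extract_offset("fn main() { $0}");
    assert_eq!(add_cursor(&text, offset), "fn main() { $0}");
}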

/// Extracts `//^^^ some text` annotations.
///
/// A run of `^^^` can be arbitrarily long and points to the corresponding range
/// in the line above.
///
/// The `// ^file text` syntax can be used to attach `text` to the entirety of
/// the file.
///
/// Multiline string values are supported:
///
/// // ^^^ first line
/// //   | second line
///
/// Trailing whitespace is sometimes desired but usually stripped by the editor
/// if at the end of a line, or incorrectly sized if followed by another
/// annotation. In those cases the annotation can be explicitly ended with the
/// `$` character.
///
/// // ^^^ trailing-ws-wanted $
///
/// Annotations point to the last line that actually was long enough for the
/// range, not counting annotations themselves. So overlapping annotations are
/// possible:
/// ```text
/// // stuff        other stuff
/// // ^^ 'st'
/// // ^^^^^ 'stuff'
/// //              ^^^^^^^^^^^ 'other stuff'
/// ```
pub fn extract_annotations(text: &str) -> Vec<(TextRange, String)> {
    let mut res = Vec::new();
    // map from line length to beginning of last line that had that length
    let mut line_start_map = BTreeMap::new();
    let mut line_start: TextSize = 0.into();
    let mut prev_line_annotations: Vec<(TextSize, usize)> = Vec::new();
    for line in text.split_inclusive('\n') {
        let mut this_line_annotations = Vec::new();
        let line_length = if let Some((prefix, suffix)) = line.split_once("//") {
            let ss_len = TextSize::of("//");
            let annotation_offset = TextSize::of(prefix) + ss_len;
            for annotation in extract_line_annotations(suffix.trim_end_matches('\n')) {
                match annotation {
                    LineAnnotation::Annotation { mut range, content, file } => {
                        range += annotation_offset;
                        this_line_annotations.push((range.end(), res.len()));
                        let range = if file {
                            TextRange::up_to(TextSize::of(text))
                        } else {
                            let line_start = line_start_map.range(range.end()..).next().unwrap();

                            range + line_start.1
                        };
                        res.push((range, content));
                    }
                    LineAnnotation::Continuation { mut offset, content } => {
                        offset += annotation_offset;
                        let &(_, idx) = prev_line_annotations
                            .iter()
                            .find(|&&(off, _idx)| off == offset)
                            .unwrap();
                        res[idx].1.push('\n');
                        res[idx].1.push_str(&content);
                        res[idx].1.push('\n');
                    }
                }
            }
            annotation_offset
        } else {
            TextSize::of(line)
        };

        line_start_map = line_start_map.split_off(&line_length);
        line_start_map.insert(line_length, line_start);

        line_start += TextSize::of(line);

        prev_line_annotations = this_line_annotations;
    }

    res
}

enum LineAnnotation {
    Annotation { range: TextRange, content: String, file: bool },
    Continuation { offset: TextSize, content: String },
}

fn extract_line_annotations(mut line: &str) -> Vec<LineAnnotation> {
    let mut res = Vec::new();
    let mut offset: TextSize = 0.into();
    let marker: fn(char) -> bool = if line.contains('^') { |c| c == '^' } else { |c| c == '|' };
    while let Some(idx) = line.find(marker) {
        offset += TextSize::try_from(idx).unwrap();
        line = &line[idx..];

        let mut len = line.chars().take_while(|&it| it == '^').count();
        let mut continuation = false;
        if len == 0 {
            assert!(line.starts_with('|'));
            continuation = true;
            len = 1;
        }
        let range = TextRange::at(offset, len.try_into().unwrap());
        let line_no_caret = &line[len..];
        let end_marker = line_no_caret.find('$');
        let next = line_no_caret.find(marker).map_or(line.len(), |it| it + len);

        let cond = |end_marker| {
            end_marker < next
                && (line_no_caret[end_marker + 1..].is_empty()
                    || line_no_caret[end_marker + 1..]
                        .strip_prefix(|c: char| c.is_whitespace() || c == '^')
                        .is_some())
        };
        let mut content = match end_marker {
            Some(end_marker) if cond(end_marker) => &line_no_caret[..end_marker],
            _ => line_no_caret[..next - len].trim_end(),
        };

        let mut file = false;
        if !continuation && content.starts_with("file") {
            file = true;
            content = &content["file".len()..];
        }

        let content = content.trim_start().to_owned();

        let annotation = if continuation {
            LineAnnotation::Continuation { offset: range.end(), content }
        } else {
            LineAnnotation::Annotation { range, content, file }
        };
        res.push(annotation);

        line = &line[next..];
        offset += TextSize::try_from(next).unwrap();
    }

    res
}

#[test]
fn test_extract_annotations_1() {
    let text = stdx::trim_indent(
        r#"
fn main() {
    let (x,     y) = (9, 2);
       //^ def  ^ def
    zoo + 1
} //^^^ type:
  //  | i32

// ^file
    "#,
    );
    let res = extract_annotations(&text)
        .into_iter()
        .map(|(range, ann)| (&text[range], ann))
        .collect::<Vec<_>>();

    assert_eq!(
        res[..3],
        [("x", "def".into()), ("y", "def".into()), ("zoo", "type:\ni32\n".into())]
    );
    assert_eq!(res[3].0.len(), 115);
}

#[test]
fn test_extract_annotations_2() {
    let text = stdx::trim_indent(
        r#"
fn main() {
    (x,   y);
   //^ a
      //  ^ b
  //^^^^^^^^ c
}"#,
    );
    let res = extract_annotations(&text)
        .into_iter()
        .map(|(range, ann)| (&text[range], ann))
        .collect::<Vec<_>>();

    assert_eq!(res, [("x", "a".into()), ("y", "b".into()), ("(x,   y)", "c".into())]);
}
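
// Illustrative sketch (not part of the original test suite): the `$` end marker from
// the doc comment above preserves trailing whitespace in the annotation content.
#[test]
fn test_extract_annotations_trailing_dollar() {
    let text = stdx::trim_indent(
        r#"
fn main() {}
// ^^ two $
"#,
    );
    let res = extract_annotations(&text)
        .into_iter()
        .map(|(range, ann)| (&text[range], ann))
        .collect::<Vec<_>>();

    assert_eq!(res, [("ma", "two ".into())]);
}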

/// Returns `true` if slow tests should be skipped. Otherwise returns `false`
/// and also creates a file at `./target/.slow_tests_cookie` which serves as a
/// flag that slow tests did run.
pub fn skip_slow_tests() -> bool {
    let should_skip = (std::env::var("CI").is_err() && std::env::var("RUN_SLOW_TESTS").is_err())
        || std::env::var("SKIP_SLOW_TESTS").is_ok();
    if should_skip {
        eprintln!("ignoring slow test");
    } else {
        let path = project_root().join("./target/.slow_tests_cookie");
        fs::write(path, ".").unwrap();
    }
    should_skip
}

/// Returns the path to the root directory of the `rust-analyzer` project.
pub fn project_root() -> Utf8PathBuf {
    let dir = env!("CARGO_MANIFEST_DIR");
    Utf8PathBuf::from_path_buf(PathBuf::from(dir).parent().unwrap().parent().unwrap().to_owned())
        .unwrap()
}

pub fn format_diff(chunks: Vec<dissimilar::Chunk<'_>>) -> String {
    let mut buf = String::new();
    for chunk in chunks {
        let formatted = match chunk {
            dissimilar::Chunk::Equal(text) => text.into(),
            dissimilar::Chunk::Delete(text) => format!("\x1b[41m{text}\x1b[0m"),
            dissimilar::Chunk::Insert(text) => format!("\x1b[42m{text}\x1b[0m"),
        };
        buf.push_str(&formatted);
    }
    buf
}
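
// Illustrative sketch (not part of the original test suite): `format_diff` wraps the
// `Delete` and `Insert` chunks produced by `dissimilar::diff` in ANSI color escapes.
#[test]
fn format_diff_highlights_changes() {
    let rendered = format_diff(__diff("fn foo() {}", "fn bar() {}"));
    assert!(rendered.contains("\x1b[41m"), "deleted text should be highlighted");
    assert!(rendered.contains("\x1b[42m"), "inserted text should be highlighted");
}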

/// Utility for writing benchmark tests.
///
/// A benchmark test looks like this:
///
/// ```ignore
/// #[test]
/// fn benchmark_foo() {
///     if skip_slow_tests() { return; }
///
///     let data = bench_fixture::some_fixture();
///     let analysis = some_setup();
///
///     let hash = {
///         let _b = bench("foo");
///         actual_work(analysis)
///     };
///     assert_eq!(hash, 92);
/// }
/// ```
///
/// * We skip benchmarks by default, to save time.
///   Ideal benchmark time is 800 -- 1500 ms in debug.
/// * We don't count preparation as part of the benchmark.
/// * The benchmark itself returns some kind of numeric hash.
///   The hash is used as a sanity check that some code is actually run.
///   Otherwise, it's too easy to win the benchmark by just doing nothing.
pub fn bench(label: &'static str) -> impl Drop {
    struct Bencher {
        sw: StopWatch,
        label: &'static str,
    }

    impl Drop for Bencher {
        fn drop(&mut self) {
            eprintln!("{}: {}", self.label, self.sw.elapsed());
        }
    }

    Bencher { sw: StopWatch::start(), label }
}

/// Checks that the `file` has the specified `contents`. If that is not the
/// case, updates the file and then fails the test.
#[track_caller]
pub fn ensure_file_contents(file: &Path, contents: &str) {
    if let Err(()) = try_ensure_file_contents(file, contents) {
        panic!("Some files were not up-to-date");
    }
}

/// Checks that the `file` has the specified `contents`. If that is not the
/// case, updates the file and returns an error.
pub fn try_ensure_file_contents(file: &Path, contents: &str) -> Result<(), ()> {
    match std::fs::read_to_string(file) {
        Ok(old_contents) if normalize_newlines(&old_contents) == normalize_newlines(contents) => {
            return Ok(());
        }
        _ => (),
    }
    let display_path = file.strip_prefix(project_root()).unwrap_or(file);
    eprintln!(
        "\n\x1b[31;1merror\x1b[0m: {} was not up-to-date, updating\n",
        display_path.display()
    );
    if is_ci() {
        eprintln!("    NOTE: run `cargo test` locally and commit the updated files\n");
    }
    if let Some(parent) = file.parent() {
        let _ = std::fs::create_dir_all(parent);
    }
    std::fs::write(file, contents).unwrap();
    Err(())
}
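
// Illustrative usage sketch (not part of the original test suite): a mismatch rewrites
// the file and reports `Err(())`, so an immediate re-check succeeds. The temp-file name
// below is arbitrary.
#[test]
fn try_ensure_file_contents_updates_the_file() {
    let file = std::env::temp_dir().join("rust_analyzer_test_utils_ensure_contents.txt");
    let _ = std::fs::remove_file(&file);
    assert_eq!(try_ensure_file_contents(&file, "hello\n"), Err(()));
    assert_eq!(try_ensure_file_contents(&file, "hello\n"), Ok(()));
    let _ = std::fs::remove_file(&file);
}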

fn normalize_newlines(s: &str) -> String {
    s.replace("\r\n", "\n")
}