//! Simple smoke tests for the project model.
//!
//! Our project model code is rather complicated -- the logic for lowering
//! from `cargo metadata` to `CrateGraph` is fiddly and special-cased. For a
//! long time we survived without testing it at all, but that increasingly
//! seemed like a poor option, so these tests detect the most obvious
//! failures.
//!
//! Although we rely on external processes (cargo & rustc), we are actually
//! using their stable interfaces, so we can mock out their outputs: the
//! helpers below deserialize canned `cargo metadata` / `rust-project.json`
//! fixtures from `test_data/` instead of shelling out. Fast, in-process
//! tests seem more important here than the extra assurance we would get
//! from running the real thing. Long term, we would like to virtualize IO
//! so that such mocking can be done in a more principled way, but let's
//! start simple.

use std::{
    ops::Deref,
    path::{Path, PathBuf},
};

use base_db::{CrateGraph, FileId, ProcMacroPaths};
use cfg::{CfgAtom, CfgDiff};
use expect_test::{expect_file, ExpectFile};
use paths::{AbsPath, AbsPathBuf};
use rustc_hash::FxHashMap;
use serde::de::DeserializeOwned;

use crate::{
    CargoWorkspace, CfgOverrides, ProjectJson, ProjectJsonData, ProjectWorkspace, Sysroot,
    WorkspaceBuildScripts,
};

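/// Lowers a canned `cargo metadata` JSON fixture to a `CrateGraph`, with no
/// cfg overrides, no sysroot, and no `rustc` source.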
fn load_cargo(file: &str) -> (CrateGraph, ProcMacroPaths) {
    load_cargo_with_overrides(file, CfgOverrides::default())
}

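/// Like `load_cargo`, but applies the given `CfgOverrides` when lowering, so
/// tests can exercise both the global and the per-crate cfg override paths.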
fn load_cargo_with_overrides(
    file: &str,
    cfg_overrides: CfgOverrides,
) -> (CrateGraph, ProcMacroPaths) {
    let meta = get_test_json_file(file);
    let cargo_workspace = CargoWorkspace::new(meta);
    let project_workspace = ProjectWorkspace::Cargo {
        cargo: cargo_workspace,
        build_scripts: WorkspaceBuildScripts::default(),
        sysroot: Err(None),
        rustc: Err(None),
        rustc_cfg: Vec::new(),
        cfg_overrides,
        toolchain: None,
        target_layout: Err("target_data_layout not loaded".into()),
    };
    to_crate_graph(project_workspace)
}

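/// Like `load_cargo`, but with the `fake-sysroot` fixture attached, so that
/// sysroot crates also show up in the graph. `file_map` interns paths to
/// `FileId`s; sharing one map across calls keeps the ids consistent between
/// two loads in the same test.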
fn load_cargo_with_fake_sysroot(
    file_map: &mut FxHashMap<AbsPathBuf, FileId>,
    file: &str,
) -> (CrateGraph, ProcMacroPaths) {
    let meta = get_test_json_file(file);
    let cargo_workspace = CargoWorkspace::new(meta);
    let project_workspace = ProjectWorkspace::Cargo {
        cargo: cargo_workspace,
        build_scripts: WorkspaceBuildScripts::default(),
        sysroot: Ok(get_fake_sysroot()),
        rustc: Err(None),
        rustc_cfg: Vec::new(),
        cfg_overrides: Default::default(),
        toolchain: None,
        target_layout: Err("target_data_layout not loaded".into()),
    };
    project_workspace.to_crate_graph(
        &mut {
            |path| {
                let len = file_map.len();
                Some(*file_map.entry(path.to_path_buf()).or_insert(FileId::from_raw(len as u32)))
            }
        },
        &Default::default(),
    )
}

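/// Lowers a `rust-project.json` fixture (rooted at the expanded `$ROOT$`
/// placeholder) to a `CrateGraph`, together with the fake sysroot.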
fn load_rust_project(file: &str) -> (CrateGraph, ProcMacroPaths) {
    let data = get_test_json_file(file);
    let project = rooted_project_json(data);
    let sysroot = Ok(get_fake_sysroot());
    let project_workspace =
        ProjectWorkspace::Json { project, sysroot, rustc_cfg: Vec::new(), toolchain: None };
    to_crate_graph(project_workspace)
}

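/// Reads a JSON fixture from `test_data/` and deserializes it, after
/// rewriting every `$ROOT$` placeholder in string values to a concrete,
/// platform-specific root.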
fn get_test_json_file<T: DeserializeOwned>(file: &str) -> T {
    let file = get_test_path(file);
    let data = std::fs::read_to_string(file).unwrap();
    let mut json = data.parse::<serde_json::Value>().unwrap();
    fixup_paths(&mut json);
    return serde_json::from_value(json).unwrap();

    fn fixup_paths(val: &mut serde_json::Value) {
        match val {
            serde_json::Value::String(s) => replace_root(s, true),
            serde_json::Value::Array(vals) => vals.iter_mut().for_each(fixup_paths),
            serde_json::Value::Object(kvals) => kvals.values_mut().for_each(fixup_paths),
            serde_json::Value::Null | serde_json::Value::Bool(_) | serde_json::Value::Number(_) => {
            }
        }
    }
}

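/// Rewrites between the `$ROOT$` placeholder and a concrete root path.
/// `direction == true` expands the placeholder (when loading fixtures);
/// `false` re-inserts it (when normalizing `Debug` output for snapshots).
/// The two directions use differently escaped roots on Windows.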
fn replace_root(s: &mut String, direction: bool) {
    if direction {
        let root = if cfg!(windows) { r#"C:\\ROOT\"# } else { "/ROOT/" };
        *s = s.replace("$ROOT$", root)
    } else {
        let root = if cfg!(windows) { r#"C:\\\\ROOT\\"# } else { "/ROOT/" };
        *s = s.replace(root, "$ROOT$")
    }
}

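/// Replaces the absolute path of the `fake-sysroot` fixture in snapshot
/// output with a `$FAKESYSROOT$` placeholder, keeping snapshots
/// machine-independent.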
fn replace_fake_sys_root(s: &mut String) {
    let fake_sysroot_path = get_test_path("fake-sysroot");
    let fake_sysroot_path = if cfg!(windows) {
        let normalized_path =
            fake_sysroot_path.to_str().expect("expected str").replace('\\', r#"\\"#);
        format!(r#"{}\\"#, normalized_path)
    } else {
        format!("{}/", fake_sysroot_path.to_str().expect("expected str"))
    };
    *s = s.replace(&fake_sysroot_path, "$FAKESYSROOT$")
}

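/// Resolves `file` relative to this crate's `test_data/` directory.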
fn get_test_path(file: &str) -> PathBuf {
    let base = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    base.join("test_data").join(file)
}

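/// Loads `test_data/fake-sysroot` as a `Sysroot`, using the same directory
/// as both the sysroot root and its source root.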
fn get_fake_sysroot() -> Sysroot {
    let sysroot_path = get_test_path("fake-sysroot");
    // there's no `libexec/` directory with a `proc-macro-srv` binary in that
    // fake sysroot, so we give them both the same path:
    let sysroot_dir = AbsPathBuf::assert(sysroot_path);
    let sysroot_src_dir = sysroot_dir.clone();
    Sysroot::load(sysroot_dir, sysroot_src_dir, false)
}

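/// Turns raw `ProjectJsonData` into a `ProjectJson` anchored at the expanded
/// `$ROOT$` path.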
fn rooted_project_json(data: ProjectJsonData) -> ProjectJson {
    let mut root = "$ROOT$".to_owned();
    replace_root(&mut root, true);
    let path = Path::new(&root);
    let base = AbsPath::assert(path);
    ProjectJson::new(base, data)
}

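/// Lowers a `ProjectWorkspace` to a `CrateGraph`, handing out sequential
/// `FileId`s from a counter instead of consulting a real VFS.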
fn to_crate_graph(project_workspace: ProjectWorkspace) -> (CrateGraph, ProcMacroPaths) {
    project_workspace.to_crate_graph(
        &mut {
            let mut counter = 0;
            move |_path| {
                counter += 1;
                Some(FileId::from_raw(counter))
            }
        },
        &Default::default(),
    )
}

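/// Snapshot-tests the `Debug` rendering of a `CrateGraph` against an
/// expect-test file, normalizing the root and fake-sysroot paths first.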
fn check_crate_graph(crate_graph: CrateGraph, expect: ExpectFile) {
    let mut crate_graph = format!("{crate_graph:#?}");
    replace_root(&mut crate_graph, false);
    replace_fake_sys_root(&mut crate_graph);
    expect.assert_eq(&crate_graph);
}

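// The tests below lower fixtures from `test_data/` and compare the resulting
// crate graphs against checked-in expect-test snapshots.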
#[test]
fn cargo_hello_world_project_model_with_wildcard_overrides() {
    let cfg_overrides = CfgOverrides {
        global: CfgDiff::new(Vec::new(), vec![CfgAtom::Flag("test".into())]).unwrap(),
        selective: Default::default(),
    };
    let (crate_graph, _proc_macros) =
        load_cargo_with_overrides("hello-world-metadata.json", cfg_overrides);
    check_crate_graph(
        crate_graph,
        expect_file![
            "../test_data/output/cargo_hello_world_project_model_with_wildcard_overrides.txt"
        ],
    )
}

#[test]
fn cargo_hello_world_project_model_with_selective_overrides() {
    let cfg_overrides = CfgOverrides {
        global: Default::default(),
        selective: std::iter::once((
            "libc".to_owned(),
            CfgDiff::new(Vec::new(), vec![CfgAtom::Flag("test".into())]).unwrap(),
        ))
        .collect(),
    };
    let (crate_graph, _proc_macros) =
        load_cargo_with_overrides("hello-world-metadata.json", cfg_overrides);
    check_crate_graph(
        crate_graph,
        expect_file![
            "../test_data/output/cargo_hello_world_project_model_with_selective_overrides.txt"
        ],
    )
}

#[test]
fn cargo_hello_world_project_model() {
    let (crate_graph, _proc_macros) = load_cargo("hello-world-metadata.json");
    check_crate_graph(
        crate_graph,
        expect_file!["../test_data/output/cargo_hello_world_project_model.txt"],
    )
}

#[test]
fn rust_project_hello_world_project_model() {
    let (crate_graph, _proc_macros) = load_rust_project("hello-world-project.json");
    check_crate_graph(
        crate_graph,
        expect_file!["../test_data/output/rust_project_hello_world_project_model.txt"],
    );
}

#[test]
fn rust_project_is_proc_macro_has_proc_macro_dep() {
    let (crate_graph, _proc_macros) = load_rust_project("is-proc-macro-project.json");
    // Since the project only defines one crate (outside the sysroot crates),
    // it should be the one with the biggest Id.
    let crate_id = crate_graph.iter().max().unwrap();
    let crate_data = &crate_graph[crate_id];
    // Assert that the project crate with `is_proc_macro` has a dependency
    // on the proc_macro sysroot crate.
    crate_data.dependencies.iter().find(|&dep| dep.name.deref() == "proc_macro").unwrap();
}

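// Extending a crate graph with an identical copy of itself must be a no-op.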
#[test]
fn crate_graph_dedup_identical() {
    let (mut crate_graph, proc_macros) =
        load_cargo_with_fake_sysroot(&mut Default::default(), "regex-metadata.json");
    crate_graph.sort_deps();

    let (d_crate_graph, mut d_proc_macros) = (crate_graph.clone(), proc_macros.clone());

    crate_graph.extend(d_crate_graph.clone(), &mut d_proc_macros, |_| ());
    assert!(crate_graph.iter().eq(d_crate_graph.iter()));
    assert_eq!(proc_macros, d_proc_macros);
}

#[test]
fn crate_graph_dedup() {
    let path_map = &mut Default::default();
    let (mut crate_graph, _proc_macros) =
        load_cargo_with_fake_sysroot(path_map, "ripgrep-metadata.json");
    assert_eq!(crate_graph.iter().count(), 81);
    crate_graph.sort_deps();
    let (regex_crate_graph, mut regex_proc_macros) =
        load_cargo_with_fake_sysroot(path_map, "regex-metadata.json");
    assert_eq!(regex_crate_graph.iter().count(), 60);

    crate_graph.extend(regex_crate_graph, &mut regex_proc_macros, |_| ());
    assert_eq!(crate_graph.iter().count(), 118);
}

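// The `deduplication_crate_graph_{A,B}.json` fixtures appear to model two
// workspaces that both pull in a crate displayed as `p2`; merging their
// graphs in either order must keep a single `p2` with a local origin.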
#[test]
fn test_deduplicate_origin_dev() {
    let path_map = &mut Default::default();
    let (mut crate_graph, _proc_macros) =
        load_cargo_with_fake_sysroot(path_map, "deduplication_crate_graph_A.json");
    crate_graph.sort_deps();
    let (crate_graph_1, mut _proc_macros_2) =
        load_cargo_with_fake_sysroot(path_map, "deduplication_crate_graph_B.json");

    crate_graph.extend(crate_graph_1, &mut _proc_macros_2, |_| ());

    let mut crates_named_p2 = vec![];
    for id in crate_graph.iter() {
        let krate = &crate_graph[id];
        if let Some(name) = krate.display_name.as_ref() {
            if name.to_string() == "p2" {
                crates_named_p2.push(krate);
            }
        }
    }

    assert!(crates_named_p2.len() == 1);
    let p2 = crates_named_p2[0];
    assert!(p2.origin.is_local());
}

#[test]
fn test_deduplicate_origin_dev_rev() {
    let path_map = &mut Default::default();
    let (mut crate_graph, _proc_macros) =
        load_cargo_with_fake_sysroot(path_map, "deduplication_crate_graph_B.json");
    crate_graph.sort_deps();
    let (crate_graph_1, mut _proc_macros_2) =
        load_cargo_with_fake_sysroot(path_map, "deduplication_crate_graph_A.json");

    crate_graph.extend(crate_graph_1, &mut _proc_macros_2, |_| ());

    let mut crates_named_p2 = vec![];
    for id in crate_graph.iter() {
        let krate = &crate_graph[id];
        if let Some(name) = krate.display_name.as_ref() {
            if name.to_string() == "p2" {
                crates_named_p2.push(krate);
            }
        }
    }

    assert!(crates_named_p2.len() == 1);
    let p2 = crates_named_p2[0];
    assert!(p2.origin.is_local());
}

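// Opt-in smoke test: it discovers and loads the real sysroot of the host
// machine, so it only runs when `SYSROOT_CARGO_METADATA` is set.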
#[test]
fn smoke_test_real_sysroot_cargo() {
    if std::env::var("SYSROOT_CARGO_METADATA").is_err() {
        return;
    }
    let file_map = &mut FxHashMap::<AbsPathBuf, FileId>::default();
    let meta = get_test_json_file("hello-world-metadata.json");

    let cargo_workspace = CargoWorkspace::new(meta);
    let sysroot = Ok(Sysroot::discover(
        AbsPath::assert(Path::new(env!("CARGO_MANIFEST_DIR"))),
        &Default::default(),
        true,
    )
    .unwrap());

    let project_workspace = ProjectWorkspace::Cargo {
        cargo: cargo_workspace,
        build_scripts: WorkspaceBuildScripts::default(),
        sysroot,
        rustc: Err(None),
        rustc_cfg: Vec::new(),
        cfg_overrides: Default::default(),
        toolchain: None,
        target_layout: Err("target_data_layout not loaded".into()),
    };
    project_workspace.to_crate_graph(
        &mut {
            |path| {
                let len = file_map.len();
                Some(*file_map.entry(path.to_path_buf()).or_insert(FileId::from_raw(len as u32)))
            }
        },
        &Default::default(),
    );
}