From 8dc66b57fe1a0227b8e132c66e48f507959ff01d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Vallotton?= Date: Fri, 15 Dec 2023 13:49:31 -0300 Subject: [PATCH 001/122] fix: waker_getters tracking issue from 87021 for 96992. --- crates/ide-db/src/generated/lints.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ide-db/src/generated/lints.rs b/crates/ide-db/src/generated/lints.rs index 1cb6ff8627..c3b806dec4 100644 --- a/crates/ide-db/src/generated/lints.rs +++ b/crates/ide-db/src/generated/lints.rs @@ -9630,9 +9630,9 @@ The tracking issue for this feature is: [#81944] label: "waker_getters", description: r##"# `waker_getters` -The tracking issue for this feature is: [#87021] +The tracking issue for this feature is: [#96992] -[#87021]: https://github.com/rust-lang/rust/issues/87021 +[#96992]: https://github.com/rust-lang/rust/issues/96992 ------------------------ "##, From e0446a0eb5471d28ec74630ecc43c2ae5da94b79 Mon Sep 17 00:00:00 2001 From: dfireBird Date: Thu, 25 Jan 2024 00:06:22 +0530 Subject: [PATCH 002/122] implement assist for let stmt with TryEnum type to guarded return --- .../src/handlers/convert_to_guarded_return.rs | 126 +++++++++++++++++- 1 file changed, 124 insertions(+), 2 deletions(-) diff --git a/crates/ide-assists/src/handlers/convert_to_guarded_return.rs b/crates/ide-assists/src/handlers/convert_to_guarded_return.rs index 6f30ffa622..5fc1c1dda6 100644 --- a/crates/ide-assists/src/handlers/convert_to_guarded_return.rs +++ b/crates/ide-assists/src/handlers/convert_to_guarded_return.rs @@ -1,6 +1,9 @@ use std::iter::once; -use ide_db::syntax_helpers::node_ext::{is_pattern_cond, single_let}; +use ide_db::{ + syntax_helpers::node_ext::{is_pattern_cond, single_let}, + ty_filter::TryEnum, +}; use syntax::{ ast::{ self, @@ -41,7 +44,20 @@ use crate::{ // } // ``` pub(crate) fn convert_to_guarded_return(acc: &mut Assists, ctx: &AssistContext<'_>) -> Option<()> { - let if_expr: ast::IfExpr = ctx.find_node_at_offset()?; 
+ if let Some(let_stmt) = ctx.find_node_at_offset() { + let_stmt_to_guarded_return(let_stmt, acc, ctx) + } else if let Some(if_expr) = ctx.find_node_at_offset() { + if_expr_to_guarded_return(if_expr, acc, ctx) + } else { + None + } +} + +fn if_expr_to_guarded_return( + if_expr: ast::IfExpr, + acc: &mut Assists, + _ctx: &AssistContext<'_>, +) -> Option<()> { if if_expr.else_branch().is_some() { return None; } @@ -148,6 +164,56 @@ pub(crate) fn convert_to_guarded_return(acc: &mut Assists, ctx: &AssistContext<' ) } +fn let_stmt_to_guarded_return( + let_stmt: ast::LetStmt, + acc: &mut Assists, + ctx: &AssistContext<'_>, +) -> Option<()> { + let pat = let_stmt.pat()?; + let expr = let_stmt.initializer()?; + + let try_enum = + ctx.sema.type_of_expr(&expr).and_then(|ty| TryEnum::from_ty(&ctx.sema, &ty.adjusted()))?; + + let happy_pattern = try_enum.happy_pattern(pat); + let target = let_stmt.syntax().text_range(); + + let early_expression: ast::Expr = { + let parent_block = + let_stmt.syntax().parent()?.ancestors().find_map(ast::BlockExpr::cast)?; + let parent_container = parent_block.syntax().parent()?; + + match parent_container.kind() { + WHILE_EXPR | LOOP_EXPR | FOR_EXPR => make::expr_continue(None), + FN => make::expr_return(None), + _ => return None, + } + }; + + acc.add( + AssistId("convert_to_guarded_return", AssistKind::RefactorRewrite), + "Convert to guarded return", + target, + |edit| { + let let_stmt = edit.make_mut(let_stmt); + let let_indent_level = IndentLevel::from_node(let_stmt.syntax()); + + let replacement = { + let let_else_stmt = make::let_else_stmt( + happy_pattern, + let_stmt.ty(), + expr, + ast::make::tail_only_block_expr(early_expression), + ); + let let_else_stmt = let_else_stmt.indent(let_indent_level); + let_else_stmt.syntax().clone_for_update() + }; + + ted::replace(let_stmt.syntax(), replacement) + }, + ) +} + #[cfg(test)] mod tests { use crate::tests::{check_assist, check_assist_not_applicable}; @@ -450,6 +516,62 @@ fn main() { ); } + 
#[test] + fn convert_let_stmt_inside_fn() { + check_assist( + convert_to_guarded_return, + r#" +//- minicore: option +fn foo() -> Option { + None +} + +fn main() { + let x$0 = foo(); +} +"#, + r#" +fn foo() -> Option { + None +} + +fn main() { + let Some(x) = foo() else { return }; +} +"#, + ); + } + + #[test] + fn convert_let_stmt_inside_loop() { + check_assist( + convert_to_guarded_return, + r#" +//- minicore: option +fn foo() -> Option { + None +} + +fn main() { + loop { + let x$0 = foo(); + } +} +"#, + r#" +fn foo() -> Option { + None +} + +fn main() { + loop { + let Some(x) = foo() else { continue }; + } +} +"#, + ); + } + #[test] fn convert_arbitrary_if_let_patterns() { check_assist( From 9e8a0fae0cea88a4a64bfb9b9dd1fe00f37c3f7e Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 1 Feb 2024 16:16:38 +0100 Subject: [PATCH 003/122] Lint debug prints and disallowed types with clippy --- .github/workflows/ci.yaml | 2 +- Cargo.lock | 1 + Cargo.toml | 7 +- clippy.toml | 5 ++ crates/flycheck/src/lib.rs | 7 +- crates/hir-def/src/body/lower.rs | 5 +- crates/hir-def/src/item_scope.rs | 8 +-- crates/hir-ty/src/layout/tests.rs | 5 +- crates/hir-ty/src/tests.rs | 9 +-- crates/hir-ty/src/traits.rs | 2 +- crates/hir/src/diagnostics.rs | 4 +- .../src/handlers/desugar_doc_comment.rs | 4 +- .../src/handlers/extract_module.rs | 14 ++-- .../src/handlers/generate_delegate_methods.rs | 6 +- .../src/handlers/generate_delegate_trait.rs | 4 +- .../src/handlers/inline_type_alias.rs | 10 +-- .../src/handlers/merge_match_arms.rs | 11 +-- .../src/handlers/remove_unused_imports.rs | 6 +- .../src/handlers/unwrap_result_return_type.rs | 4 +- crates/ide-assists/src/utils/suggest_name.rs | 6 +- crates/ide-db/src/tests/sourcegen_lints.rs | 1 + crates/ide-diagnostics/src/lib.rs | 10 ++- crates/ide-diagnostics/src/tests.rs | 1 + crates/ide-ssr/src/matching.rs | 2 +- crates/ide-ssr/src/tests.rs | 1 + crates/ide/src/doc_links/tests.rs | 3 - crates/ide/src/references.rs | 4 +- 
crates/ide/src/static_index.rs | 15 ++-- crates/limit/src/lib.rs | 13 ++-- crates/mbe/src/syntax_bridge/tests.rs | 5 +- .../src/tests/sourcegen_inline_tests.rs | 1 + crates/proc-macro-api/Cargo.toml | 3 +- crates/proc-macro-api/src/msg/flat.rs | 9 +-- crates/proc-macro-srv-cli/src/main.rs | 2 + .../proc-macro-test/imp/src/lib.rs | 1 + crates/profile/src/lib.rs | 3 +- crates/profile/src/stop_watch.rs | 3 + crates/project-model/src/cargo_workspace.rs | 2 +- crates/project-model/src/sysroot.rs | 2 +- crates/rust-analyzer/src/bin/main.rs | 2 + crates/rust-analyzer/src/cli.rs | 2 + crates/rust-analyzer/src/cli/lsif.rs | 20 +++--- crates/rust-analyzer/src/cli/rustc_tests.rs | 15 ++-- crates/rust-analyzer/src/cli/scip.rs | 14 ++-- .../rust-analyzer/src/diagnostics/to_proto.rs | 4 +- crates/rust-analyzer/src/lsp/ext.rs | 15 ++-- crates/rust-analyzer/src/tracing/hprof.rs | 1 + crates/rust-analyzer/tests/slow-tests/main.rs | 1 + crates/rust-analyzer/tests/slow-tests/tidy.rs | 71 +------------------ crates/sourcegen/src/lib.rs | 1 + crates/stdx/Cargo.toml | 2 +- crates/stdx/src/anymap.rs | 5 +- crates/stdx/src/lib.rs | 2 + crates/stdx/src/panic_context.rs | 3 +- crates/stdx/src/rand.rs | 5 +- crates/syntax/src/fuzz.rs | 1 + crates/syntax/src/tests/sourcegen_ast.rs | 8 +-- crates/test-utils/src/lib.rs | 3 +- docs/dev/lsp-extensions.md | 2 +- lib/la-arena/src/lib.rs | 10 ++- lib/la-arena/src/map.rs | 6 +- lib/lsp-server/examples/goto_def.rs | 3 + lib/lsp-server/src/lib.rs | 1 + xtask/src/main.rs | 1 + 64 files changed, 170 insertions(+), 229 deletions(-) create mode 100644 clippy.toml diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b5c5ff0473..014dcbcc2a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -105,7 +105,7 @@ jobs: - name: clippy if: matrix.os == 'ubuntu-latest' - run: cargo clippy --all-targets + run: cargo clippy --all-targets -- -D clippy::disallowed_macros -D clippy::dbg_macro -D clippy::todo -D 
clippy::print_stdout -D clippy::print_stderr # Weird targets to catch non-portable code rust-cross: diff --git a/Cargo.lock b/Cargo.lock index 1b5efb4bb8..dff65e21fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1283,6 +1283,7 @@ dependencies = [ "object 0.32.0", "paths", "profile", + "rustc-hash", "serde", "serde_json", "snap", diff --git a/Cargo.toml b/Cargo.toml index 5a74864811..2f00e95d7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -164,6 +164,8 @@ len_without_is_empty = "allow" enum_variant_names = "allow" # Builder pattern disagrees new_ret_no_self = "allow" +# Has a bunch of false positives +useless_asref = "allow" ## Following lints should be tackled at some point borrowed_box = "allow" @@ -178,9 +180,12 @@ type_complexity = "allow" wrong_self_convention = "allow" ## warn at following lints +# CI raises these to deny dbg_macro = "warn" todo = "warn" -unimplemented = "allow" +print_stdout = "warn" +print_stderr = "warn" + rc_buffer = "warn" # FIXME enable this, we use this pattern a lot so its annoying work ... 
# str_to_string = "warn" diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000000..8032c775ab --- /dev/null +++ b/clippy.toml @@ -0,0 +1,5 @@ +disallowed-types = [ + { path = "std::collections::HashMap", reason = "use FxHashMap" }, + { path = "std::collections::HashSet", reason = "use FxHashSet" }, + { path = "std::collections::hash_map::RandomState", reason = "use BuildHasherDefault"} +] diff --git a/crates/flycheck/src/lib.rs b/crates/flycheck/src/lib.rs index 22603842a1..ef1404487e 100644 --- a/crates/flycheck/src/lib.rs +++ b/crates/flycheck/src/lib.rs @@ -493,9 +493,7 @@ impl CargoActor { // Skip certain kinds of messages to only spend time on what's useful JsonMessage::Cargo(message) => match message { cargo_metadata::Message::CompilerArtifact(artifact) if !artifact.fresh => { - self.sender - .send(CargoMessage::CompilerArtifact(Box::new(artifact))) - .unwrap(); + self.sender.send(CargoMessage::CompilerArtifact(artifact)).unwrap(); } cargo_metadata::Message::CompilerMessage(msg) => { self.sender.send(CargoMessage::Diagnostic(msg.message)).unwrap(); @@ -539,8 +537,9 @@ impl CargoActor { } } +#[allow(clippy::large_enum_variant)] enum CargoMessage { - CompilerArtifact(Box), + CompilerArtifact(cargo_metadata::Artifact), Diagnostic(Diagnostic), } diff --git a/crates/hir-def/src/body/lower.rs b/crates/hir-def/src/body/lower.rs index 492ea6d5c5..29ac666277 100644 --- a/crates/hir-def/src/body/lower.rs +++ b/crates/hir-def/src/body/lower.rs @@ -1980,10 +1980,7 @@ fn pat_literal_to_hir(lit: &ast::LiteralPat) -> Option<(Literal, ast::Literal)> let ast_lit = lit.literal()?; let mut hir_lit: Literal = ast_lit.kind().into(); if lit.minus_token().is_some() { - let Some(h) = hir_lit.negate() else { - return None; - }; - hir_lit = h; + hir_lit = hir_lit.negate()?; } Some((hir_lit, ast_lit)) } diff --git a/crates/hir-def/src/item_scope.rs b/crates/hir-def/src/item_scope.rs index 6237ea7353..60e61dcdf4 100644 --- a/crates/hir-def/src/item_scope.rs +++ 
b/crates/hir-def/src/item_scope.rs @@ -222,17 +222,15 @@ impl ItemScope { self.declarations.iter().copied() } - pub fn extern_crate_decls( - &self, - ) -> impl Iterator + ExactSizeIterator + '_ { + pub fn extern_crate_decls(&self) -> impl ExactSizeIterator + '_ { self.extern_crate_decls.iter().copied() } - pub fn use_decls(&self) -> impl Iterator + ExactSizeIterator + '_ { + pub fn use_decls(&self) -> impl ExactSizeIterator + '_ { self.use_decls.iter().copied() } - pub fn impls(&self) -> impl Iterator + ExactSizeIterator + '_ { + pub fn impls(&self) -> impl ExactSizeIterator + '_ { self.impls.iter().copied() } diff --git a/crates/hir-ty/src/layout/tests.rs b/crates/hir-ty/src/layout/tests.rs index 1f2ea753c1..ba3dfe8100 100644 --- a/crates/hir-ty/src/layout/tests.rs +++ b/crates/hir-ty/src/layout/tests.rs @@ -1,8 +1,7 @@ -use std::collections::HashMap; - use chalk_ir::{AdtId, TyKind}; use either::Either; use hir_def::db::DefDatabase; +use rustc_hash::FxHashMap; use test_fixture::WithFixture; use triomphe::Arc; @@ -16,7 +15,7 @@ use crate::{ mod closure; fn current_machine_data_layout() -> String { - project_model::target_data_layout::get(None, None, &HashMap::default()).unwrap() + project_model::target_data_layout::get(None, None, &FxHashMap::default()).unwrap() } fn eval_goal(ra_fixture: &str, minicore: &str) -> Result, LayoutError> { diff --git a/crates/hir-ty/src/tests.rs b/crates/hir-ty/src/tests.rs index 9804910c87..03e593d9d1 100644 --- a/crates/hir-ty/src/tests.rs +++ b/crates/hir-ty/src/tests.rs @@ -10,7 +10,7 @@ mod regression; mod simple; mod traits; -use std::{collections::HashMap, env}; +use std::env; use base_db::{FileRange, SourceDatabaseExt}; use expect_test::Expect; @@ -25,6 +25,7 @@ use hir_def::{ }; use hir_expand::{db::ExpandDatabase, InFile}; use once_cell::race::OnceBool; +use rustc_hash::FxHashMap; use stdx::format_to; use syntax::{ ast::{self, AstNode, HasName}, @@ -90,9 +91,9 @@ fn check_impl(ra_fixture: &str, allow_none: bool, only_types: 
bool, display_sour let (db, files) = TestDB::with_many_files(ra_fixture); let mut had_annotations = false; - let mut mismatches = HashMap::new(); - let mut types = HashMap::new(); - let mut adjustments = HashMap::<_, Vec<_>>::new(); + let mut mismatches = FxHashMap::default(); + let mut types = FxHashMap::default(); + let mut adjustments = FxHashMap::<_, Vec<_>>::default(); for (file_id, annotations) in db.extract_annotations() { for (range, expected) in annotations { let file_range = FileRange { file_id, range }; diff --git a/crates/hir-ty/src/traits.rs b/crates/hir-ty/src/traits.rs index 3a1a4e63ea..5303182d8c 100644 --- a/crates/hir-ty/src/traits.rs +++ b/crates/hir-ty/src/traits.rs @@ -187,7 +187,7 @@ struct LoggingRustIrDatabaseLoggingOnDrop<'a>(LoggingRustIrDatabase { fn drop(&mut self) { - eprintln!("chalk program:\n{}", self.0); + tracing::info!("chalk program:\n{}", self.0); } } diff --git a/crates/hir/src/diagnostics.rs b/crates/hir/src/diagnostics.rs index 2d8f1dbad5..b161265cd9 100644 --- a/crates/hir/src/diagnostics.rs +++ b/crates/hir/src/diagnostics.rs @@ -546,9 +546,7 @@ impl AnyDiagnostic { source_map.pat_syntax(pat).expect("unexpected synthetic"); // cast from Either -> Either<_, Pat> - let Some(ptr) = AstPtr::try_from_raw(value.syntax_node_ptr()) else { - return None; - }; + let ptr = AstPtr::try_from_raw(value.syntax_node_ptr())?; InFile { file_id, value: ptr } } }; diff --git a/crates/ide-assists/src/handlers/desugar_doc_comment.rs b/crates/ide-assists/src/handlers/desugar_doc_comment.rs index c859e98524..d264928046 100644 --- a/crates/ide-assists/src/handlers/desugar_doc_comment.rs +++ b/crates/ide-assists/src/handlers/desugar_doc_comment.rs @@ -27,9 +27,7 @@ use crate::{ pub(crate) fn desugar_doc_comment(acc: &mut Assists, ctx: &AssistContext<'_>) -> Option<()> { let comment = ctx.find_token_at_offset::()?; // Only allow doc comments - let Some(placement) = comment.kind().doc else { - return None; - }; + let placement = comment.kind().doc?; 
// Only allow comments which are alone on their line if let Some(prev) = comment.syntax().prev_token() { diff --git a/crates/ide-assists/src/handlers/extract_module.rs b/crates/ide-assists/src/handlers/extract_module.rs index 30c3983dc4..af834c8a53 100644 --- a/crates/ide-assists/src/handlers/extract_module.rs +++ b/crates/ide-assists/src/handlers/extract_module.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - iter, -}; +use std::iter; use hir::{HasSource, HirFileIdExt, ModuleSource}; use ide_db::{ @@ -9,6 +6,7 @@ use ide_db::{ base_db::FileId, defs::{Definition, NameClass, NameRefClass}, search::{FileReference, SearchScope}, + FxHashMap, FxHashSet, }; use itertools::Itertools; use smallvec::SmallVec; @@ -235,9 +233,9 @@ impl Module { fn get_usages_and_record_fields( &self, ctx: &AssistContext<'_>, - ) -> (HashMap>, Vec) { + ) -> (FxHashMap>, Vec) { let mut adt_fields = Vec::new(); - let mut refs: HashMap> = HashMap::new(); + let mut refs: FxHashMap> = FxHashMap::default(); //Here impl is not included as each item inside impl will be tied to the parent of //implementing block(a struct, enum, etc), if the parent is in selected module, it will @@ -320,7 +318,7 @@ impl Module { &self, ctx: &AssistContext<'_>, node_def: Definition, - refs_in_files: &mut HashMap>, + refs_in_files: &mut FxHashMap>, ) { for (file_id, references) in node_def.usages(&ctx.sema).all() { let source_file = ctx.sema.parse(file_id); @@ -400,7 +398,7 @@ impl Module { ctx: &AssistContext<'_>, ) -> Vec { let mut import_paths_to_be_removed: Vec = vec![]; - let mut node_set: HashSet = HashSet::new(); + let mut node_set: FxHashSet = FxHashSet::default(); for item in self.body_items.clone() { for x in item.syntax().descendants() { diff --git a/crates/ide-assists/src/handlers/generate_delegate_methods.rs b/crates/ide-assists/src/handlers/generate_delegate_methods.rs index d59bd71d31..1f92c39ad4 100644 --- a/crates/ide-assists/src/handlers/generate_delegate_methods.rs +++ 
b/crates/ide-assists/src/handlers/generate_delegate_methods.rs @@ -1,7 +1,5 @@ -use std::collections::HashSet; - use hir::{self, HasCrate, HasVisibility}; -use ide_db::path_transform::PathTransform; +use ide_db::{path_transform::PathTransform, FxHashSet}; use syntax::{ ast::{ self, edit_in_place::Indent, make, AstNode, HasGenericParams, HasName, HasVisibility as _, @@ -71,7 +69,7 @@ pub(crate) fn generate_delegate_methods(acc: &mut Assists, ctx: &AssistContext<' let sema_field_ty = ctx.sema.resolve_type(&field_ty)?; let mut methods = vec![]; - let mut seen_names = HashSet::new(); + let mut seen_names = FxHashSet::default(); for ty in sema_field_ty.autoderef(ctx.db()) { let krate = ty.krate(ctx.db()); diff --git a/crates/ide-assists/src/handlers/generate_delegate_trait.rs b/crates/ide-assists/src/handlers/generate_delegate_trait.rs index 3964b14f47..bc66f6cead 100644 --- a/crates/ide-assists/src/handlers/generate_delegate_trait.rs +++ b/crates/ide-assists/src/handlers/generate_delegate_trait.rs @@ -502,9 +502,7 @@ fn generate_args_for_impl( trait_params: &Option, old_trait_args: &FxHashSet, ) -> Option { - let Some(old_impl_args) = old_impl_gpl.map(|gpl| gpl.to_generic_args().generic_args()) else { - return None; - }; + let old_impl_args = old_impl_gpl.map(|gpl| gpl.to_generic_args().generic_args())?; // Create pairs of the args of `self_ty` and corresponding `field_ty` to // form the substitution list let mut arg_substs = FxHashMap::default(); diff --git a/crates/ide-assists/src/handlers/inline_type_alias.rs b/crates/ide-assists/src/handlers/inline_type_alias.rs index 5982e9d61d..e2f3d9edcd 100644 --- a/crates/ide-assists/src/handlers/inline_type_alias.rs +++ b/crates/ide-assists/src/handlers/inline_type_alias.rs @@ -3,12 +3,12 @@ // - Remove unused aliases if there are no longer any users, see inline_call.rs. 
use hir::{HasSource, PathResolution}; +use ide_db::FxHashMap; use ide_db::{ defs::Definition, imports::insert_use::ast_to_remove_for_path_in_use_stmt, search::FileReference, }; use itertools::Itertools; -use std::collections::HashMap; use syntax::{ ast::{self, make, HasGenericParams, HasName}, ted, AstNode, NodeOrToken, SyntaxNode, @@ -189,14 +189,14 @@ fn inline(alias_def: &ast::TypeAlias, alias_instance: &ast::PathType) -> Option< Some(repl) } -struct LifetimeMap(HashMap); +struct LifetimeMap(FxHashMap); impl LifetimeMap { fn new( instance_args: &Option, alias_generics: &ast::GenericParamList, ) -> Option { - let mut inner = HashMap::new(); + let mut inner = FxHashMap::default(); let wildcard_lifetime = make::lifetime("'_"); let lifetimes = alias_generics @@ -231,14 +231,14 @@ impl LifetimeMap { } } -struct ConstAndTypeMap(HashMap); +struct ConstAndTypeMap(FxHashMap); impl ConstAndTypeMap { fn new( instance_args: &Option, alias_generics: &ast::GenericParamList, ) -> Option { - let mut inner = HashMap::new(); + let mut inner = FxHashMap::default(); let instance_generics = generic_args_to_const_and_type_generics(instance_args); let alias_generics = generic_param_list_to_const_and_type_generics(alias_generics); diff --git a/crates/ide-assists/src/handlers/merge_match_arms.rs b/crates/ide-assists/src/handlers/merge_match_arms.rs index aae9f20d4e..4608e9494b 100644 --- a/crates/ide-assists/src/handlers/merge_match_arms.rs +++ b/crates/ide-assists/src/handlers/merge_match_arms.rs @@ -1,5 +1,6 @@ use hir::Type; -use std::{collections::HashMap, iter::successors}; +use ide_db::FxHashMap; +use std::iter::successors; use syntax::{ algo::neighbor, ast::{self, AstNode, HasName}, @@ -95,7 +96,7 @@ fn contains_placeholder(a: &ast::MatchArm) -> bool { } fn are_same_types( - current_arm_types: &HashMap>, + current_arm_types: &FxHashMap>, arm: &ast::MatchArm, ctx: &AssistContext<'_>, ) -> bool { @@ -114,11 +115,11 @@ fn are_same_types( fn get_arm_types( context: 
&AssistContext<'_>, arm: &ast::MatchArm, -) -> HashMap> { - let mut mapping: HashMap> = HashMap::new(); +) -> FxHashMap> { + let mut mapping: FxHashMap> = FxHashMap::default(); fn recurse( - map: &mut HashMap>, + map: &mut FxHashMap>, ctx: &AssistContext<'_>, pat: &Option, ) { diff --git a/crates/ide-assists/src/handlers/remove_unused_imports.rs b/crates/ide-assists/src/handlers/remove_unused_imports.rs index 35bf84c434..d67b259d2f 100644 --- a/crates/ide-assists/src/handlers/remove_unused_imports.rs +++ b/crates/ide-assists/src/handlers/remove_unused_imports.rs @@ -1,11 +1,11 @@ -use std::collections::{hash_map::Entry, HashMap}; +use std::collections::hash_map::Entry; use hir::{HirFileIdExt, InFile, InRealFile, Module, ModuleSource}; use ide_db::{ base_db::FileRange, defs::Definition, search::{FileReference, ReferenceCategory, SearchScope}, - RootDatabase, + FxHashMap, RootDatabase, }; use syntax::{ast, AstNode}; use text_edit::TextRange; @@ -44,7 +44,7 @@ pub(crate) fn remove_unused_imports(acc: &mut Assists, ctx: &AssistContext<'_>) let uses = uses_up.chain(uses_down).collect::>(); // Maps use nodes to the scope that we should search through to find - let mut search_scopes = HashMap::>::new(); + let mut search_scopes = FxHashMap::>::default(); // iterator over all unused use trees let mut unused = uses diff --git a/crates/ide-assists/src/handlers/unwrap_result_return_type.rs b/crates/ide-assists/src/handlers/unwrap_result_return_type.rs index 03e6dfebeb..8a9e669630 100644 --- a/crates/ide-assists/src/handlers/unwrap_result_return_type.rs +++ b/crates/ide-assists/src/handlers/unwrap_result_return_type.rs @@ -47,9 +47,7 @@ pub(crate) fn unwrap_result_return_type(acc: &mut Assists, ctx: &AssistContext<' return None; } - let Some(ok_type) = unwrap_result_type(type_ref) else { - return None; - }; + let ok_type = unwrap_result_type(type_ref)?; acc.add( AssistId("unwrap_result_return_type", AssistKind::RefactorRewrite), diff --git 
a/crates/ide-assists/src/utils/suggest_name.rs b/crates/ide-assists/src/utils/suggest_name.rs index 78dee24a6d..74377f8ec6 100644 --- a/crates/ide-assists/src/utils/suggest_name.rs +++ b/crates/ide-assists/src/utils/suggest_name.rs @@ -1,9 +1,7 @@ //! This module contains functions to suggest names for expressions, functions and other items -use std::collections::HashSet; - use hir::Semantics; -use ide_db::RootDatabase; +use ide_db::{FxHashSet, RootDatabase}; use itertools::Itertools; use stdx::to_lower_snake_case; use syntax::{ @@ -78,7 +76,7 @@ pub(crate) fn for_unique_generic_name( ast::GenericParam::TypeParam(t) => t.name().unwrap().to_string(), p => p.to_string(), }) - .collect::>(); + .collect::>(); let mut name = name.to_string(); let base_len = name.len(); let mut count = 0; diff --git a/crates/ide-db/src/tests/sourcegen_lints.rs b/crates/ide-db/src/tests/sourcegen_lints.rs index c8cf87d3c2..a165470b57 100644 --- a/crates/ide-db/src/tests/sourcegen_lints.rs +++ b/crates/ide-db/src/tests/sourcegen_lints.rs @@ -241,6 +241,7 @@ fn unescape(s: &str) -> String { s.replace(r#"\""#, "").replace(r#"\n"#, "\n").replace(r#"\r"#, "") } +#[allow(clippy::print_stderr)] fn generate_descriptor_clippy(buf: &mut String, path: &Path) { let file_content = std::fs::read_to_string(path).unwrap(); let mut clippy_lints: Vec = Vec::new(); diff --git a/crates/ide-diagnostics/src/lib.rs b/crates/ide-diagnostics/src/lib.rs index 5ad7069e31..ad5e66c5cc 100644 --- a/crates/ide-diagnostics/src/lib.rs +++ b/crates/ide-diagnostics/src/lib.rs @@ -73,8 +73,6 @@ mod handlers { #[cfg(test)] mod tests; -use std::collections::HashMap; - use hir::{diagnostics::AnyDiagnostic, InFile, Semantics}; use ide_db::{ assists::{Assist, AssistId, AssistKind, AssistResolveStrategy}, @@ -413,18 +411,18 @@ pub fn diagnostics( // `__RA_EVERY_LINT` is a fake lint group to allow every lint in proc macros -static RUSTC_LINT_GROUPS_DICT: Lazy>> = +static RUSTC_LINT_GROUPS_DICT: Lazy>> = Lazy::new(|| 
build_group_dict(DEFAULT_LINT_GROUPS, &["warnings", "__RA_EVERY_LINT"], "")); -static CLIPPY_LINT_GROUPS_DICT: Lazy>> = +static CLIPPY_LINT_GROUPS_DICT: Lazy>> = Lazy::new(|| build_group_dict(CLIPPY_LINT_GROUPS, &["__RA_EVERY_LINT"], "clippy::")); fn build_group_dict( lint_group: &'static [LintGroup], all_groups: &'static [&'static str], prefix: &'static str, -) -> HashMap<&'static str, Vec<&'static str>> { - let mut r: HashMap<&str, Vec<&str>> = HashMap::new(); +) -> FxHashMap<&'static str, Vec<&'static str>> { + let mut r: FxHashMap<&str, Vec<&str>> = FxHashMap::default(); for g in lint_group { for child in g.children { r.entry(child.strip_prefix(prefix).unwrap()) diff --git a/crates/ide-diagnostics/src/tests.rs b/crates/ide-diagnostics/src/tests.rs index f394a491b5..792d4a371e 100644 --- a/crates/ide-diagnostics/src/tests.rs +++ b/crates/ide-diagnostics/src/tests.rs @@ -1,3 +1,4 @@ +#![allow(clippy::print_stderr)] #[cfg(not(feature = "in-rust-tree"))] mod sourcegen; diff --git a/crates/ide-ssr/src/matching.rs b/crates/ide-ssr/src/matching.rs index 060897a685..81f00d51a3 100644 --- a/crates/ide-ssr/src/matching.rs +++ b/crates/ide-ssr/src/matching.rs @@ -706,7 +706,7 @@ where // we are trying to match that bit of code. This saves us having to pass a boolean into all the bits // of code that can make the decision to not match. thread_local! 
{ - pub static RECORDING_MATCH_FAIL_REASONS: Cell = Cell::new(false); + pub static RECORDING_MATCH_FAIL_REASONS: Cell = const { Cell::new(false) }; } fn recording_match_fail_reasons() -> bool { diff --git a/crates/ide-ssr/src/tests.rs b/crates/ide-ssr/src/tests.rs index 7c7d146cb4..e608b0a7c4 100644 --- a/crates/ide-ssr/src/tests.rs +++ b/crates/ide-ssr/src/tests.rs @@ -113,6 +113,7 @@ fn assert_ssr_transforms(rules: &[&str], input: &str, expected: Expect) { expected.assert_eq(&actual); } +#[allow(clippy::print_stdout)] fn print_match_debug_info(match_finder: &MatchFinder<'_>, file_id: FileId, snippet: &str) { let debug_info = match_finder.debug_where_text_equal(file_id, snippet); println!( diff --git a/crates/ide/src/doc_links/tests.rs b/crates/ide/src/doc_links/tests.rs index 3bb0fc6064..60e8d29a71 100644 --- a/crates/ide/src/doc_links/tests.rs +++ b/crates/ide/src/doc_links/tests.rs @@ -29,9 +29,6 @@ fn check_external_docs( let web_url = links.web_url; let local_url = links.local_url; - println!("web_url: {:?}", web_url); - println!("local_url: {:?}", local_url); - match (expect_web_url, web_url) { (Some(expect), Some(url)) => expect.assert_eq(&url), (None, None) => (), diff --git a/crates/ide/src/references.rs b/crates/ide/src/references.rs index bdda25a111..dcdc6118a3 100644 --- a/crates/ide/src/references.rs +++ b/crates/ide/src/references.rs @@ -9,8 +9,6 @@ //! at the index that the match starts at and its tree parent is //! resolved to the search element definition, we get a reference. 
-use std::collections::HashMap; - use hir::{DescendPreference, PathResolution, Semantics}; use ide_db::{ base_db::FileId, @@ -79,7 +77,7 @@ pub(crate) fn find_all_refs( .collect(), ) }) - .collect::, _>>(); + .collect::>>(); let declaration = match def { Definition::Module(module) => { Some(NavigationTarget::from_module_to_decl(sema.db, module)) diff --git a/crates/ide/src/static_index.rs b/crates/ide/src/static_index.rs index 5b7094e6bc..dee5afbf8d 100644 --- a/crates/ide/src/static_index.rs +++ b/crates/ide/src/static_index.rs @@ -1,14 +1,12 @@ //! This module provides `StaticIndex` which is used for powering //! read-only code browsers and emitting LSIF -use std::collections::HashMap; - use hir::{db::HirDatabase, Crate, HirFileIdExt, Module}; -use ide_db::helpers::get_definition; use ide_db::{ base_db::{FileId, FileRange, SourceDatabaseExt}, defs::Definition, - FxHashSet, RootDatabase, + helpers::get_definition, + FxHashMap, FxHashSet, RootDatabase, }; use syntax::{AstNode, SyntaxKind::*, TextRange, T}; @@ -31,7 +29,7 @@ pub struct StaticIndex<'a> { pub tokens: TokenStore, analysis: &'a Analysis, db: &'a RootDatabase, - def_map: HashMap, + def_map: FxHashMap, } #[derive(Debug)] @@ -232,14 +230,13 @@ impl StaticIndex<'_> { #[cfg(test)] mod tests { use crate::{fixture, StaticIndex}; - use ide_db::base_db::FileRange; - use std::collections::HashSet; + use ide_db::{base_db::FileRange, FxHashSet}; use syntax::TextSize; fn check_all_ranges(ra_fixture: &str) { let (analysis, ranges) = fixture::annotations_without_marker(ra_fixture); let s = StaticIndex::compute(&analysis); - let mut range_set: HashSet<_> = ranges.iter().map(|it| it.0).collect(); + let mut range_set: FxHashSet<_> = ranges.iter().map(|it| it.0).collect(); for f in s.files { for (range, _) in f.tokens { let it = FileRange { file_id: f.file_id, range }; @@ -258,7 +255,7 @@ mod tests { fn check_definitions(ra_fixture: &str) { let (analysis, ranges) = fixture::annotations_without_marker(ra_fixture); let s = 
StaticIndex::compute(&analysis); - let mut range_set: HashSet<_> = ranges.iter().map(|it| it.0).collect(); + let mut range_set: FxHashSet<_> = ranges.iter().map(|it| it.0).collect(); for (_, t) in s.tokens.iter() { if let Some(t) = t.definition { if t.range.start() == TextSize::from(0) { diff --git a/crates/limit/src/lib.rs b/crates/limit/src/lib.rs index 7f4b00df0b..27471db6a3 100644 --- a/crates/limit/src/lib.rs +++ b/crates/limit/src/lib.rs @@ -55,13 +55,12 @@ impl Limit { if other <= old_max || old_max == 0 { break; } - if self - .max - .compare_exchange_weak(old_max, other, Ordering::Relaxed, Ordering::Relaxed) - .is_ok() - { - eprintln!("new max: {other}"); - } + _ = self.max.compare_exchange_weak( + old_max, + other, + Ordering::Relaxed, + Ordering::Relaxed, + ); } Ok(()) diff --git a/crates/mbe/src/syntax_bridge/tests.rs b/crates/mbe/src/syntax_bridge/tests.rs index e5569138db..11d1a72879 100644 --- a/crates/mbe/src/syntax_bridge/tests.rs +++ b/crates/mbe/src/syntax_bridge/tests.rs @@ -1,5 +1,4 @@ -use std::collections::HashMap; - +use rustc_hash::FxHashMap; use syntax::{ast, AstNode}; use test_utils::extract_annotations; use tt::{ @@ -12,7 +11,7 @@ use crate::{syntax_node_to_token_tree, DummyTestSpanData, DummyTestSpanMap, DUMM fn check_punct_spacing(fixture: &str) { let source_file = ast::SourceFile::parse(fixture).ok().unwrap(); let subtree = syntax_node_to_token_tree(source_file.syntax(), DummyTestSpanMap, DUMMY); - let mut annotations: HashMap<_, _> = extract_annotations(fixture) + let mut annotations: FxHashMap<_, _> = extract_annotations(fixture) .into_iter() .map(|(range, annotation)| { let spacing = match annotation.as_str() { diff --git a/crates/parser/src/tests/sourcegen_inline_tests.rs b/crates/parser/src/tests/sourcegen_inline_tests.rs index bd9e188e4d..c02fb02c9d 100644 --- a/crates/parser/src/tests/sourcegen_inline_tests.rs +++ b/crates/parser/src/tests/sourcegen_inline_tests.rs @@ -1,5 +1,6 @@ //! 
This module greps parser's code for specially formatted comments and turns //! them into tests. +#![allow(clippy::disallowed_types, clippy::print_stdout)] use std::{ collections::HashMap, diff --git a/crates/proc-macro-api/Cargo.toml b/crates/proc-macro-api/Cargo.toml index 49a0979f4f..cf01b94c0a 100644 --- a/crates/proc-macro-api/Cargo.toml +++ b/crates/proc-macro-api/Cargo.toml @@ -23,6 +23,7 @@ serde.workspace = true serde_json = { workspace = true, features = ["unbounded_depth"] } tracing.workspace = true triomphe.workspace = true +rustc-hash.workspace = true memmap2 = "0.5.4" snap = "1.1.0" indexmap = "2.1.0" @@ -40,4 +41,4 @@ base-db.workspace = true la-arena.workspace = true [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/crates/proc-macro-api/src/msg/flat.rs b/crates/proc-macro-api/src/msg/flat.rs index 8dfaba5262..ee7afbdd92 100644 --- a/crates/proc-macro-api/src/msg/flat.rs +++ b/crates/proc-macro-api/src/msg/flat.rs @@ -35,10 +35,11 @@ //! as we don't have bincode in Cargo.toml yet, lets stick with serde_json for //! the time being. 
-use std::collections::{HashMap, VecDeque}; +use std::collections::VecDeque; use indexmap::IndexSet; use la_arena::RawIdx; +use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; use span::{ErasedFileAstId, FileId, Span, SpanAnchor, SyntaxContextId}; use text_size::TextRange; @@ -129,7 +130,7 @@ impl FlatTree { span_data_table: &mut SpanDataIndexMap, ) -> FlatTree { let mut w = Writer { - string_table: HashMap::new(), + string_table: FxHashMap::default(), work: VecDeque::new(), span_data_table, @@ -158,7 +159,7 @@ impl FlatTree { pub fn new_raw(subtree: &tt::Subtree, version: u32) -> FlatTree { let mut w = Writer { - string_table: HashMap::new(), + string_table: FxHashMap::default(), work: VecDeque::new(), span_data_table: &mut (), @@ -340,7 +341,7 @@ impl InternableSpan for Span { struct Writer<'a, 'span, S: InternableSpan> { work: VecDeque<(usize, &'a tt::Subtree)>, - string_table: HashMap<&'a str, u32>, + string_table: FxHashMap<&'a str, u32>, span_data_table: &'span mut S::Table, subtree: Vec, diff --git a/crates/proc-macro-srv-cli/src/main.rs b/crates/proc-macro-srv-cli/src/main.rs index a36200cdb4..df0ae3171f 100644 --- a/crates/proc-macro-srv-cli/src/main.rs +++ b/crates/proc-macro-srv-cli/src/main.rs @@ -1,6 +1,8 @@ //! A standalone binary for `proc-macro-srv`. //! 
Driver for proc macro server #![cfg_attr(feature = "in-rust-tree", feature(rustc_private))] +#![allow(clippy::print_stderr)] + #[cfg(feature = "in-rust-tree")] extern crate rustc_driver as _; diff --git a/crates/proc-macro-srv/proc-macro-test/imp/src/lib.rs b/crates/proc-macro-srv/proc-macro-test/imp/src/lib.rs index d9018b1b87..5f8530d08c 100644 --- a/crates/proc-macro-srv/proc-macro-test/imp/src/lib.rs +++ b/crates/proc-macro-srv/proc-macro-test/imp/src/lib.rs @@ -2,6 +2,7 @@ #![warn(rust_2018_idioms, unused_lifetimes)] #![feature(proc_macro_span, proc_macro_def_site)] +#![allow(clippy::all)] use proc_macro::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree}; diff --git a/crates/profile/src/lib.rs b/crates/profile/src/lib.rs index 38c5b3fc9c..3639981560 100644 --- a/crates/profile/src/lib.rs +++ b/crates/profile/src/lib.rs @@ -21,7 +21,7 @@ pub use countme; /// almost zero. pub use countme::Count; -thread_local!(static IN_SCOPE: RefCell = RefCell::new(false)); +thread_local!(static IN_SCOPE: RefCell = const { RefCell::new(false) }); /// Allows to check if the current code is within some dynamic scope, can be /// useful during debugging to figure out why a function is called. @@ -88,6 +88,7 @@ pub fn cpu_span() -> CpuSpan { } #[cfg(not(feature = "cpu_profiler"))] + #[allow(clippy::print_stderr)] { eprintln!( r#"cpu profiling is disabled, uncomment `default = [ "cpu_profiler" ]` in Cargo.toml to enable."# diff --git a/crates/profile/src/stop_watch.rs b/crates/profile/src/stop_watch.rs index 814a025740..990b59cad4 100644 --- a/crates/profile/src/stop_watch.rs +++ b/crates/profile/src/stop_watch.rs @@ -1,4 +1,7 @@ //! Like `std::time::Instant`, but also measures memory & CPU cycles. 
+ +#![allow(clippy::print_stderr)] + use std::{ fmt, time::{Duration, Instant}, diff --git a/crates/project-model/src/cargo_workspace.rs b/crates/project-model/src/cargo_workspace.rs index 361f8721a4..5926e5a5f7 100644 --- a/crates/project-model/src/cargo_workspace.rs +++ b/crates/project-model/src/cargo_workspace.rs @@ -399,7 +399,7 @@ impl CargoWorkspace { CargoWorkspace { packages, targets, workspace_root, target_directory } } - pub fn packages(&self) -> impl Iterator + ExactSizeIterator + '_ { + pub fn packages(&self) -> impl ExactSizeIterator + '_ { self.packages.iter().map(|(id, _pkg)| id) } diff --git a/crates/project-model/src/sysroot.rs b/crates/project-model/src/sysroot.rs index c24c0196dd..9e19a52583 100644 --- a/crates/project-model/src/sysroot.rs +++ b/crates/project-model/src/sysroot.rs @@ -57,7 +57,7 @@ impl Stitched { self.by_name("proc_macro") } - pub(crate) fn crates(&self) -> impl Iterator + ExactSizeIterator + '_ { + pub(crate) fn crates(&self) -> impl ExactSizeIterator + '_ { self.crates.iter().map(|(id, _data)| id) } diff --git a/crates/rust-analyzer/src/bin/main.rs b/crates/rust-analyzer/src/bin/main.rs index 66b680571a..72dc67b48a 100644 --- a/crates/rust-analyzer/src/bin/main.rs +++ b/crates/rust-analyzer/src/bin/main.rs @@ -3,7 +3,9 @@ //! Based on cli flags, either spawns an LSP server, or runs a batch analysis #![warn(rust_2018_idioms, unused_lifetimes)] +#![allow(clippy::print_stdout, clippy::print_stderr)] #![cfg_attr(feature = "in-rust-tree", feature(rustc_private))] + #[cfg(feature = "in-rust-tree")] extern crate rustc_driver as _; diff --git a/crates/rust-analyzer/src/cli.rs b/crates/rust-analyzer/src/cli.rs index 00670f2cb4..0bd6677b66 100644 --- a/crates/rust-analyzer/src/cli.rs +++ b/crates/rust-analyzer/src/cli.rs @@ -1,5 +1,7 @@ //! Various batch processing tasks, intended primarily for debugging. 
+#![allow(clippy::print_stdout, clippy::print_stderr)] + mod analysis_stats; mod diagnostics; pub mod flags; diff --git a/crates/rust-analyzer/src/cli/lsif.rs b/crates/rust-analyzer/src/cli/lsif.rs index 64f965e22a..1b6187f8df 100644 --- a/crates/rust-analyzer/src/cli/lsif.rs +++ b/crates/rust-analyzer/src/cli/lsif.rs @@ -1,6 +1,5 @@ //! LSIF (language server index format) generator -use std::collections::HashMap; use std::env; use std::time::Instant; @@ -16,6 +15,7 @@ use ide_db::{ use load_cargo::{load_workspace, LoadCargoConfig, ProcMacroServerChoice}; use lsp_types::{self, lsif}; use project_model::{CargoConfig, ProjectManifest, ProjectWorkspace, RustLibSource}; +use rustc_hash::FxHashMap; use vfs::{AbsPathBuf, Vfs}; use crate::{ @@ -35,10 +35,10 @@ impl Clone for Snap> { struct LsifManager<'a> { count: i32, - token_map: HashMap, - range_map: HashMap, - file_map: HashMap, - package_map: HashMap, + token_map: FxHashMap, + range_map: FxHashMap, + file_map: FxHashMap, + package_map: FxHashMap, analysis: &'a Analysis, db: &'a RootDatabase, vfs: &'a Vfs, @@ -57,10 +57,10 @@ impl LsifManager<'_> { fn new<'a>(analysis: &'a Analysis, db: &'a RootDatabase, vfs: &'a Vfs) -> LsifManager<'a> { LsifManager { count: 0, - token_map: HashMap::default(), - range_map: HashMap::default(), - file_map: HashMap::default(), - package_map: HashMap::default(), + token_map: FxHashMap::default(), + range_map: FxHashMap::default(), + file_map: FxHashMap::default(), + package_map: FxHashMap::default(), analysis, db, vfs, @@ -215,7 +215,7 @@ impl LsifManager<'_> { out_v: result_set_id.into(), })); let mut edges = token.references.iter().fold( - HashMap::<_, Vec>::new(), + FxHashMap::<_, Vec>::default(), |mut edges, it| { let entry = edges.entry((it.range.file_id, it.is_definition)).or_default(); entry.push((*self.range_map.get(&it.range).unwrap()).into()); diff --git a/crates/rust-analyzer/src/cli/rustc_tests.rs b/crates/rust-analyzer/src/cli/rustc_tests.rs index be7e434aca..64ea246a45 
100644 --- a/crates/rust-analyzer/src/cli/rustc_tests.rs +++ b/crates/rust-analyzer/src/cli/rustc_tests.rs @@ -1,8 +1,6 @@ //! Run all tests in a project, similar to `cargo test`, but using the mir interpreter. -use std::{ - cell::RefCell, collections::HashMap, fs::read_to_string, panic::AssertUnwindSafe, path::PathBuf, -}; +use std::{cell::RefCell, fs::read_to_string, panic::AssertUnwindSafe, path::PathBuf}; use hir::{Change, Crate}; use ide::{AnalysisHost, DiagnosticCode, DiagnosticsConfig}; @@ -10,6 +8,7 @@ use profile::StopWatch; use project_model::{CargoConfig, ProjectWorkspace, RustLibSource, Sysroot}; use load_cargo::{load_workspace, LoadCargoConfig, ProcMacroServerChoice}; +use rustc_hash::FxHashMap; use triomphe::Arc; use vfs::{AbsPathBuf, FileId}; use walkdir::WalkDir; @@ -27,7 +26,7 @@ struct Tester { fn string_to_diagnostic_code_leaky(code: &str) -> DiagnosticCode { thread_local! { - static LEAK_STORE: RefCell> = RefCell::new(HashMap::new()); + static LEAK_STORE: RefCell> = RefCell::new(FxHashMap::default()); } LEAK_STORE.with_borrow_mut(|s| match s.get(code) { Some(c) => *c, @@ -39,9 +38,9 @@ fn string_to_diagnostic_code_leaky(code: &str) -> DiagnosticCode { }) } -fn detect_errors_from_rustc_stderr_file(p: PathBuf) -> HashMap { +fn detect_errors_from_rustc_stderr_file(p: PathBuf) -> FxHashMap { let text = read_to_string(p).unwrap(); - let mut result = HashMap::new(); + let mut result = FxHashMap::default(); { let mut text = &*text; while let Some(p) = text.find("error[E") { @@ -106,7 +105,7 @@ impl Tester { let expected = if stderr_path.exists() { detect_errors_from_rustc_stderr_file(stderr_path) } else { - HashMap::new() + FxHashMap::default() }; let text = read_to_string(&p).unwrap(); let mut change = Change::new(); @@ -125,7 +124,7 @@ impl Tester { self.host.apply_change(change); let diagnostic_config = DiagnosticsConfig::test_sample(); - let mut actual = HashMap::new(); + let mut actual = FxHashMap::default(); let panicked = match 
std::panic::catch_unwind(|| { self.host .analysis() diff --git a/crates/rust-analyzer/src/cli/scip.rs b/crates/rust-analyzer/src/cli/scip.rs index 81622a4617..1b0cfa6a5d 100644 --- a/crates/rust-analyzer/src/cli/scip.rs +++ b/crates/rust-analyzer/src/cli/scip.rs @@ -1,10 +1,6 @@ //! SCIP generator -use std::{ - collections::{HashMap, HashSet}, - path::PathBuf, - time::Instant, -}; +use std::{path::PathBuf, time::Instant}; use ide::{ LineCol, MonikerDescriptorKind, MonikerResult, StaticIndex, StaticIndexedFile, @@ -12,6 +8,7 @@ use ide::{ }; use ide_db::LineIndexDatabase; use load_cargo::{load_workspace_at, LoadCargoConfig, ProcMacroServerChoice}; +use rustc_hash::{FxHashMap, FxHashSet}; use scip::types as scip_types; use crate::{ @@ -76,9 +73,10 @@ impl flags::Scip { }; let mut documents = Vec::new(); - let mut symbols_emitted: HashSet = HashSet::default(); - let mut tokens_to_symbol: HashMap = HashMap::new(); - let mut tokens_to_enclosing_symbol: HashMap> = HashMap::new(); + let mut symbols_emitted: FxHashSet = FxHashSet::default(); + let mut tokens_to_symbol: FxHashMap = FxHashMap::default(); + let mut tokens_to_enclosing_symbol: FxHashMap> = + FxHashMap::default(); for StaticIndexedFile { file_id, tokens, .. } in si.files { let mut local_count = 0; diff --git a/crates/rust-analyzer/src/diagnostics/to_proto.rs b/crates/rust-analyzer/src/diagnostics/to_proto.rs index f8bc66ff8e..f79ae793c9 100644 --- a/crates/rust-analyzer/src/diagnostics/to_proto.rs +++ b/crates/rust-analyzer/src/diagnostics/to_proto.rs @@ -1,9 +1,9 @@ //! This module provides the functionality needed to convert diagnostics from //! `cargo check` json format to the LSP diagnostic format. 
-use std::collections::HashMap; use flycheck::{Applicability, DiagnosticLevel, DiagnosticSpan}; use itertools::Itertools; +use rustc_hash::FxHashMap; use stdx::format_to; use vfs::{AbsPath, AbsPathBuf}; @@ -186,7 +186,7 @@ fn map_rust_child_diagnostic( return MappedRustChildDiagnostic::MessageLine(rd.message.clone()); } - let mut edit_map: HashMap> = HashMap::new(); + let mut edit_map: FxHashMap> = FxHashMap::default(); let mut suggested_replacements = Vec::new(); let mut is_preferred = true; for &span in &spans { diff --git a/crates/rust-analyzer/src/lsp/ext.rs b/crates/rust-analyzer/src/lsp/ext.rs index 35c8fad374..e23bb8e046 100644 --- a/crates/rust-analyzer/src/lsp/ext.rs +++ b/crates/rust-analyzer/src/lsp/ext.rs @@ -1,6 +1,8 @@ //! rust-analyzer extensions to the LSP. -use std::{collections::HashMap, path::PathBuf}; +#![allow(clippy::disallowed_types)] + +use std::path::PathBuf; use ide_db::line_index::WideEncoding; use lsp_types::request::Request; @@ -9,6 +11,7 @@ use lsp_types::{ PartialResultParams, Position, Range, TextDocumentIdentifier, WorkDoneProgressParams, }; use lsp_types::{PositionEncodingKind, Url}; +use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; use crate::line_index::PositionEncoding; @@ -448,12 +451,16 @@ pub struct CodeActionData { #[serde(rename_all = "camelCase")] pub struct SnippetWorkspaceEdit { #[serde(skip_serializing_if = "Option::is_none")] - pub changes: Option>>, + pub changes: Option>>, #[serde(skip_serializing_if = "Option::is_none")] pub document_changes: Option>, #[serde(skip_serializing_if = "Option::is_none")] - pub change_annotations: - Option>, + pub change_annotations: Option< + std::collections::HashMap< + lsp_types::ChangeAnnotationIdentifier, + lsp_types::ChangeAnnotation, + >, + >, } #[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)] diff --git a/crates/rust-analyzer/src/tracing/hprof.rs b/crates/rust-analyzer/src/tracing/hprof.rs index c99b551df8..9064987329 100644 --- 
a/crates/rust-analyzer/src/tracing/hprof.rs +++ b/crates/rust-analyzer/src/tracing/hprof.rs @@ -179,6 +179,7 @@ impl Node { self.go(0, filter) } + #[allow(clippy::print_stderr)] fn go(&self, level: usize, filter: &WriteFilter) { if self.duration > filter.longer_than && level < filter.depth { let duration = ms(self.duration); diff --git a/crates/rust-analyzer/tests/slow-tests/main.rs b/crates/rust-analyzer/tests/slow-tests/main.rs index 19890110d5..7a2b7497e0 100644 --- a/crates/rust-analyzer/tests/slow-tests/main.rs +++ b/crates/rust-analyzer/tests/slow-tests/main.rs @@ -9,6 +9,7 @@ //! be sure without a real client anyway. #![warn(rust_2018_idioms, unused_lifetimes)] +#![allow(clippy::disallowed_types)] #[cfg(not(feature = "in-rust-tree"))] mod sourcegen; diff --git a/crates/rust-analyzer/tests/slow-tests/tidy.rs b/crates/rust-analyzer/tests/slow-tests/tidy.rs index d3146ab767..740626dfe3 100644 --- a/crates/rust-analyzer/tests/slow-tests/tidy.rs +++ b/crates/rust-analyzer/tests/slow-tests/tidy.rs @@ -1,3 +1,4 @@ +#![allow(clippy::disallowed_types, clippy::print_stderr)] use std::{ collections::HashSet, path::{Path, PathBuf}, @@ -78,8 +79,6 @@ fn files_are_tidy() { match extension { "rs" => { let text = sh.read_file(&path).unwrap(); - check_todo(&path, &text); - check_dbg(&path, &text); check_test_attrs(&path, &text); check_trailing_ws(&path, &text); tidy_docs.visit(&path, &text); @@ -205,74 +204,6 @@ Zlib OR Apache-2.0 OR MIT assert_eq!(licenses, expected); } -fn check_todo(path: &Path, text: &str) { - let need_todo = &[ - // This file itself obviously needs to use todo (<- like this!). - "tests/tidy.rs", - // Some of our assists generate `todo!()`. - "handlers/add_turbo_fish.rs", - "handlers/generate_function.rs", - "handlers/add_missing_match_arms.rs", - "handlers/replace_derive_with_manual_impl.rs", - // To support generating `todo!()` in assists, we have `expr_todo()` in - // `ast::make`. 
- "ast/make.rs", - // The documentation in string literals may contain anything for its own purposes - "ide-db/src/generated/lints.rs", - "ide-assists/src/utils/gen_trait_fn_body.rs", - "ide-assists/src/tests/generated.rs", - // The tests for missing fields - "ide-diagnostics/src/handlers/missing_fields.rs", - ]; - if need_todo.iter().any(|p| path.ends_with(p)) { - return; - } - if text.contains("TODO") || text.contains("TOOD") || text.contains("todo!") { - // Generated by an assist - if text.contains("${0:todo!()}") { - return; - } - - panic!( - "\nTODO markers or todo! macros should not be committed to the master branch,\n\ - use FIXME instead\n\ - {}\n", - path.display(), - ) - } -} - -fn check_dbg(path: &Path, text: &str) { - let need_dbg = &[ - // This file itself obviously needs to use dbg. - "slow-tests/tidy.rs", - // Assists to remove `dbg!()` - "handlers/remove_dbg.rs", - // We have .dbg postfix - "ide-completion/src/completions/postfix.rs", - "ide-completion/src/completions/keyword.rs", - "ide-completion/src/tests/expression.rs", - "ide-completion/src/tests/proc_macros.rs", - // The documentation in string literals may contain anything for its own purposes - "ide-completion/src/lib.rs", - "ide-db/src/generated/lints.rs", - // test for doc test for remove_dbg - "src/tests/generated.rs", - // `expect!` string can contain `dbg!` (due to .dbg postfix) - "ide-completion/src/tests/special.rs", - ]; - if need_dbg.iter().any(|p| path.ends_with(p)) { - return; - } - if text.contains("dbg!") { - panic!( - "\ndbg! 
macros should not be committed to the master branch,\n\ - {}\n", - path.display(), - ) - } -} - fn check_test_attrs(path: &Path, text: &str) { let ignore_rule = "https://github.com/rust-lang/rust-analyzer/blob/master/docs/dev/style.md#ignore"; diff --git a/crates/sourcegen/src/lib.rs b/crates/sourcegen/src/lib.rs index 18fa77fd97..ac3aa31b57 100644 --- a/crates/sourcegen/src/lib.rs +++ b/crates/sourcegen/src/lib.rs @@ -167,6 +167,7 @@ pub fn add_preamble(generator: &'static str, mut text: String) -> String { /// Checks that the `file` has the specified `contents`. If that is not the /// case, updates the file and then fails the test. +#[allow(clippy::print_stderr)] pub fn ensure_file_contents(file: &Path, contents: &str) { if let Ok(old_contents) = fs::read_to_string(file) { if normalize_newlines(&old_contents) == normalize_newlines(contents) { diff --git a/crates/stdx/Cargo.toml b/crates/stdx/Cargo.toml index 2e3f9113b0..6cca116335 100644 --- a/crates/stdx/Cargo.toml +++ b/crates/stdx/Cargo.toml @@ -29,4 +29,4 @@ winapi = { version = "0.3.9", features = ["winerror"] } # default = [ "backtrace" ] [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/crates/stdx/src/anymap.rs b/crates/stdx/src/anymap.rs index 9990f8b086..899cd8ac6b 100644 --- a/crates/stdx/src/anymap.rs +++ b/crates/stdx/src/anymap.rs @@ -54,12 +54,13 @@ use core::any::{Any, TypeId}; use core::hash::BuildHasherDefault; use core::marker::PhantomData; -use ::std::collections::hash_map::{self, HashMap}; +use ::std::collections::hash_map; /// Raw access to the underlying `HashMap`. /// /// This alias is provided for convenience because of the ugly third generic parameter. -pub type RawMap = HashMap, BuildHasherDefault>; +#[allow(clippy::disallowed_types)] // Uses a custom hasher +pub type RawMap = hash_map::HashMap, BuildHasherDefault>; /// A collection containing zero or one values for any given type and allowing convenient, /// type-safe access to those values. 
diff --git a/crates/stdx/src/lib.rs b/crates/stdx/src/lib.rs index 07b7827228..9a9ebae74e 100644 --- a/crates/stdx/src/lib.rs +++ b/crates/stdx/src/lib.rs @@ -23,12 +23,14 @@ pub fn is_ci() -> bool { } #[must_use] +#[allow(clippy::print_stderr)] pub fn timeit(label: &'static str) -> impl Drop { let start = Instant::now(); defer(move || eprintln!("{}: {:.2?}", label, start.elapsed())) } /// Prints backtrace to stderr, useful for debugging. +#[allow(clippy::print_stderr)] pub fn print_backtrace() { #[cfg(feature = "backtrace")] eprintln!("{:?}", backtrace::Backtrace::new()); diff --git a/crates/stdx/src/panic_context.rs b/crates/stdx/src/panic_context.rs index c3e8813b0e..cf3d85b4da 100644 --- a/crates/stdx/src/panic_context.rs +++ b/crates/stdx/src/panic_context.rs @@ -18,6 +18,7 @@ pub struct PanicContext { } impl PanicContext { + #[allow(clippy::print_stderr)] fn init() { let default_hook = panic::take_hook(); let hook = move |panic_info: &panic::PanicInfo<'_>| { @@ -43,7 +44,7 @@ impl Drop for PanicContext { fn with_ctx(f: impl FnOnce(&mut Vec)) { thread_local! 
{ - static CTX: RefCell> = RefCell::new(Vec::new()); + static CTX: RefCell> = const { RefCell::new(Vec::new()) }; } CTX.with(|ctx| f(&mut ctx.borrow_mut())); } diff --git a/crates/stdx/src/rand.rs b/crates/stdx/src/rand.rs index 64aa57eae0..115a073dab 100644 --- a/crates/stdx/src/rand.rs +++ b/crates/stdx/src/rand.rs @@ -14,8 +14,7 @@ pub fn shuffle(slice: &mut [T], mut rand_index: impl FnMut(usize) -> usize) { } pub fn seed() -> u64 { - use std::collections::hash_map::RandomState; use std::hash::{BuildHasher, Hasher}; - - RandomState::new().build_hasher().finish() + #[allow(clippy::disallowed_types)] + std::collections::hash_map::RandomState::new().build_hasher().finish() } diff --git a/crates/syntax/src/fuzz.rs b/crates/syntax/src/fuzz.rs index 239a89f9b2..15e68fc575 100644 --- a/crates/syntax/src/fuzz.rs +++ b/crates/syntax/src/fuzz.rs @@ -46,6 +46,7 @@ impl CheckReparse { Some(CheckReparse { text, edit, edited_text }) } + #[allow(clippy::print_stderr)] pub fn run(&self) { let parse = SourceFile::parse(&self.text); let new_parse = parse.reparse(&self.edit); diff --git a/crates/syntax/src/tests/sourcegen_ast.rs b/crates/syntax/src/tests/sourcegen_ast.rs index c2e921e4b6..ccb13a0d93 100644 --- a/crates/syntax/src/tests/sourcegen_ast.rs +++ b/crates/syntax/src/tests/sourcegen_ast.rs @@ -3,14 +3,12 @@ //! Specifically, it generates the `SyntaxKind` enum and a number of newtype //! wrappers around `SyntaxNode` which implement `syntax::AstNode`. 
-use std::{ - collections::{BTreeSet, HashSet}, - fmt::Write, -}; +use std::{collections::BTreeSet, fmt::Write}; use itertools::Itertools; use proc_macro2::{Punct, Spacing}; use quote::{format_ident, quote}; +use rustc_hash::FxHashSet; use ungrammar::{Grammar, Rule}; use crate::tests::ast_src::{ @@ -278,7 +276,7 @@ fn generate_nodes(kinds: KindsSrc<'_>, grammar: &AstSrc) -> String { } }); - let defined_nodes: HashSet<_> = node_names.collect(); + let defined_nodes: FxHashSet<_> = node_names.collect(); for node in kinds .nodes diff --git a/crates/test-utils/src/lib.rs b/crates/test-utils/src/lib.rs index e48b273130..854b613ddf 100644 --- a/crates/test-utils/src/lib.rs +++ b/crates/test-utils/src/lib.rs @@ -7,6 +7,7 @@ //! * marks (see the eponymous module). #![warn(rust_2018_idioms, unused_lifetimes)] +#![allow(clippy::print_stderr)] mod assert_linear; pub mod bench_fixture; @@ -424,7 +425,7 @@ pub fn format_diff(chunks: Vec>) -> String { /// /// A benchmark test looks like this: /// -/// ``` +/// ```ignore /// #[test] /// fn benchmark_foo() { /// if skip_slow_tests() { return; } diff --git a/docs/dev/lsp-extensions.md b/docs/dev/lsp-extensions.md index 3251dd7526..bb01ca9ae6 100644 --- a/docs/dev/lsp-extensions.md +++ b/docs/dev/lsp-extensions.md @@ -1,5 +1,5 @@ ... --> Ci --> Ci+1 -> ... -> Cn --> C0 + // ^ ^ + // : | + // This edge -----+ | + // | + // | + // N0 + // + // In this case, the value we have just read from `Ci+1` + // is actually the cycle fallback value and not especially + // interesting. We unwind now with `CycleParticipant` to avoid + // executing the rest of our query function. This unwinding + // will be caught and our own fallback value will be used. + // + // Note that `Ci+1` may` have *other* callers who are not + // participants in the cycle (e.g., N0 in the graph above). + // They will not have the `cycle` marker set in their + // stack frames, so they will just read the fallback value + // from `Ci+1` and continue on their merry way. 
+ if let Some(cycle) = &top_query.cycle { + cycle.clone().throw() + } + } + }) + } + + pub(super) fn report_untracked_read(&self, current_revision: Revision) { + self.with_query_stack(|stack| { + if let Some(top_query) = stack.last_mut() { + top_query.add_untracked_read(current_revision); + } + }) + } + + /// Update the top query on the stack to act as though it read a value + /// of durability `durability` which changed in `revision`. + pub(super) fn report_synthetic_read(&self, durability: Durability, revision: Revision) { + self.with_query_stack(|stack| { + if let Some(top_query) = stack.last_mut() { + top_query.add_synthetic_read(durability, revision); + } + }) + } + + /// Takes the query stack and returns it. This is used when + /// the current thread is blocking. The stack must be restored + /// with [`Self::restore_query_stack`] when the thread unblocks. + pub(super) fn take_query_stack(&self) -> Vec { + assert!( + self.query_stack.borrow().is_some(), + "query stack already taken" + ); + self.query_stack.take().unwrap() + } + + /// Restores a query stack taken with [`Self::take_query_stack`] once + /// the thread unblocks. + pub(super) fn restore_query_stack(&self, stack: Vec) { + assert!(self.query_stack.borrow().is_none(), "query stack not taken"); + self.query_stack.replace(Some(stack)); + } +} + +impl std::panic::RefUnwindSafe for LocalState {} + +/// When a query is pushed onto the `active_query` stack, this guard +/// is returned to represent its slot. The guard can be used to pop +/// the query from the stack -- in the case of unwinding, the guard's +/// destructor will also remove the query. +pub(crate) struct ActiveQueryGuard<'me> { + local_state: &'me LocalState, + push_len: usize, + database_key_index: DatabaseKeyIndex, +} + +impl ActiveQueryGuard<'_> { + fn pop_helper(&self) -> ActiveQuery { + self.local_state.with_query_stack(|stack| { + // Sanity check: pushes and pops should be balanced. 
+ assert_eq!(stack.len(), self.push_len); + debug_assert_eq!( + stack.last().unwrap().database_key_index, + self.database_key_index + ); + stack.pop().unwrap() + }) + } + + /// Invoked when the query has successfully completed execution. + pub(super) fn complete(self) -> ActiveQuery { + let query = self.pop_helper(); + std::mem::forget(self); + query + } + + /// Pops an active query from the stack. Returns the [`QueryRevisions`] + /// which summarizes the other queries that were accessed during this + /// query's execution. + #[inline] + pub(crate) fn pop(self) -> QueryRevisions { + // Extract accumulated inputs. + let popped_query = self.complete(); + + // If this frame were a cycle participant, it would have unwound. + assert!(popped_query.cycle.is_none()); + + popped_query.revisions() + } + + /// If the active query is registered as a cycle participant, remove and + /// return that cycle. + pub(crate) fn take_cycle(&self) -> Option { + self.local_state + .with_query_stack(|stack| stack.last_mut()?.cycle.take()) + } +} + +impl Drop for ActiveQueryGuard<'_> { + fn drop(&mut self) { + self.pop_helper(); + } +} diff --git a/crates/salsa/src/storage.rs b/crates/salsa/src/storage.rs new file mode 100644 index 0000000000..5e32633002 --- /dev/null +++ b/crates/salsa/src/storage.rs @@ -0,0 +1,59 @@ +use crate::{plumbing::DatabaseStorageTypes, Runtime}; +use triomphe::Arc; + +/// Stores the cached results and dependency information for all the queries +/// defined on your salsa database. Also embeds a [`Runtime`] which is used to +/// manage query execution. Every database must include a `storage: +/// Storage` field. +pub struct Storage { + query_store: Arc, + runtime: Runtime, +} + +impl Default for Storage { + fn default() -> Self { + Self { + query_store: Default::default(), + runtime: Default::default(), + } + } +} + +impl Storage { + /// Gives access to the underlying salsa runtime. 
+ pub fn salsa_runtime(&self) -> &Runtime { + &self.runtime + } + + /// Gives access to the underlying salsa runtime. + pub fn salsa_runtime_mut(&mut self) -> &mut Runtime { + &mut self.runtime + } + + /// Access the query storage tables. Not meant to be used directly by end + /// users. + pub fn query_store(&self) -> &DB::DatabaseStorage { + &self.query_store + } + + /// Access the query storage tables. Not meant to be used directly by end + /// users. + pub fn query_store_mut(&mut self) -> (&DB::DatabaseStorage, &mut Runtime) { + (&self.query_store, &mut self.runtime) + } + + /// Returns a "snapshotted" storage, suitable for use in a forked database. + /// This snapshot hold a read-lock on the global state, which means that any + /// attempt to `set` an input will block until the forked runtime is + /// dropped. See `ParallelDatabase::snapshot` for more information. + /// + /// **Warning.** This second handle is intended to be used from a separate + /// thread. Using two database handles from the **same thread** can lead to + /// deadlock. 
+ pub fn snapshot(&self) -> Self { + Storage { + query_store: self.query_store.clone(), + runtime: self.runtime.snapshot(), + } + } +} diff --git a/crates/salsa/tests/cycles.rs b/crates/salsa/tests/cycles.rs new file mode 100644 index 0000000000..004f866b83 --- /dev/null +++ b/crates/salsa/tests/cycles.rs @@ -0,0 +1,501 @@ +use std::panic::UnwindSafe; + +use salsa::{Durability, ParallelDatabase, Snapshot}; +use test_log::test; + +// Axes: +// +// Threading +// * Intra-thread +// * Cross-thread -- part of cycle is on one thread, part on another +// +// Recovery strategies: +// * Panic +// * Fallback +// * Mixed -- multiple strategies within cycle participants +// +// Across revisions: +// * N/A -- only one revision +// * Present in new revision, not old +// * Present in old revision, not new +// * Present in both revisions +// +// Dependencies +// * Tracked +// * Untracked -- cycle participant(s) contain untracked reads +// +// Layers +// * Direct -- cycle participant is directly invoked from test +// * Indirect -- invoked a query that invokes the cycle +// +// +// | Thread | Recovery | Old, New | Dep style | Layers | Test Name | +// | ------ | -------- | -------- | --------- | ------ | --------- | +// | Intra | Panic | N/A | Tracked | direct | cycle_memoized | +// | Intra | Panic | N/A | Untracked | direct | cycle_volatile | +// | Intra | Fallback | N/A | Tracked | direct | cycle_cycle | +// | Intra | Fallback | N/A | Tracked | indirect | inner_cycle | +// | Intra | Fallback | Both | Tracked | direct | cycle_revalidate | +// | Intra | Fallback | New | Tracked | direct | cycle_appears | +// | Intra | Fallback | Old | Tracked | direct | cycle_disappears | +// | Intra | Fallback | Old | Tracked | direct | cycle_disappears_durability | +// | Intra | Mixed | N/A | Tracked | direct | cycle_mixed_1 | +// | Intra | Mixed | N/A | Tracked | direct | cycle_mixed_2 | +// | Cross | Fallback | N/A | Tracked | both | parallel/cycles.rs: recover_parallel_cycle | +// | Cross | 
Panic | N/A | Tracked | both | parallel/cycles.rs: panic_parallel_cycle | + +#[derive(PartialEq, Eq, Hash, Clone, Debug)] +struct Error { + cycle: Vec, +} + +#[salsa::database(GroupStruct)] +struct DatabaseImpl { + storage: salsa::Storage, +} + +impl salsa::Database for DatabaseImpl {} + +impl ParallelDatabase for DatabaseImpl { + fn snapshot(&self) -> Snapshot { + Snapshot::new(DatabaseImpl { + storage: self.storage.snapshot(), + }) + } +} + +impl Default for DatabaseImpl { + fn default() -> Self { + let res = DatabaseImpl { + storage: salsa::Storage::default(), + }; + + res + } +} + +/// The queries A, B, and C in `Database` can be configured +/// to invoke one another in arbitrary ways using this +/// enum. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum CycleQuery { + None, + A, + B, + C, + AthenC, +} + +#[salsa::query_group(GroupStruct)] +trait Database: salsa::Database { + // `a` and `b` depend on each other and form a cycle + fn memoized_a(&self) -> (); + fn memoized_b(&self) -> (); + fn volatile_a(&self) -> (); + fn volatile_b(&self) -> (); + + #[salsa::input] + fn a_invokes(&self) -> CycleQuery; + + #[salsa::input] + fn b_invokes(&self) -> CycleQuery; + + #[salsa::input] + fn c_invokes(&self) -> CycleQuery; + + #[salsa::cycle(recover_a)] + fn cycle_a(&self) -> Result<(), Error>; + + #[salsa::cycle(recover_b)] + fn cycle_b(&self) -> Result<(), Error>; + + fn cycle_c(&self) -> Result<(), Error>; +} + +fn recover_a(db: &dyn Database, cycle: &salsa::Cycle) -> Result<(), Error> { + Err(Error { + cycle: cycle.all_participants(db), + }) +} + +fn recover_b(db: &dyn Database, cycle: &salsa::Cycle) -> Result<(), Error> { + Err(Error { + cycle: cycle.all_participants(db), + }) +} + +fn memoized_a(db: &dyn Database) { + db.memoized_b() +} + +fn memoized_b(db: &dyn Database) { + db.memoized_a() +} + +fn volatile_a(db: &dyn Database) { + db.salsa_runtime().report_untracked_read(); + db.volatile_b() +} + +fn volatile_b(db: &dyn Database) { + 
db.salsa_runtime().report_untracked_read(); + db.volatile_a() +} + +impl CycleQuery { + fn invoke(self, db: &dyn Database) -> Result<(), Error> { + match self { + CycleQuery::A => db.cycle_a(), + CycleQuery::B => db.cycle_b(), + CycleQuery::C => db.cycle_c(), + CycleQuery::AthenC => { + let _ = db.cycle_a(); + db.cycle_c() + } + CycleQuery::None => Ok(()), + } + } +} + +fn cycle_a(db: &dyn Database) -> Result<(), Error> { + dbg!("cycle_a"); + db.a_invokes().invoke(db) +} + +fn cycle_b(db: &dyn Database) -> Result<(), Error> { + dbg!("cycle_b"); + db.b_invokes().invoke(db) +} + +fn cycle_c(db: &dyn Database) -> Result<(), Error> { + dbg!("cycle_c"); + db.c_invokes().invoke(db) +} + +#[track_caller] +fn extract_cycle(f: impl FnOnce() + UnwindSafe) -> salsa::Cycle { + let v = std::panic::catch_unwind(f); + if let Err(d) = &v { + if let Some(cycle) = d.downcast_ref::() { + return cycle.clone(); + } + } + panic!("unexpected value: {:?}", v) +} + +#[test] +fn cycle_memoized() { + let db = DatabaseImpl::default(); + let cycle = extract_cycle(|| db.memoized_a()); + insta::assert_debug_snapshot!(cycle.unexpected_participants(&db), @r###" + [ + "memoized_a(())", + "memoized_b(())", + ] + "###); +} + +#[test] +fn cycle_volatile() { + let db = DatabaseImpl::default(); + let cycle = extract_cycle(|| db.volatile_a()); + insta::assert_debug_snapshot!(cycle.unexpected_participants(&db), @r###" + [ + "volatile_a(())", + "volatile_b(())", + ] + "###); +} + +#[test] +fn cycle_cycle() { + let mut query = DatabaseImpl::default(); + + // A --> B + // ^ | + // +-----+ + + query.set_a_invokes(CycleQuery::B); + query.set_b_invokes(CycleQuery::A); + + assert!(query.cycle_a().is_err()); +} + +#[test] +fn inner_cycle() { + let mut query = DatabaseImpl::default(); + + // A --> B <-- C + // ^ | + // +-----+ + + query.set_a_invokes(CycleQuery::B); + query.set_b_invokes(CycleQuery::A); + query.set_c_invokes(CycleQuery::B); + + let err = query.cycle_c(); + assert!(err.is_err()); + let cycle = 
err.unwrap_err().cycle; + insta::assert_debug_snapshot!(cycle, @r###" + [ + "cycle_a(())", + "cycle_b(())", + ] + "###); +} + +#[test] +fn cycle_revalidate() { + let mut db = DatabaseImpl::default(); + + // A --> B + // ^ | + // +-----+ + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::A); + + assert!(db.cycle_a().is_err()); + db.set_b_invokes(CycleQuery::A); // same value as default + assert!(db.cycle_a().is_err()); +} + +#[test] +fn cycle_revalidate_unchanged_twice() { + let mut db = DatabaseImpl::default(); + + // A --> B + // ^ | + // +-----+ + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::A); + + assert!(db.cycle_a().is_err()); + db.set_c_invokes(CycleQuery::A); // force new revisi5on + + // on this run + insta::assert_debug_snapshot!(db.cycle_a(), @r###" + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ) + "###); +} + +#[test] +fn cycle_appears() { + let mut db = DatabaseImpl::default(); + + // A --> B + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::None); + assert!(db.cycle_a().is_ok()); + + // A --> B + // ^ | + // +-----+ + db.set_b_invokes(CycleQuery::A); + tracing::debug!("Set Cycle Leaf"); + assert!(db.cycle_a().is_err()); +} + +#[test] +fn cycle_disappears() { + let mut db = DatabaseImpl::default(); + + // A --> B + // ^ | + // +-----+ + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::A); + assert!(db.cycle_a().is_err()); + + // A --> B + db.set_b_invokes(CycleQuery::None); + assert!(db.cycle_a().is_ok()); +} + +/// A variant on `cycle_disappears` in which the values of +/// `a_invokes` and `b_invokes` are set with durability values. +/// If we are not careful, this could cause us to overlook +/// the fact that the cycle will no longer occur. 
+#[test] +fn cycle_disappears_durability() { + let mut db = DatabaseImpl::default(); + db.set_a_invokes_with_durability(CycleQuery::B, Durability::LOW); + db.set_b_invokes_with_durability(CycleQuery::A, Durability::HIGH); + + let res = db.cycle_a(); + assert!(res.is_err()); + + // At this point, `a` read `LOW` input, and `b` read `HIGH` input. However, + // because `b` participates in the same cycle as `a`, its final durability + // should be `LOW`. + // + // Check that setting a `LOW` input causes us to re-execute `b` query, and + // observe that the cycle goes away. + db.set_a_invokes_with_durability(CycleQuery::None, Durability::LOW); + + let res = db.cycle_b(); + assert!(res.is_ok()); +} + +#[test] +fn cycle_mixed_1() { + let mut db = DatabaseImpl::default(); + // A --> B <-- C + // | ^ + // +-----+ + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::C); + db.set_c_invokes(CycleQuery::B); + + let u = db.cycle_c(); + insta::assert_debug_snapshot!(u, @r###" + Err( + Error { + cycle: [ + "cycle_b(())", + "cycle_c(())", + ], + }, + ) + "###); +} + +#[test] +fn cycle_mixed_2() { + let mut db = DatabaseImpl::default(); + + // Configuration: + // + // A --> B --> C + // ^ | + // +-----------+ + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::C); + db.set_c_invokes(CycleQuery::A); + + let u = db.cycle_a(); + insta::assert_debug_snapshot!(u, @r###" + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + "cycle_c(())", + ], + }, + ) + "###); +} + +#[test] +fn cycle_deterministic_order() { + // No matter whether we start from A or B, we get the same set of participants: + let db = || { + let mut db = DatabaseImpl::default(); + // A --> B + // ^ | + // +-----+ + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::A); + db + }; + let a = db().cycle_a(); + let b = db().cycle_b(); + insta::assert_debug_snapshot!((a, b), @r###" + ( + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + Err( + Error { + 
cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + ) + "###); +} + +#[test] +fn cycle_multiple() { + // No matter whether we start from A or B, we get the same set of participants: + let mut db = DatabaseImpl::default(); + + // Configuration: + // + // A --> B <-- C + // ^ | ^ + // +-----+ | + // | | + // +-----+ + // + // Here, conceptually, B encounters a cycle with A and then + // recovers. + db.set_a_invokes(CycleQuery::B); + db.set_b_invokes(CycleQuery::AthenC); + db.set_c_invokes(CycleQuery::B); + + let c = db.cycle_c(); + let b = db.cycle_b(); + let a = db.cycle_a(); + insta::assert_debug_snapshot!((a, b, c), @r###" + ( + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + ) + "###); +} + +#[test] +fn cycle_recovery_set_but_not_participating() { + let mut db = DatabaseImpl::default(); + + // A --> C -+ + // ^ | + // +--+ + db.set_a_invokes(CycleQuery::C); + db.set_c_invokes(CycleQuery::C); + + // Here we expect C to panic and A not to recover: + let r = extract_cycle(|| drop(db.cycle_a())); + insta::assert_debug_snapshot!(r.all_participants(&db), @r###" + [ + "cycle_c(())", + ] + "###); +} diff --git a/crates/salsa/tests/dyn_trait.rs b/crates/salsa/tests/dyn_trait.rs new file mode 100644 index 0000000000..09ebc5c4ce --- /dev/null +++ b/crates/salsa/tests/dyn_trait.rs @@ -0,0 +1,28 @@ +//! Test that you can implement a query using a `dyn Trait` setup. 
+ +#[salsa::database(DynTraitStorage)] +#[derive(Default)] +struct DynTraitDatabase { + storage: salsa::Storage, +} + +impl salsa::Database for DynTraitDatabase {} + +#[salsa::query_group(DynTraitStorage)] +trait DynTrait { + #[salsa::input] + fn input(&self, x: u32) -> u32; + + fn output(&self, x: u32) -> u32; +} + +fn output(db: &dyn DynTrait, x: u32) -> u32 { + db.input(x) * 2 +} + +#[test] +fn dyn_trait() { + let mut query = DynTraitDatabase::default(); + query.set_input(22, 23); + assert_eq!(query.output(22), 46); +} diff --git a/crates/salsa/tests/incremental/constants.rs b/crates/salsa/tests/incremental/constants.rs new file mode 100644 index 0000000000..30f42b136d --- /dev/null +++ b/crates/salsa/tests/incremental/constants.rs @@ -0,0 +1,148 @@ +use crate::implementation::{TestContext, TestContextImpl}; +use salsa::debug::DebugQueryTable; +use salsa::Durability; + +#[salsa::query_group(Constants)] +pub(crate) trait ConstantsDatabase: TestContext { + #[salsa::input] + fn input(&self, key: char) -> usize; + + fn add(&self, key1: char, key2: char) -> usize; + + fn add3(&self, key1: char, key2: char, key3: char) -> usize; +} + +fn add(db: &dyn ConstantsDatabase, key1: char, key2: char) -> usize { + db.log().add(format!("add({}, {})", key1, key2)); + db.input(key1) + db.input(key2) +} + +fn add3(db: &dyn ConstantsDatabase, key1: char, key2: char, key3: char) -> usize { + db.log().add(format!("add3({}, {}, {})", key1, key2, key3)); + db.add(key1, key2) + db.input(key3) +} + +// Test we can assign a constant and things will be correctly +// recomputed afterwards. 
+#[test] +fn invalidate_constant() { + let db = &mut TestContextImpl::default(); + db.set_input_with_durability('a', 44, Durability::HIGH); + db.set_input_with_durability('b', 22, Durability::HIGH); + assert_eq!(db.add('a', 'b'), 66); + + db.set_input_with_durability('a', 66, Durability::HIGH); + assert_eq!(db.add('a', 'b'), 88); +} + +#[test] +fn invalidate_constant_1() { + let db = &mut TestContextImpl::default(); + + // Not constant: + db.set_input('a', 44); + assert_eq!(db.add('a', 'a'), 88); + + // Becomes constant: + db.set_input_with_durability('a', 44, Durability::HIGH); + assert_eq!(db.add('a', 'a'), 88); + + // Invalidates: + db.set_input_with_durability('a', 33, Durability::HIGH); + assert_eq!(db.add('a', 'a'), 66); +} + +// Test cases where we assign same value to 'a' after declaring it a +// constant. +#[test] +fn set_after_constant_same_value() { + let db = &mut TestContextImpl::default(); + db.set_input_with_durability('a', 44, Durability::HIGH); + db.set_input_with_durability('a', 44, Durability::HIGH); + db.set_input('a', 44); +} + +#[test] +fn not_constant() { + let mut db = TestContextImpl::default(); + + db.set_input('a', 22); + db.set_input('b', 44); + assert_eq!(db.add('a', 'b'), 66); + assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); +} + +#[test] +fn durability() { + let mut db = TestContextImpl::default(); + + db.set_input_with_durability('a', 22, Durability::HIGH); + db.set_input_with_durability('b', 44, Durability::HIGH); + assert_eq!(db.add('a', 'b'), 66); + assert_eq!(Durability::HIGH, AddQuery.in_db(&db).durability(('a', 'b'))); +} + +#[test] +fn mixed_constant() { + let mut db = TestContextImpl::default(); + + db.set_input_with_durability('a', 22, Durability::HIGH); + db.set_input('b', 44); + assert_eq!(db.add('a', 'b'), 66); + assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); +} + +#[test] +fn becomes_constant_with_change() { + let mut db = TestContextImpl::default(); + + 
db.set_input('a', 22); + db.set_input('b', 44); + assert_eq!(db.add('a', 'b'), 66); + assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); + + db.set_input_with_durability('a', 23, Durability::HIGH); + assert_eq!(db.add('a', 'b'), 67); + assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); + + db.set_input_with_durability('b', 45, Durability::HIGH); + assert_eq!(db.add('a', 'b'), 68); + assert_eq!(Durability::HIGH, AddQuery.in_db(&db).durability(('a', 'b'))); + + db.set_input_with_durability('b', 45, Durability::MEDIUM); + assert_eq!(db.add('a', 'b'), 68); + assert_eq!( + Durability::MEDIUM, + AddQuery.in_db(&db).durability(('a', 'b')) + ); +} + +// Test a subtle case in which an input changes from constant to +// non-constant, but its value doesn't change. If we're not careful, +// this can cause us to incorrectly consider derived values as still +// being constant. +#[test] +fn constant_to_non_constant() { + let mut db = TestContextImpl::default(); + + db.set_input_with_durability('a', 11, Durability::HIGH); + db.set_input_with_durability('b', 22, Durability::HIGH); + db.set_input_with_durability('c', 33, Durability::HIGH); + + // Here, `add3` invokes `add`, which yields 33. Both calls are + // constant. + assert_eq!(db.add3('a', 'b', 'c'), 66); + + db.set_input('a', 11); + + // Here, `add3` invokes `add`, which *still* yields 33, but which + // is no longer constant. Since value didn't change, we might + // preserve `add3` unchanged, not noticing that it is no longer + // constant. + assert_eq!(db.add3('a', 'b', 'c'), 66); + + // In that case, we would not get the correct result here, when + // 'a' changes *again*. 
+ db.set_input('a', 22); + assert_eq!(db.add3('a', 'b', 'c'), 77); +} diff --git a/crates/salsa/tests/incremental/counter.rs b/crates/salsa/tests/incremental/counter.rs new file mode 100644 index 0000000000..c04857e24c --- /dev/null +++ b/crates/salsa/tests/incremental/counter.rs @@ -0,0 +1,14 @@ +use std::cell::Cell; + +#[derive(Default)] +pub(crate) struct Counter { + value: Cell, +} + +impl Counter { + pub(crate) fn increment(&self) -> usize { + let v = self.value.get(); + self.value.set(v + 1); + v + } +} diff --git a/crates/salsa/tests/incremental/implementation.rs b/crates/salsa/tests/incremental/implementation.rs new file mode 100644 index 0000000000..a9c0a4a018 --- /dev/null +++ b/crates/salsa/tests/incremental/implementation.rs @@ -0,0 +1,58 @@ +use crate::constants; +use crate::counter::Counter; +use crate::log::Log; +use crate::memoized_dep_inputs; +use crate::memoized_inputs; +use crate::memoized_volatile; + +pub(crate) trait TestContext: salsa::Database { + fn clock(&self) -> &Counter; + fn log(&self) -> &Log; +} + +#[salsa::database( + constants::Constants, + memoized_dep_inputs::MemoizedDepInputs, + memoized_inputs::MemoizedInputs, + memoized_volatile::MemoizedVolatile +)] +#[derive(Default)] +pub(crate) struct TestContextImpl { + storage: salsa::Storage, + clock: Counter, + log: Log, +} + +impl TestContextImpl { + #[track_caller] + pub(crate) fn assert_log(&self, expected_log: &[&str]) { + let expected_text = &format!("{:#?}", expected_log); + let actual_text = &format!("{:#?}", self.log().take()); + + if expected_text == actual_text { + return; + } + + for diff in diff::lines(expected_text, actual_text) { + match diff { + diff::Result::Left(l) => println!("-{}", l), + diff::Result::Both(l, _) => println!(" {}", l), + diff::Result::Right(r) => println!("+{}", r), + } + } + + panic!("incorrect log results"); + } +} + +impl TestContext for TestContextImpl { + fn clock(&self) -> &Counter { + &self.clock + } + + fn log(&self) -> &Log { + &self.log + } 
+} + +impl salsa::Database for TestContextImpl {} diff --git a/crates/salsa/tests/incremental/log.rs b/crates/salsa/tests/incremental/log.rs new file mode 100644 index 0000000000..1ee57fe667 --- /dev/null +++ b/crates/salsa/tests/incremental/log.rs @@ -0,0 +1,16 @@ +use std::cell::RefCell; + +#[derive(Default)] +pub(crate) struct Log { + data: RefCell>, +} + +impl Log { + pub(crate) fn add(&self, text: impl Into) { + self.data.borrow_mut().push(text.into()); + } + + pub(crate) fn take(&self) -> Vec { + self.data.take() + } +} diff --git a/crates/salsa/tests/incremental/main.rs b/crates/salsa/tests/incremental/main.rs new file mode 100644 index 0000000000..bcd13c75f7 --- /dev/null +++ b/crates/salsa/tests/incremental/main.rs @@ -0,0 +1,9 @@ +mod constants; +mod counter; +mod implementation; +mod log; +mod memoized_dep_inputs; +mod memoized_inputs; +mod memoized_volatile; + +fn main() {} diff --git a/crates/salsa/tests/incremental/memoized_dep_inputs.rs b/crates/salsa/tests/incremental/memoized_dep_inputs.rs new file mode 100644 index 0000000000..4ea33e0c1a --- /dev/null +++ b/crates/salsa/tests/incremental/memoized_dep_inputs.rs @@ -0,0 +1,60 @@ +use crate::implementation::{TestContext, TestContextImpl}; + +#[salsa::query_group(MemoizedDepInputs)] +pub(crate) trait MemoizedDepInputsContext: TestContext { + fn dep_memoized2(&self) -> usize; + fn dep_memoized1(&self) -> usize; + #[salsa::dependencies] + fn dep_derived1(&self) -> usize; + #[salsa::input] + fn dep_input1(&self) -> usize; + #[salsa::input] + fn dep_input2(&self) -> usize; +} + +fn dep_memoized2(db: &dyn MemoizedDepInputsContext) -> usize { + db.log().add("Memoized2 invoked"); + db.dep_memoized1() +} + +fn dep_memoized1(db: &dyn MemoizedDepInputsContext) -> usize { + db.log().add("Memoized1 invoked"); + db.dep_derived1() * 2 +} + +fn dep_derived1(db: &dyn MemoizedDepInputsContext) -> usize { + db.log().add("Derived1 invoked"); + db.dep_input1() / 2 +} + +#[test] +fn revalidate() { + let db = &mut 
TestContextImpl::default(); + + db.set_dep_input1(0); + + // Initial run starts from Memoized2: + let v = db.dep_memoized2(); + assert_eq!(v, 0); + db.assert_log(&["Memoized2 invoked", "Memoized1 invoked", "Derived1 invoked"]); + + // After that, we first try to validate Memoized1 but wind up + // running Memoized2. Note that we don't try to validate + // Derived1, so it is invoked by Memoized1. + db.set_dep_input1(44); + let v = db.dep_memoized2(); + assert_eq!(v, 44); + db.assert_log(&["Memoized1 invoked", "Derived1 invoked", "Memoized2 invoked"]); + + // Here validation of Memoized1 succeeds so Memoized2 never runs. + db.set_dep_input1(45); + let v = db.dep_memoized2(); + assert_eq!(v, 44); + db.assert_log(&["Memoized1 invoked", "Derived1 invoked"]); + + // Here, a change to input2 doesn't affect us, so nothing runs. + db.set_dep_input2(45); + let v = db.dep_memoized2(); + assert_eq!(v, 44); + db.assert_log(&[]); +} diff --git a/crates/salsa/tests/incremental/memoized_inputs.rs b/crates/salsa/tests/incremental/memoized_inputs.rs new file mode 100644 index 0000000000..53d2ace887 --- /dev/null +++ b/crates/salsa/tests/incremental/memoized_inputs.rs @@ -0,0 +1,76 @@ +use crate::implementation::{TestContext, TestContextImpl}; + +#[salsa::query_group(MemoizedInputs)] +pub(crate) trait MemoizedInputsContext: TestContext { + fn max(&self) -> usize; + #[salsa::input] + fn input1(&self) -> usize; + #[salsa::input] + fn input2(&self) -> usize; +} + +fn max(db: &dyn MemoizedInputsContext) -> usize { + db.log().add("Max invoked"); + std::cmp::max(db.input1(), db.input2()) +} + +#[test] +fn revalidate() { + let db = &mut TestContextImpl::default(); + + db.set_input1(0); + db.set_input2(0); + + let v = db.max(); + assert_eq!(v, 0); + db.assert_log(&["Max invoked"]); + + let v = db.max(); + assert_eq!(v, 0); + db.assert_log(&[]); + + db.set_input1(44); + db.assert_log(&[]); + + let v = db.max(); + assert_eq!(v, 44); + db.assert_log(&["Max invoked"]); + + let v = db.max(); + 
assert_eq!(v, 44); + db.assert_log(&[]); + + db.set_input1(44); + db.assert_log(&[]); + db.set_input2(66); + db.assert_log(&[]); + db.set_input1(64); + db.assert_log(&[]); + + let v = db.max(); + assert_eq!(v, 66); + db.assert_log(&["Max invoked"]); + + let v = db.max(); + assert_eq!(v, 66); + db.assert_log(&[]); +} + +/// Test that invoking `set` on an input with the same value still +/// triggers a new revision. +#[test] +fn set_after_no_change() { + let db = &mut TestContextImpl::default(); + + db.set_input2(0); + + db.set_input1(44); + let v = db.max(); + assert_eq!(v, 44); + db.assert_log(&["Max invoked"]); + + db.set_input1(44); + let v = db.max(); + assert_eq!(v, 44); + db.assert_log(&["Max invoked"]); +} diff --git a/crates/salsa/tests/incremental/memoized_volatile.rs b/crates/salsa/tests/incremental/memoized_volatile.rs new file mode 100644 index 0000000000..6dc5030063 --- /dev/null +++ b/crates/salsa/tests/incremental/memoized_volatile.rs @@ -0,0 +1,77 @@ +use crate::implementation::{TestContext, TestContextImpl}; +use salsa::{Database, Durability}; + +#[salsa::query_group(MemoizedVolatile)] +pub(crate) trait MemoizedVolatileContext: TestContext { + // Queries for testing a "volatile" value wrapped by + // memoization. + fn memoized2(&self) -> usize; + fn memoized1(&self) -> usize; + fn volatile(&self) -> usize; +} + +fn memoized2(db: &dyn MemoizedVolatileContext) -> usize { + db.log().add("Memoized2 invoked"); + db.memoized1() +} + +fn memoized1(db: &dyn MemoizedVolatileContext) -> usize { + db.log().add("Memoized1 invoked"); + let v = db.volatile(); + v / 2 +} + +fn volatile(db: &dyn MemoizedVolatileContext) -> usize { + db.log().add("Volatile invoked"); + db.salsa_runtime().report_untracked_read(); + db.clock().increment() +} + +#[test] +fn volatile_x2() { + let query = TestContextImpl::default(); + + // Invoking volatile twice doesn't execute twice, because volatile + // queries are memoized by default. 
+ query.volatile(); + query.volatile(); + query.assert_log(&["Volatile invoked"]); +} + +/// Test that: +/// +/// - On the first run of R0, we recompute everything. +/// - On the second run of R1, we recompute nothing. +/// - On the first run of R1, we recompute Memoized1 but not Memoized2 (since Memoized1 result +/// did not change). +/// - On the second run of R1, we recompute nothing. +/// - On the first run of R2, we recompute everything (since Memoized1 result *did* change). +#[test] +fn revalidate() { + let mut query = TestContextImpl::default(); + + query.memoized2(); + query.assert_log(&["Memoized2 invoked", "Memoized1 invoked", "Volatile invoked"]); + + query.memoized2(); + query.assert_log(&[]); + + // Second generation: volatile will change (to 1) but memoized1 + // will not (still 0, as 1/2 = 0) + query.salsa_runtime_mut().synthetic_write(Durability::LOW); + query.memoized2(); + query.assert_log(&["Volatile invoked", "Memoized1 invoked"]); + query.memoized2(); + query.assert_log(&[]); + + // Third generation: volatile will change (to 2) and memoized1 + // will too (to 1). Therefore, after validating that Memoized1 + // changed, we now invoke Memoized2. + query.salsa_runtime_mut().synthetic_write(Durability::LOW); + + query.memoized2(); + query.assert_log(&["Volatile invoked", "Memoized1 invoked", "Memoized2 invoked"]); + + query.memoized2(); + query.assert_log(&[]); +} diff --git a/crates/salsa/tests/interned.rs b/crates/salsa/tests/interned.rs new file mode 100644 index 0000000000..bf8683114c --- /dev/null +++ b/crates/salsa/tests/interned.rs @@ -0,0 +1,98 @@ +//! Test that you can implement a query using a `dyn Trait` setup. 
+ +use salsa::InternId; + +#[salsa::database(InternStorage)] +#[derive(Default)] +struct Database { + storage: salsa::Storage, +} + +impl salsa::Database for Database {} + +impl salsa::ParallelDatabase for Database { + fn snapshot(&self) -> salsa::Snapshot { + salsa::Snapshot::new(Database { + storage: self.storage.snapshot(), + }) + } +} + +#[salsa::query_group(InternStorage)] +trait Intern { + #[salsa::interned] + fn intern1(&self, x: String) -> InternId; + + #[salsa::interned] + fn intern2(&self, x: String, y: String) -> InternId; + + #[salsa::interned] + fn intern_key(&self, x: String) -> InternKey; +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct InternKey(InternId); + +impl salsa::InternKey for InternKey { + fn from_intern_id(v: InternId) -> Self { + InternKey(v) + } + + fn as_intern_id(&self) -> InternId { + self.0 + } +} + +#[test] +fn test_intern1() { + let db = Database::default(); + let foo0 = db.intern1("foo".to_string()); + let bar0 = db.intern1("bar".to_string()); + let foo1 = db.intern1("foo".to_string()); + let bar1 = db.intern1("bar".to_string()); + + assert_eq!(foo0, foo1); + assert_eq!(bar0, bar1); + assert_ne!(foo0, bar0); + + assert_eq!("foo".to_string(), db.lookup_intern1(foo0)); + assert_eq!("bar".to_string(), db.lookup_intern1(bar0)); +} + +#[test] +fn test_intern2() { + let db = Database::default(); + let foo0 = db.intern2("x".to_string(), "foo".to_string()); + let bar0 = db.intern2("x".to_string(), "bar".to_string()); + let foo1 = db.intern2("x".to_string(), "foo".to_string()); + let bar1 = db.intern2("x".to_string(), "bar".to_string()); + + assert_eq!(foo0, foo1); + assert_eq!(bar0, bar1); + assert_ne!(foo0, bar0); + + assert_eq!( + ("x".to_string(), "foo".to_string()), + db.lookup_intern2(foo0) + ); + assert_eq!( + ("x".to_string(), "bar".to_string()), + db.lookup_intern2(bar0) + ); +} + +#[test] +fn test_intern_key() { + let db = Database::default(); + let foo0 = db.intern_key("foo".to_string()); + let bar0 = 
db.intern_key("bar".to_string()); + let foo1 = db.intern_key("foo".to_string()); + let bar1 = db.intern_key("bar".to_string()); + + assert_eq!(foo0, foo1); + assert_eq!(bar0, bar1); + assert_ne!(foo0, bar0); + + assert_eq!("foo".to_string(), db.lookup_intern_key(foo0)); + assert_eq!("bar".to_string(), db.lookup_intern_key(bar0)); +} diff --git a/crates/salsa/tests/lru.rs b/crates/salsa/tests/lru.rs new file mode 100644 index 0000000000..3da8519b08 --- /dev/null +++ b/crates/salsa/tests/lru.rs @@ -0,0 +1,102 @@ +//! Test setting LRU actually limits the number of things in the database; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +#[derive(Debug, PartialEq, Eq)] +struct HotPotato(u32); + +static N_POTATOES: AtomicUsize = AtomicUsize::new(0); + +impl HotPotato { + fn new(id: u32) -> HotPotato { + N_POTATOES.fetch_add(1, Ordering::SeqCst); + HotPotato(id) + } +} + +impl Drop for HotPotato { + fn drop(&mut self) { + N_POTATOES.fetch_sub(1, Ordering::SeqCst); + } +} + +#[salsa::query_group(QueryGroupStorage)] +trait QueryGroup: salsa::Database { + fn get(&self, x: u32) -> Arc; + fn get_volatile(&self, x: u32) -> usize; +} + +fn get(_db: &dyn QueryGroup, x: u32) -> Arc { + Arc::new(HotPotato::new(x)) +} + +fn get_volatile(db: &dyn QueryGroup, _x: u32) -> usize { + static COUNTER: AtomicUsize = AtomicUsize::new(0); + db.salsa_runtime().report_untracked_read(); + COUNTER.fetch_add(1, Ordering::SeqCst) +} + +#[salsa::database(QueryGroupStorage)] +#[derive(Default)] +struct Database { + storage: salsa::Storage, +} + +impl salsa::Database for Database {} + +#[test] +fn lru_works() { + let mut db = Database::default(); + GetQuery.in_db_mut(&mut db).set_lru_capacity(32); + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 0); + + for i in 0..128u32 { + let p = db.get(i); + assert_eq!(p.0, i) + } + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 32); + + for i in 0..128u32 { + let p = db.get(i); + assert_eq!(p.0, i) + } + 
assert_eq!(N_POTATOES.load(Ordering::SeqCst), 32); + + GetQuery.in_db_mut(&mut db).set_lru_capacity(32); + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 32); + + GetQuery.in_db_mut(&mut db).set_lru_capacity(64); + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 32); + for i in 0..128u32 { + let p = db.get(i); + assert_eq!(p.0, i) + } + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 64); + + // Special case: setting capacity to zero disables LRU + GetQuery.in_db_mut(&mut db).set_lru_capacity(0); + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 64); + for i in 0..128u32 { + let p = db.get(i); + assert_eq!(p.0, i) + } + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 128); + + drop(db); + assert_eq!(N_POTATOES.load(Ordering::SeqCst), 0); +} + +#[test] +fn lru_doesnt_break_volatile_queries() { + let mut db = Database::default(); + GetVolatileQuery.in_db_mut(&mut db).set_lru_capacity(32); + // Here, we check that we execute each volatile query at most once, despite + // LRU. That does mean that we have more values in DB than the LRU capacity, + // but it's much better than inconsistent results from volatile queries! 
+ for i in (0..3).flat_map(|_| 0..128usize) { + let x = db.get_volatile(i as u32); + assert_eq!(x, i) + } +} diff --git a/crates/salsa/tests/macros.rs b/crates/salsa/tests/macros.rs new file mode 100644 index 0000000000..3d818e53c8 --- /dev/null +++ b/crates/salsa/tests/macros.rs @@ -0,0 +1,11 @@ +#[salsa::query_group(MyStruct)] +trait MyDatabase: salsa::Database { + #[salsa::invoke(another_module::another_name)] + fn my_query(&self, key: ()) -> (); +} + +mod another_module { + pub(crate) fn another_name(_: &dyn crate::MyDatabase, (): ()) {} +} + +fn main() {} diff --git a/crates/salsa/tests/no_send_sync.rs b/crates/salsa/tests/no_send_sync.rs new file mode 100644 index 0000000000..2648f2b7a4 --- /dev/null +++ b/crates/salsa/tests/no_send_sync.rs @@ -0,0 +1,33 @@ +extern crate salsa; + +use std::rc::Rc; + +#[salsa::query_group(NoSendSyncStorage)] +trait NoSendSyncDatabase: salsa::Database { + fn no_send_sync_value(&self, key: bool) -> Rc; + fn no_send_sync_key(&self, key: Rc) -> bool; +} + +fn no_send_sync_value(_db: &dyn NoSendSyncDatabase, key: bool) -> Rc { + Rc::new(key) +} + +fn no_send_sync_key(_db: &dyn NoSendSyncDatabase, key: Rc) -> bool { + *key +} + +#[salsa::database(NoSendSyncStorage)] +#[derive(Default)] +struct DatabaseImpl { + storage: salsa::Storage, +} + +impl salsa::Database for DatabaseImpl {} + +#[test] +fn no_send_sync() { + let db = DatabaseImpl::default(); + + assert_eq!(db.no_send_sync_value(true), Rc::new(true)); + assert!(!db.no_send_sync_key(Rc::new(false))); +} diff --git a/crates/salsa/tests/on_demand_inputs.rs b/crates/salsa/tests/on_demand_inputs.rs new file mode 100644 index 0000000000..990920256a --- /dev/null +++ b/crates/salsa/tests/on_demand_inputs.rs @@ -0,0 +1,153 @@ +//! Test that "on-demand" input pattern works. +//! +//! On-demand inputs are inputs computed lazily on the fly. They are simulated +//! via a b query with zero inputs, which uses `add_synthetic_read` to +//! tweak durability and `invalidate` to clear the input. 
+ +use std::{cell::RefCell, collections::HashMap, rc::Rc}; + +use salsa::{Database as _, Durability, EventKind}; + +#[salsa::query_group(QueryGroupStorage)] +trait QueryGroup: salsa::Database + AsRef> { + fn a(&self, x: u32) -> u32; + fn b(&self, x: u32) -> u32; + fn c(&self, x: u32) -> u32; +} + +fn a(db: &dyn QueryGroup, x: u32) -> u32 { + let durability = if x % 2 == 0 { + Durability::LOW + } else { + Durability::HIGH + }; + db.salsa_runtime().report_synthetic_read(durability); + let external_state: &HashMap = db.as_ref(); + external_state[&x] +} + +fn b(db: &dyn QueryGroup, x: u32) -> u32 { + db.a(x) +} + +fn c(db: &dyn QueryGroup, x: u32) -> u32 { + db.b(x) +} + +#[salsa::database(QueryGroupStorage)] +#[derive(Default)] +struct Database { + storage: salsa::Storage, + external_state: HashMap, + on_event: Option>, +} + +impl salsa::Database for Database { + fn salsa_event(&self, event: salsa::Event) { + dbg!(event.debug(self)); + + if let Some(cb) = &self.on_event { + cb(self, event) + } + } +} + +impl AsRef> for Database { + fn as_ref(&self) -> &HashMap { + &self.external_state + } +} + +#[test] +fn on_demand_input_works() { + let mut db = Database::default(); + + db.external_state.insert(1, 10); + assert_eq!(db.b(1), 10); + assert_eq!(db.a(1), 10); + + // We changed external state, but haven't signaled about this yet, + // so we expect to see the old answer + db.external_state.insert(1, 92); + assert_eq!(db.b(1), 10); + assert_eq!(db.a(1), 10); + + AQuery.in_db_mut(&mut db).invalidate(&1); + assert_eq!(db.b(1), 92); + assert_eq!(db.a(1), 92); + + // Downstream queries should also be rerun if we call `a` first. 
+ db.external_state.insert(1, 50); + AQuery.in_db_mut(&mut db).invalidate(&1); + assert_eq!(db.a(1), 50); + assert_eq!(db.b(1), 50); +} + +#[test] +fn on_demand_input_durability() { + let mut db = Database::default(); + + let events = Rc::new(RefCell::new(vec![])); + db.on_event = Some(Box::new({ + let events = events.clone(); + move |db, event| { + if let EventKind::WillCheckCancellation = event.kind { + // these events are not interesting + } else { + events.borrow_mut().push(format!("{:?}", event.debug(db))) + } + } + })); + + events.replace(vec![]); + db.external_state.insert(1, 10); + db.external_state.insert(2, 20); + assert_eq!(db.b(1), 10); + assert_eq!(db.b(2), 20); + insta::assert_debug_snapshot!(events, @r###" + RefCell { + value: [ + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: b(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: b(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", + ], + } + "###); + + eprintln!("------------------"); + db.salsa_runtime_mut().synthetic_write(Durability::LOW); + events.replace(vec![]); + assert_eq!(db.c(1), 10); + assert_eq!(db.c(2), 20); + // Re-execute `a(2)` because that has low durability, but not `a(1)` + insta::assert_debug_snapshot!(events, @r###" + RefCell { + value: [ + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(2) } }", + ], + } + "###); + + 
eprintln!("------------------"); + db.salsa_runtime_mut().synthetic_write(Durability::HIGH); + events.replace(vec![]); + assert_eq!(db.c(1), 10); + assert_eq!(db.c(2), 20); + // Re-execute both `a(1)` and `a(2)`, but we don't re-execute any `b` queries as the + // result didn't actually change. + insta::assert_debug_snapshot!(events, @r###" + RefCell { + value: [ + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(2) } }", + ], + } + "###); +} diff --git a/crates/salsa/tests/panic_safely.rs b/crates/salsa/tests/panic_safely.rs new file mode 100644 index 0000000000..e51a74e1f1 --- /dev/null +++ b/crates/salsa/tests/panic_safely.rs @@ -0,0 +1,95 @@ +use salsa::{Database, ParallelDatabase, Snapshot}; +use std::panic::{self, AssertUnwindSafe}; +use std::sync::atomic::{AtomicU32, Ordering::SeqCst}; + +#[salsa::query_group(PanicSafelyStruct)] +trait PanicSafelyDatabase: salsa::Database { + #[salsa::input] + fn one(&self) -> usize; + + fn panic_safely(&self) -> (); + + fn outer(&self) -> (); +} + +fn panic_safely(db: &dyn PanicSafelyDatabase) { + assert_eq!(db.one(), 1); +} + +static OUTER_CALLS: AtomicU32 = AtomicU32::new(0); + +fn outer(db: &dyn PanicSafelyDatabase) { + OUTER_CALLS.fetch_add(1, SeqCst); + db.panic_safely(); +} + +#[salsa::database(PanicSafelyStruct)] +#[derive(Default)] +struct DatabaseStruct { + storage: salsa::Storage, +} + +impl salsa::Database for DatabaseStruct {} + +impl salsa::ParallelDatabase for DatabaseStruct { + fn snapshot(&self) -> Snapshot { + Snapshot::new(DatabaseStruct { + storage: self.storage.snapshot(), + }) + } +} + +#[test] +fn should_panic_safely() { + let mut db = DatabaseStruct::default(); + 
db.set_one(0); + + // Invoke `db.panic_safely() without having set `db.one`. `db.one` will + // return 0 and we should catch the panic. + let result = panic::catch_unwind(AssertUnwindSafe({ + let db = db.snapshot(); + move || db.panic_safely() + })); + assert!(result.is_err()); + + // Set `db.one` to 1 and assert ok + db.set_one(1); + let result = panic::catch_unwind(AssertUnwindSafe(|| db.panic_safely())); + assert!(result.is_ok()); + + // Check, that memoized outer is not invalidated by a panic + { + assert_eq!(OUTER_CALLS.load(SeqCst), 0); + db.outer(); + assert_eq!(OUTER_CALLS.load(SeqCst), 1); + + db.set_one(0); + let result = panic::catch_unwind(AssertUnwindSafe(|| db.outer())); + assert!(result.is_err()); + assert_eq!(OUTER_CALLS.load(SeqCst), 1); + + db.set_one(1); + db.outer(); + assert_eq!(OUTER_CALLS.load(SeqCst), 2); + } +} + +#[test] +fn storages_are_unwind_safe() { + fn check_unwind_safe() {} + check_unwind_safe::<&DatabaseStruct>(); +} + +#[test] +fn panics_clear_query_stack() { + let db = DatabaseStruct::default(); + + // Invoke `db.panic_if_not_one() without having set `db.input`. `db.input` + // will default to 0 and we should catch the panic. + let result = panic::catch_unwind(AssertUnwindSafe(|| db.panic_safely())); + assert!(result.is_err()); + + // The database has been poisoned and any attempt to increment the + // revision should panic. + assert_eq!(db.salsa_runtime().active_query(), None); +} diff --git a/crates/salsa/tests/parallel/cancellation.rs b/crates/salsa/tests/parallel/cancellation.rs new file mode 100644 index 0000000000..9a92e5cc1f --- /dev/null +++ b/crates/salsa/tests/parallel/cancellation.rs @@ -0,0 +1,132 @@ +use crate::setup::{CancellationFlag, Knobs, ParDatabase, ParDatabaseImpl, WithValue}; +use salsa::{Cancelled, ParallelDatabase}; + +macro_rules! 
assert_cancelled { + ($thread:expr) => { + match $thread.join() { + Ok(value) => panic!("expected cancellation, got {:?}", value), + Err(payload) => match payload.downcast::() { + Ok(_) => {} + Err(payload) => ::std::panic::resume_unwind(payload), + }, + } + }; +} + +/// Add test where a call to `sum` is cancelled by a simultaneous +/// write. Check that we recompute the result in next revision, even +/// though none of the inputs have changed. +#[test] +fn in_par_get_set_cancellation_immediate() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 100); + db.set_input('b', 10); + db.set_input('c', 1); + db.set_input('d', 0); + + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || { + // This will not return until it sees cancellation is + // signaled. + db.knobs().sum_signal_on_entry.with_value(1, || { + db.knobs() + .sum_wait_for_cancellation + .with_value(CancellationFlag::Panic, || db.sum("abc")) + }) + } + }); + + // Wait until we have entered `sum` in the other thread. + db.wait_for(1); + + // Try to set the input. This will signal cancellation. + db.set_input('d', 1000); + + // This should re-compute the value (even though no input has changed). + let thread2 = std::thread::spawn({ + let db = db.snapshot(); + move || db.sum("abc") + }); + + assert_eq!(db.sum("d"), 1000); + assert_cancelled!(thread1); + assert_eq!(thread2.join().unwrap(), 111); +} + +/// Here, we check that `sum`'s cancellation is propagated +/// to `sum2` properly. +#[test] +fn in_par_get_set_cancellation_transitive() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 100); + db.set_input('b', 10); + db.set_input('c', 1); + db.set_input('d', 0); + + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || { + // This will not return until it sees cancellation is + // signaled. 
+ db.knobs().sum_signal_on_entry.with_value(1, || { + db.knobs() + .sum_wait_for_cancellation + .with_value(CancellationFlag::Panic, || db.sum2("abc")) + }) + } + }); + + // Wait until we have entered `sum` in the other thread. + db.wait_for(1); + + // Try to set the input. This will signal cancellation. + db.set_input('d', 1000); + + // This should re-compute the value (even though no input has changed). + let thread2 = std::thread::spawn({ + let db = db.snapshot(); + move || db.sum2("abc") + }); + + assert_eq!(db.sum2("d"), 1000); + assert_cancelled!(thread1); + assert_eq!(thread2.join().unwrap(), 111); +} + +/// https://github.com/salsa-rs/salsa/issues/66 +#[test] +fn no_back_dating_in_cancellation() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 1); + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || { + // Here we compute a long-chain of queries, + // but the last one gets cancelled. + db.knobs().sum_signal_on_entry.with_value(1, || { + db.knobs() + .sum_wait_for_cancellation + .with_value(CancellationFlag::Panic, || db.sum3("a")) + }) + } + }); + + db.wait_for(1); + + // Set unrelated input to bump revision + db.set_input('b', 2); + + // Here we should recompuet the whole chain again, clearing the cancellation + // state. If we get `usize::max()` here, it is a bug! + assert_eq!(db.sum3("a"), 1); + + assert_cancelled!(thread1); + + db.set_input('a', 3); + db.set_input('a', 4); + assert_eq!(db.sum3("ab"), 6); +} diff --git a/crates/salsa/tests/parallel/frozen.rs b/crates/salsa/tests/parallel/frozen.rs new file mode 100644 index 0000000000..5359a8820e --- /dev/null +++ b/crates/salsa/tests/parallel/frozen.rs @@ -0,0 +1,57 @@ +use crate::setup::{ParDatabase, ParDatabaseImpl}; +use crate::signal::Signal; +use salsa::{Database, ParallelDatabase}; +use std::{ + panic::{catch_unwind, AssertUnwindSafe}, + sync::Arc, +}; + +/// Add test where a call to `sum` is cancelled by a simultaneous +/// write. 
Check that we recompute the result in next revision, even +/// though none of the inputs have changed. +#[test] +fn in_par_get_set_cancellation() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 1); + + let signal = Arc::new(Signal::default()); + + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + let signal = signal.clone(); + move || { + // Check that cancellation flag is not yet set, because + // `set` cannot have been called yet. + catch_unwind(AssertUnwindSafe(|| db.unwind_if_cancelled())).unwrap(); + + // Signal other thread to proceed. + signal.signal(1); + + // Wait for other thread to signal cancellation + catch_unwind(AssertUnwindSafe(|| loop { + db.unwind_if_cancelled(); + std::thread::yield_now(); + })) + .unwrap_err(); + } + }); + + let thread2 = std::thread::spawn({ + move || { + // Wait until thread 1 has asserted that they are not cancelled + // before we invoke `set.` + signal.wait_for(1); + + // This will block until thread1 drops the revision lock. + db.set_input('a', 2); + + db.input('a') + } + }); + + thread1.join().unwrap(); + + let c = thread2.join().unwrap(); + assert_eq!(c, 2); +} diff --git a/crates/salsa/tests/parallel/independent.rs b/crates/salsa/tests/parallel/independent.rs new file mode 100644 index 0000000000..bd6ba3bf93 --- /dev/null +++ b/crates/salsa/tests/parallel/independent.rs @@ -0,0 +1,29 @@ +use crate::setup::{ParDatabase, ParDatabaseImpl}; +use salsa::ParallelDatabase; + +/// Test two `sum` queries (on distinct keys) executing in different +/// threads. Really just a test that `snapshot` etc compiles. 
+#[test] +fn in_par_two_independent_queries() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 100); + db.set_input('b', 10); + db.set_input('c', 1); + db.set_input('d', 200); + db.set_input('e', 20); + db.set_input('f', 2); + + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || db.sum("abc") + }); + + let thread2 = std::thread::spawn({ + let db = db.snapshot(); + move || db.sum("def") + }); + + assert_eq!(thread1.join().unwrap(), 111); + assert_eq!(thread2.join().unwrap(), 222); +} diff --git a/crates/salsa/tests/parallel/main.rs b/crates/salsa/tests/parallel/main.rs new file mode 100644 index 0000000000..31c0da1837 --- /dev/null +++ b/crates/salsa/tests/parallel/main.rs @@ -0,0 +1,13 @@ +mod setup; + +mod cancellation; +mod frozen; +mod independent; +mod parallel_cycle_all_recover; +mod parallel_cycle_mid_recover; +mod parallel_cycle_none_recover; +mod parallel_cycle_one_recovers; +mod race; +mod signal; +mod stress; +mod true_parallel; diff --git a/crates/salsa/tests/parallel/parallel_cycle_all_recover.rs b/crates/salsa/tests/parallel/parallel_cycle_all_recover.rs new file mode 100644 index 0000000000..cee51b4db7 --- /dev/null +++ b/crates/salsa/tests/parallel/parallel_cycle_all_recover.rs @@ -0,0 +1,110 @@ +//! Test for cycle recover spread across two threads. +//! See `../cycles.rs` for a complete listing of cycle tests, +//! both intra and cross thread. + +use crate::setup::{Knobs, ParDatabaseImpl}; +use salsa::ParallelDatabase; +use test_log::test; + +// Recover cycle test: +// +// The pattern is as follows. 
+// +// Thread A Thread B +// -------- -------- +// a1 b1 +// | wait for stage 1 (blocks) +// signal stage 1 | +// wait for stage 2 (blocks) (unblocked) +// | signal stage 2 +// (unblocked) wait for stage 3 (blocks) +// a2 | +// b1 (blocks -> stage 3) | +// | (unblocked) +// | b2 +// | a1 (cycle detected, recovers) +// | b2 completes, recovers +// | b1 completes, recovers +// a2 sees cycle, recovers +// a1 completes, recovers + +#[test] +fn parallel_cycle_all_recover() { + let db = ParDatabaseImpl::default(); + db.knobs().signal_on_will_block.set(3); + + let thread_a = std::thread::spawn({ + let db = db.snapshot(); + move || db.a1(1) + }); + + let thread_b = std::thread::spawn({ + let db = db.snapshot(); + move || db.b1(1) + }); + + assert_eq!(thread_a.join().unwrap(), 11); + assert_eq!(thread_b.join().unwrap(), 21); +} + +#[salsa::query_group(ParallelCycleAllRecover)] +pub(crate) trait TestDatabase: Knobs { + #[salsa::cycle(recover_a1)] + fn a1(&self, key: i32) -> i32; + + #[salsa::cycle(recover_a2)] + fn a2(&self, key: i32) -> i32; + + #[salsa::cycle(recover_b1)] + fn b1(&self, key: i32) -> i32; + + #[salsa::cycle(recover_b2)] + fn b2(&self, key: i32) -> i32; +} + +fn recover_a1(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { + tracing::debug!("recover_a1"); + key * 10 + 1 +} + +fn recover_a2(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { + tracing::debug!("recover_a2"); + key * 10 + 2 +} + +fn recover_b1(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { + tracing::debug!("recover_b1"); + key * 20 + 1 +} + +fn recover_b2(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { + tracing::debug!("recover_b2"); + key * 20 + 2 +} + +fn a1(db: &dyn TestDatabase, key: i32) -> i32 { + // Wait to create the cycle until both threads have entered + db.signal(1); + db.wait_for(2); + + db.a2(key) +} + +fn a2(db: &dyn TestDatabase, key: i32) -> i32 { + db.b1(key) +} + +fn b1(db: &dyn TestDatabase, key: i32) -> 
i32 { + // Wait to create the cycle until both threads have entered + db.wait_for(1); + db.signal(2); + + // Wait for thread A to block on this thread + db.wait_for(3); + + db.b2(key) +} + +fn b2(db: &dyn TestDatabase, key: i32) -> i32 { + db.a1(key) +} diff --git a/crates/salsa/tests/parallel/parallel_cycle_mid_recover.rs b/crates/salsa/tests/parallel/parallel_cycle_mid_recover.rs new file mode 100644 index 0000000000..f78c05c559 --- /dev/null +++ b/crates/salsa/tests/parallel/parallel_cycle_mid_recover.rs @@ -0,0 +1,110 @@ +//! Test for cycle recover spread across two threads. +//! See `../cycles.rs` for a complete listing of cycle tests, +//! both intra and cross thread. + +use crate::setup::{Knobs, ParDatabaseImpl}; +use salsa::ParallelDatabase; +use test_log::test; + +// Recover cycle test: +// +// The pattern is as follows. +// +// Thread A Thread B +// -------- -------- +// a1 b1 +// | wait for stage 1 (blocks) +// signal stage 1 | +// wait for stage 2 (blocks) (unblocked) +// | | +// | b2 +// | b3 +// | a1 (blocks -> stage 2) +// (unblocked) | +// a2 (cycle detected) | +// b3 recovers +// b2 resumes +// b1 panics because bug + +#[test] +fn parallel_cycle_mid_recovers() { + let db = ParDatabaseImpl::default(); + db.knobs().signal_on_will_block.set(2); + + let thread_a = std::thread::spawn({ + let db = db.snapshot(); + move || db.a1(1) + }); + + let thread_b = std::thread::spawn({ + let db = db.snapshot(); + move || db.b1(1) + }); + + // We expect that the recovery function yields + // `1 * 20 + 2`, which is returned (and forwarded) + // to b1, and from there to a2 and a1. 
+ assert_eq!(thread_a.join().unwrap(), 22); + assert_eq!(thread_b.join().unwrap(), 22); +} + +#[salsa::query_group(ParallelCycleMidRecovers)] +pub(crate) trait TestDatabase: Knobs { + fn a1(&self, key: i32) -> i32; + + fn a2(&self, key: i32) -> i32; + + #[salsa::cycle(recover_b1)] + fn b1(&self, key: i32) -> i32; + + fn b2(&self, key: i32) -> i32; + + #[salsa::cycle(recover_b3)] + fn b3(&self, key: i32) -> i32; +} + +fn recover_b1(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { + tracing::debug!("recover_b1"); + key * 20 + 2 +} + +fn recover_b3(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { + tracing::debug!("recover_b1"); + key * 200 + 2 +} + +fn a1(db: &dyn TestDatabase, key: i32) -> i32 { + // tell thread b we have started + db.signal(1); + + // wait for thread b to block on a1 + db.wait_for(2); + + db.a2(key) +} + +fn a2(db: &dyn TestDatabase, key: i32) -> i32 { + // create the cycle + db.b1(key) +} + +fn b1(db: &dyn TestDatabase, key: i32) -> i32 { + // wait for thread a to have started + db.wait_for(1); + + db.b2(key); + + 0 +} + +fn b2(db: &dyn TestDatabase, key: i32) -> i32 { + // will encounter a cycle but recover + db.b3(key); + db.b1(key); // hasn't recovered yet + 0 +} + +fn b3(db: &dyn TestDatabase, key: i32) -> i32 { + // will block on thread a, signaling stage 2 + db.a1(key) +} diff --git a/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs b/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs new file mode 100644 index 0000000000..65f1b6ea16 --- /dev/null +++ b/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs @@ -0,0 +1,71 @@ +//! Test a cycle where no queries recover that occurs across threads. +//! See the `../cycles.rs` for a complete listing of cycle tests, +//! both intra and cross thread. 
+ +use crate::setup::{Knobs, ParDatabaseImpl}; +use salsa::ParallelDatabase; +use test_log::test; + +#[test] +fn parallel_cycle_none_recover() { + let db = ParDatabaseImpl::default(); + db.knobs().signal_on_will_block.set(3); + + let thread_a = std::thread::spawn({ + let db = db.snapshot(); + move || db.a(-1) + }); + + let thread_b = std::thread::spawn({ + let db = db.snapshot(); + move || db.b(-1) + }); + + // We expect B to panic because it detects a cycle (it is the one that calls A, ultimately). + // Right now, it panics with a string. + let err_b = thread_b.join().unwrap_err(); + if let Some(c) = err_b.downcast_ref::() { + insta::assert_debug_snapshot!(c.unexpected_participants(&db), @r###" + [ + "a(-1)", + "b(-1)", + ] + "###); + } else { + panic!("b failed in an unexpected way: {:?}", err_b); + } + + // We expect A to propagate a panic, which causes us to use the sentinel + // type `Canceled`. + assert!(thread_a + .join() + .unwrap_err() + .downcast_ref::() + .is_some()); +} + +#[salsa::query_group(ParallelCycleNoneRecover)] +pub(crate) trait TestDatabase: Knobs { + fn a(&self, key: i32) -> i32; + fn b(&self, key: i32) -> i32; +} + +fn a(db: &dyn TestDatabase, key: i32) -> i32 { + // Wait to create the cycle until both threads have entered + db.signal(1); + db.wait_for(2); + + db.b(key) +} + +fn b(db: &dyn TestDatabase, key: i32) -> i32 { + // Wait to create the cycle until both threads have entered + db.wait_for(1); + db.signal(2); + + // Wait for thread A to block on this thread + db.wait_for(3); + + // Now try to execute A + db.a(key) +} diff --git a/crates/salsa/tests/parallel/parallel_cycle_one_recovers.rs b/crates/salsa/tests/parallel/parallel_cycle_one_recovers.rs new file mode 100644 index 0000000000..7d3944714a --- /dev/null +++ b/crates/salsa/tests/parallel/parallel_cycle_one_recovers.rs @@ -0,0 +1,95 @@ +//! Test for cycle recover spread across two threads. +//! See `../cycles.rs` for a complete listing of cycle tests, +//! 
both intra and cross thread. + +use crate::setup::{Knobs, ParDatabaseImpl}; +use salsa::ParallelDatabase; +use test_log::test; + +// Recover cycle test: +// +// The pattern is as follows. +// +// Thread A Thread B +// -------- -------- +// a1 b1 +// | wait for stage 1 (blocks) +// signal stage 1 | +// wait for stage 2 (blocks) (unblocked) +// | signal stage 2 +// (unblocked) wait for stage 3 (blocks) +// a2 | +// b1 (blocks -> stage 3) | +// | (unblocked) +// | b2 +// | a1 (cycle detected) +// a2 recovery fn executes | +// a1 completes normally | +// b2 completes, recovers +// b1 completes, recovers + +#[test] +fn parallel_cycle_one_recovers() { + let db = ParDatabaseImpl::default(); + db.knobs().signal_on_will_block.set(3); + + let thread_a = std::thread::spawn({ + let db = db.snapshot(); + move || db.a1(1) + }); + + let thread_b = std::thread::spawn({ + let db = db.snapshot(); + move || db.b1(1) + }); + + // We expect that the recovery function yields + // `1 * 20 + 2`, which is returned (and forwarded) + // to b1, and from there to a2 and a1. 
+ assert_eq!(thread_a.join().unwrap(), 22); + assert_eq!(thread_b.join().unwrap(), 22); +} + +#[salsa::query_group(ParallelCycleOneRecovers)] +pub(crate) trait TestDatabase: Knobs { + fn a1(&self, key: i32) -> i32; + + #[salsa::cycle(recover)] + fn a2(&self, key: i32) -> i32; + + fn b1(&self, key: i32) -> i32; + + fn b2(&self, key: i32) -> i32; +} + +fn recover(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { + tracing::debug!("recover"); + key * 20 + 2 +} + +fn a1(db: &dyn TestDatabase, key: i32) -> i32 { + // Wait to create the cycle until both threads have entered + db.signal(1); + db.wait_for(2); + + db.a2(key) +} + +fn a2(db: &dyn TestDatabase, key: i32) -> i32 { + db.b1(key) +} + +fn b1(db: &dyn TestDatabase, key: i32) -> i32 { + // Wait to create the cycle until both threads have entered + db.wait_for(1); + db.signal(2); + + // Wait for thread A to block on this thread + db.wait_for(3); + + db.b2(key) +} + +fn b2(db: &dyn TestDatabase, key: i32) -> i32 { + db.a1(key) +} diff --git a/crates/salsa/tests/parallel/race.rs b/crates/salsa/tests/parallel/race.rs new file mode 100644 index 0000000000..e875de998f --- /dev/null +++ b/crates/salsa/tests/parallel/race.rs @@ -0,0 +1,37 @@ +use std::panic::AssertUnwindSafe; + +use crate::setup::{ParDatabase, ParDatabaseImpl}; +use salsa::{Cancelled, ParallelDatabase}; + +/// Test where a read and a set are racing with one another. +/// Should be atomic. +#[test] +fn in_par_get_set_race() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 100); + db.set_input('b', 10); + db.set_input('c', 1); + + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || Cancelled::catch(AssertUnwindSafe(|| db.sum("abc"))) + }); + + let thread2 = std::thread::spawn(move || { + db.set_input('a', 1000); + db.sum("a") + }); + + // If the 1st thread runs first, you get 111, otherwise you get + // 1011; if they run concurrently and the 1st thread observes the + // cancellation, it'll unwind. 
+ let result1 = thread1.join().unwrap(); + if let Ok(value1) = result1 { + assert!(value1 == 111 || value1 == 1011, "illegal result {}", value1); + } + + // thread2 can not observe a cancellation because it performs a + // database write before running any other queries. + assert_eq!(thread2.join().unwrap(), 1000); +} diff --git a/crates/salsa/tests/parallel/setup.rs b/crates/salsa/tests/parallel/setup.rs new file mode 100644 index 0000000000..0f7d06542f --- /dev/null +++ b/crates/salsa/tests/parallel/setup.rs @@ -0,0 +1,202 @@ +use crate::signal::Signal; +use salsa::Database; +use salsa::ParallelDatabase; +use salsa::Snapshot; +use std::sync::Arc; +use std::{ + cell::Cell, + panic::{catch_unwind, resume_unwind, AssertUnwindSafe}, +}; + +#[salsa::query_group(Par)] +pub(crate) trait ParDatabase: Knobs { + #[salsa::input] + fn input(&self, key: char) -> usize; + + fn sum(&self, key: &'static str) -> usize; + + /// Invokes `sum` + fn sum2(&self, key: &'static str) -> usize; + + /// Invokes `sum` but doesn't really care about the result. + fn sum2_drop_sum(&self, key: &'static str) -> usize; + + /// Invokes `sum2` + fn sum3(&self, key: &'static str) -> usize; + + /// Invokes `sum2_drop_sum` + fn sum3_drop_sum(&self, key: &'static str) -> usize; +} + +/// Various "knobs" and utilities used by tests to force +/// a certain behavior. 
+pub(crate) trait Knobs { + fn knobs(&self) -> &KnobsStruct; + + fn signal(&self, stage: usize); + + fn wait_for(&self, stage: usize); +} + +pub(crate) trait WithValue { + fn with_value(&self, value: T, closure: impl FnOnce() -> R) -> R; +} + +impl WithValue for Cell { + fn with_value(&self, value: T, closure: impl FnOnce() -> R) -> R { + let old_value = self.replace(value); + + let result = catch_unwind(AssertUnwindSafe(|| closure())); + + self.set(old_value); + + match result { + Ok(r) => r, + Err(payload) => resume_unwind(payload), + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq)] +pub(crate) enum CancellationFlag { + Down, + Panic, +} + +impl Default for CancellationFlag { + fn default() -> CancellationFlag { + CancellationFlag::Down + } +} + +/// Various "knobs" that can be used to customize how the queries +/// behave on one specific thread. Note that this state is +/// intentionally thread-local (apart from `signal`). +#[derive(Clone, Default)] +pub(crate) struct KnobsStruct { + /// A kind of flexible barrier used to coordinate execution across + /// threads to ensure we reach various weird states. + pub(crate) signal: Arc, + + /// When this database is about to block, send a signal. + pub(crate) signal_on_will_block: Cell, + + /// Invocations of `sum` will signal this stage on entry. + pub(crate) sum_signal_on_entry: Cell, + + /// Invocations of `sum` will wait for this stage on entry. + pub(crate) sum_wait_for_on_entry: Cell, + + /// If true, invocations of `sum` will panic before they exit. + pub(crate) sum_should_panic: Cell, + + /// If true, invocations of `sum` will wait for cancellation before + /// they exit. + pub(crate) sum_wait_for_cancellation: Cell, + + /// Invocations of `sum` will wait for this stage prior to exiting. + pub(crate) sum_wait_for_on_exit: Cell, + + /// Invocations of `sum` will signal this stage prior to exiting. 
+ pub(crate) sum_signal_on_exit: Cell, + + /// Invocations of `sum3_drop_sum` will panic unconditionally + pub(crate) sum3_drop_sum_should_panic: Cell, +} + +fn sum(db: &dyn ParDatabase, key: &'static str) -> usize { + let mut sum = 0; + + db.signal(db.knobs().sum_signal_on_entry.get()); + + db.wait_for(db.knobs().sum_wait_for_on_entry.get()); + + if db.knobs().sum_should_panic.get() { + panic!("query set to panic before exit") + } + + for ch in key.chars() { + sum += db.input(ch); + } + + match db.knobs().sum_wait_for_cancellation.get() { + CancellationFlag::Down => (), + CancellationFlag::Panic => { + tracing::debug!("waiting for cancellation"); + loop { + db.unwind_if_cancelled(); + std::thread::yield_now(); + } + } + } + + db.wait_for(db.knobs().sum_wait_for_on_exit.get()); + + db.signal(db.knobs().sum_signal_on_exit.get()); + + sum +} + +fn sum2(db: &dyn ParDatabase, key: &'static str) -> usize { + db.sum(key) +} + +fn sum2_drop_sum(db: &dyn ParDatabase, key: &'static str) -> usize { + let _ = db.sum(key); + 22 +} + +fn sum3(db: &dyn ParDatabase, key: &'static str) -> usize { + db.sum2(key) +} + +fn sum3_drop_sum(db: &dyn ParDatabase, key: &'static str) -> usize { + if db.knobs().sum3_drop_sum_should_panic.get() { + panic!("sum3_drop_sum executed") + } + db.sum2_drop_sum(key) +} + +#[salsa::database( + Par, + crate::parallel_cycle_all_recover::ParallelCycleAllRecover, + crate::parallel_cycle_none_recover::ParallelCycleNoneRecover, + crate::parallel_cycle_mid_recover::ParallelCycleMidRecovers, + crate::parallel_cycle_one_recovers::ParallelCycleOneRecovers +)] +#[derive(Default)] +pub(crate) struct ParDatabaseImpl { + storage: salsa::Storage, + knobs: KnobsStruct, +} + +impl Database for ParDatabaseImpl { + fn salsa_event(&self, event: salsa::Event) { + if let salsa::EventKind::WillBlockOn { .. 
} = event.kind { + self.signal(self.knobs().signal_on_will_block.get()); + } + } +} + +impl ParallelDatabase for ParDatabaseImpl { + fn snapshot(&self) -> Snapshot { + Snapshot::new(ParDatabaseImpl { + storage: self.storage.snapshot(), + knobs: self.knobs.clone(), + }) + } +} + +impl Knobs for ParDatabaseImpl { + fn knobs(&self) -> &KnobsStruct { + &self.knobs + } + + fn signal(&self, stage: usize) { + self.knobs.signal.signal(stage); + } + + fn wait_for(&self, stage: usize) { + self.knobs.signal.wait_for(stage); + } +} diff --git a/crates/salsa/tests/parallel/signal.rs b/crates/salsa/tests/parallel/signal.rs new file mode 100644 index 0000000000..0af7b66e48 --- /dev/null +++ b/crates/salsa/tests/parallel/signal.rs @@ -0,0 +1,40 @@ +use parking_lot::{Condvar, Mutex}; + +#[derive(Default)] +pub(crate) struct Signal { + value: Mutex, + cond_var: Condvar, +} + +impl Signal { + pub(crate) fn signal(&self, stage: usize) { + tracing::debug!("signal({})", stage); + + // This check avoids acquiring the lock for things that will + // clearly be a no-op. Not *necessary* but helps to ensure we + // are more likely to encounter weird race conditions; + // otherwise calls to `sum` will tend to be unnecessarily + // synchronous. + if stage > 0 { + let mut v = self.value.lock(); + if stage > *v { + *v = stage; + self.cond_var.notify_all(); + } + } + } + + /// Waits until the given condition is true; the fn is invoked + /// with the current stage. + pub(crate) fn wait_for(&self, stage: usize) { + tracing::debug!("wait_for({})", stage); + + // As above, avoid lock if clearly a no-op. 
+ if stage > 0 { + let mut v = self.value.lock(); + while *v < stage { + self.cond_var.wait(&mut v); + } + } + } +} diff --git a/crates/salsa/tests/parallel/stress.rs b/crates/salsa/tests/parallel/stress.rs new file mode 100644 index 0000000000..16a1b79044 --- /dev/null +++ b/crates/salsa/tests/parallel/stress.rs @@ -0,0 +1,174 @@ +use rand::seq::SliceRandom; +use rand::Rng; + +use salsa::ParallelDatabase; +use salsa::Snapshot; +use salsa::{Cancelled, Database}; + +// Number of operations a reader performs +const N_MUTATOR_OPS: usize = 100; +const N_READER_OPS: usize = 100; + +#[salsa::query_group(Stress)] +trait StressDatabase: salsa::Database { + #[salsa::input] + fn a(&self, key: usize) -> usize; + + fn b(&self, key: usize) -> usize; + + fn c(&self, key: usize) -> usize; +} + +fn b(db: &dyn StressDatabase, key: usize) -> usize { + db.unwind_if_cancelled(); + db.a(key) +} + +fn c(db: &dyn StressDatabase, key: usize) -> usize { + db.b(key) +} + +#[salsa::database(Stress)] +#[derive(Default)] +struct StressDatabaseImpl { + storage: salsa::Storage, +} + +impl salsa::Database for StressDatabaseImpl {} + +impl salsa::ParallelDatabase for StressDatabaseImpl { + fn snapshot(&self) -> Snapshot { + Snapshot::new(StressDatabaseImpl { + storage: self.storage.snapshot(), + }) + } +} + +#[derive(Clone, Copy, Debug)] +enum Query { + A, + B, + C, +} + +enum MutatorOp { + WriteOp(WriteOp), + LaunchReader { + ops: Vec, + check_cancellation: bool, + }, +} + +#[derive(Debug)] +enum WriteOp { + SetA(usize, usize), +} + +#[derive(Debug)] +enum ReadOp { + Get(Query, usize), +} + +impl rand::distributions::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> Query { + *[Query::A, Query::B, Query::C].choose(rng).unwrap() + } +} + +impl rand::distributions::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> MutatorOp { + if rng.gen_bool(0.5) { + MutatorOp::WriteOp(rng.gen()) + } else { + MutatorOp::LaunchReader { + ops: 
(0..N_READER_OPS).map(|_| rng.gen()).collect(), + check_cancellation: rng.gen(), + } + } + } +} + +impl rand::distributions::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> WriteOp { + let key = rng.gen::() % 10; + let value = rng.gen::() % 10; + WriteOp::SetA(key, value) + } +} + +impl rand::distributions::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> ReadOp { + let query = rng.gen::(); + let key = rng.gen::() % 10; + ReadOp::Get(query, key) + } +} + +fn db_reader_thread(db: &StressDatabaseImpl, ops: Vec, check_cancellation: bool) { + for op in ops { + if check_cancellation { + db.unwind_if_cancelled(); + } + op.execute(db); + } +} + +impl WriteOp { + fn execute(self, db: &mut StressDatabaseImpl) { + match self { + WriteOp::SetA(key, value) => { + db.set_a(key, value); + } + } + } +} + +impl ReadOp { + fn execute(self, db: &StressDatabaseImpl) { + match self { + ReadOp::Get(query, key) => match query { + Query::A => { + db.a(key); + } + Query::B => { + let _ = db.b(key); + } + Query::C => { + let _ = db.c(key); + } + }, + } + } +} + +#[test] +fn stress_test() { + let mut db = StressDatabaseImpl::default(); + for i in 0..10 { + db.set_a(i, i); + } + + let mut rng = rand::thread_rng(); + + // generate the ops that the mutator thread will perform + let write_ops: Vec = (0..N_MUTATOR_OPS).map(|_| rng.gen()).collect(); + + // execute the "main thread", which sometimes snapshots off other threads + let mut all_threads = vec![]; + for op in write_ops { + match op { + MutatorOp::WriteOp(w) => w.execute(&mut db), + MutatorOp::LaunchReader { + ops, + check_cancellation, + } => all_threads.push(std::thread::spawn({ + let db = db.snapshot(); + move || Cancelled::catch(|| db_reader_thread(&db, ops, check_cancellation)) + })), + } + } + + for thread in all_threads { + thread.join().unwrap().ok(); + } +} diff --git a/crates/salsa/tests/parallel/true_parallel.rs 
b/crates/salsa/tests/parallel/true_parallel.rs new file mode 100644 index 0000000000..03432dca97 --- /dev/null +++ b/crates/salsa/tests/parallel/true_parallel.rs @@ -0,0 +1,126 @@ +use crate::setup::{Knobs, ParDatabase, ParDatabaseImpl, WithValue}; +use salsa::ParallelDatabase; +use std::panic::{self, AssertUnwindSafe}; + +/// Test where two threads are executing sum. We show that they can +/// both be executing sum in parallel by having thread1 wait for +/// thread2 to send a signal before it leaves (similarly, thread2 +/// waits for thread1 to send a signal before it enters). +#[test] +fn true_parallel_different_keys() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 100); + db.set_input('b', 10); + db.set_input('c', 1); + + // Thread 1 will signal stage 1 when it enters and wait for stage 2. + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || { + let v = db.knobs().sum_signal_on_entry.with_value(1, || { + db.knobs() + .sum_wait_for_on_exit + .with_value(2, || db.sum("a")) + }); + v + } + }); + + // Thread 2 will wait_for stage 1 when it enters and signal stage 2 + // when it leaves. + let thread2 = std::thread::spawn({ + let db = db.snapshot(); + move || { + let v = db.knobs().sum_wait_for_on_entry.with_value(1, || { + db.knobs().sum_signal_on_exit.with_value(2, || db.sum("b")) + }); + v + } + }); + + assert_eq!(thread1.join().unwrap(), 100); + assert_eq!(thread2.join().unwrap(), 10); +} + +/// Add a test that tries to trigger a conflict, where we fetch +/// `sum("abc")` from two threads simultaneously, and of them +/// therefore has to block. 
+#[test] +fn true_parallel_same_keys() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 100); + db.set_input('b', 10); + db.set_input('c', 1); + + // Thread 1 will wait_for a barrier in the start of `sum` + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || { + let v = db.knobs().sum_signal_on_entry.with_value(1, || { + db.knobs() + .sum_wait_for_on_entry + .with_value(2, || db.sum("abc")) + }); + v + } + }); + + // Thread 2 will wait until Thread 1 has entered sum and then -- + // once it has set itself to block -- signal Thread 1 to + // continue. This way, we test out the mechanism of one thread + // blocking on another. + let thread2 = std::thread::spawn({ + let db = db.snapshot(); + move || { + db.knobs().signal.wait_for(1); + db.knobs().signal_on_will_block.set(2); + db.sum("abc") + } + }); + + assert_eq!(thread1.join().unwrap(), 111); + assert_eq!(thread2.join().unwrap(), 111); +} + +/// Add a test that tries to trigger a conflict, where we fetch `sum("a")` +/// from two threads simultaneously. After `thread2` begins blocking, +/// we force `thread1` to panic and should see that propagate to `thread2`. +#[test] +fn true_parallel_propagate_panic() { + let mut db = ParDatabaseImpl::default(); + + db.set_input('a', 1); + + // `thread1` will wait_for a barrier in the start of `sum`. Once it can + // continue, it will panic. + let thread1 = std::thread::spawn({ + let db = db.snapshot(); + move || { + let v = db.knobs().sum_signal_on_entry.with_value(1, || { + db.knobs().sum_wait_for_on_entry.with_value(2, || { + db.knobs().sum_should_panic.with_value(true, || db.sum("a")) + }) + }); + v + } + }); + + // `thread2` will wait until `thread1` has entered sum and then -- once it + // has set itself to block -- signal `thread1` to continue. 
+ let thread2 = std::thread::spawn({ + let db = db.snapshot(); + move || { + db.knobs().signal.wait_for(1); + db.knobs().signal_on_will_block.set(2); + db.sum("a") + } + }); + + let result1 = panic::catch_unwind(AssertUnwindSafe(|| thread1.join().unwrap())); + let result2 = panic::catch_unwind(AssertUnwindSafe(|| thread2.join().unwrap())); + + assert!(result1.is_err()); + assert!(result2.is_err()); +} diff --git a/crates/salsa/tests/storage_varieties/implementation.rs b/crates/salsa/tests/storage_varieties/implementation.rs new file mode 100644 index 0000000000..2843660f15 --- /dev/null +++ b/crates/salsa/tests/storage_varieties/implementation.rs @@ -0,0 +1,19 @@ +use crate::queries; +use std::cell::Cell; + +#[salsa::database(queries::GroupStruct)] +#[derive(Default)] +pub(crate) struct DatabaseImpl { + storage: salsa::Storage, + counter: Cell, +} + +impl queries::Counter for DatabaseImpl { + fn increment(&self) -> usize { + let v = self.counter.get(); + self.counter.set(v + 1); + v + } +} + +impl salsa::Database for DatabaseImpl {} diff --git a/crates/salsa/tests/storage_varieties/main.rs b/crates/salsa/tests/storage_varieties/main.rs new file mode 100644 index 0000000000..e92c61740e --- /dev/null +++ b/crates/salsa/tests/storage_varieties/main.rs @@ -0,0 +1,5 @@ +mod implementation; +mod queries; +mod tests; + +fn main() {} diff --git a/crates/salsa/tests/storage_varieties/queries.rs b/crates/salsa/tests/storage_varieties/queries.rs new file mode 100644 index 0000000000..0847fadefb --- /dev/null +++ b/crates/salsa/tests/storage_varieties/queries.rs @@ -0,0 +1,22 @@ +pub(crate) trait Counter: salsa::Database { + fn increment(&self) -> usize; +} + +#[salsa::query_group(GroupStruct)] +pub(crate) trait Database: Counter { + fn memoized(&self) -> usize; + fn volatile(&self) -> usize; +} + +/// Because this query is memoized, we only increment the counter +/// the first time it is invoked. 
+fn memoized(db: &dyn Database) -> usize { + db.volatile() +} + +/// Because this query is volatile, each time it is invoked, +/// we will increment the counter. +fn volatile(db: &dyn Database) -> usize { + db.salsa_runtime().report_untracked_read(); + db.increment() +} diff --git a/crates/salsa/tests/storage_varieties/tests.rs b/crates/salsa/tests/storage_varieties/tests.rs new file mode 100644 index 0000000000..f75c7c142f --- /dev/null +++ b/crates/salsa/tests/storage_varieties/tests.rs @@ -0,0 +1,49 @@ +#![cfg(test)] + +use crate::implementation::DatabaseImpl; +use crate::queries::Database; +use salsa::Database as _Database; +use salsa::Durability; + +#[test] +fn memoized_twice() { + let db = DatabaseImpl::default(); + let v1 = db.memoized(); + let v2 = db.memoized(); + assert_eq!(v1, v2); +} + +#[test] +fn volatile_twice() { + let mut db = DatabaseImpl::default(); + let v1 = db.volatile(); + let v2 = db.volatile(); // volatiles are cached, so 2nd read returns the same + assert_eq!(v1, v2); + + db.salsa_runtime_mut().synthetic_write(Durability::LOW); // clears volatile caches + + let v3 = db.volatile(); // will re-increment the counter + let v4 = db.volatile(); // second call will be cached + assert_eq!(v1 + 1, v3); + assert_eq!(v3, v4); +} + +#[test] +fn intermingled() { + let mut db = DatabaseImpl::default(); + let v1 = db.volatile(); + let v2 = db.memoized(); + let v3 = db.volatile(); // cached + let v4 = db.memoized(); // cached + + assert_eq!(v1, v2); + assert_eq!(v1, v3); + assert_eq!(v2, v4); + + db.salsa_runtime_mut().synthetic_write(Durability::LOW); // clears volatile caches + + let v5 = db.memoized(); // re-executes volatile, caches new result + let v6 = db.memoized(); // re-use cached result + assert_eq!(v4 + 1, v5); + assert_eq!(v5, v6); +} diff --git a/crates/salsa/tests/transparent.rs b/crates/salsa/tests/transparent.rs new file mode 100644 index 0000000000..2e6dd4267b --- /dev/null +++ b/crates/salsa/tests/transparent.rs @@ -0,0 +1,39 @@ +//! 
Test that transparent (uncached) queries work + +#[salsa::query_group(QueryGroupStorage)] +trait QueryGroup { + #[salsa::input] + fn input(&self, x: u32) -> u32; + #[salsa::transparent] + fn wrap(&self, x: u32) -> u32; + fn get(&self, x: u32) -> u32; +} + +fn wrap(db: &dyn QueryGroup, x: u32) -> u32 { + db.input(x) +} + +fn get(db: &dyn QueryGroup, x: u32) -> u32 { + db.wrap(x) +} + +#[salsa::database(QueryGroupStorage)] +#[derive(Default)] +struct Database { + storage: salsa::Storage, +} + +impl salsa::Database for Database {} + +#[test] +fn transparent_queries_work() { + let mut db = Database::default(); + + db.set_input(1, 10); + assert_eq!(db.get(1), 10); + assert_eq!(db.get(1), 10); + + db.set_input(1, 92); + assert_eq!(db.get(1), 92); + assert_eq!(db.get(1), 92); +} diff --git a/crates/salsa/tests/variadic.rs b/crates/salsa/tests/variadic.rs new file mode 100644 index 0000000000..cb857844eb --- /dev/null +++ b/crates/salsa/tests/variadic.rs @@ -0,0 +1,51 @@ +#[salsa::query_group(HelloWorld)] +trait HelloWorldDatabase: salsa::Database { + #[salsa::input] + fn input(&self, a: u32, b: u32) -> u32; + + fn none(&self) -> u32; + + fn one(&self, k: u32) -> u32; + + fn two(&self, a: u32, b: u32) -> u32; + + fn trailing(&self, a: u32, b: u32) -> u32; +} + +fn none(_db: &dyn HelloWorldDatabase) -> u32 { + 22 +} + +fn one(_db: &dyn HelloWorldDatabase, k: u32) -> u32 { + k * 2 +} + +fn two(_db: &dyn HelloWorldDatabase, a: u32, b: u32) -> u32 { + a * b +} + +fn trailing(_db: &dyn HelloWorldDatabase, a: u32, b: u32) -> u32 { + a - b +} + +#[salsa::database(HelloWorld)] +#[derive(Default)] +struct DatabaseStruct { + storage: salsa::Storage, +} + +impl salsa::Database for DatabaseStruct {} + +#[test] +fn execute() { + let mut db = DatabaseStruct::default(); + + // test what happens with inputs: + db.set_input(1, 2, 3); + assert_eq!(db.input(1, 2), 3); + + assert_eq!(db.none(), 22); + assert_eq!(db.one(11), 22); + assert_eq!(db.two(11, 2), 22); + assert_eq!(db.trailing(24, 
2), 22); +} diff --git a/crates/span/Cargo.toml b/crates/span/Cargo.toml index a4abba29bb..7093f3a691 100644 --- a/crates/span/Cargo.toml +++ b/crates/span/Cargo.toml @@ -11,7 +11,7 @@ authors.workspace = true [dependencies] la-arena.workspace = true -rust-analyzer-salsa.workspace = true +salsa.workspace = true # local deps From 0a6197df973586315c12e673a74e99308d593c61 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 7 Feb 2024 16:30:00 +0100 Subject: [PATCH 040/122] rustfmt --- crates/salsa/src/derived.rs | 34 +++------ crates/salsa/src/derived/slot.rs | 73 +++++-------------- crates/salsa/src/input.rs | 39 +++------- crates/salsa/src/intern_id.rs | 4 +- crates/salsa/src/interned.rs | 42 ++++------- crates/salsa/src/lib.rs | 31 ++------ crates/salsa/src/lru.rs | 17 +---- crates/salsa/src/plumbing.rs | 5 +- crates/salsa/src/revision.rs | 8 +- crates/salsa/src/runtime.rs | 50 ++++--------- crates/salsa/src/runtime/dependency_graph.rs | 43 ++--------- crates/salsa/src/runtime/local_state.rs | 33 ++------- crates/salsa/src/storage.rs | 10 +-- crates/salsa/tests/cycles.rs | 16 +--- crates/salsa/tests/incremental/constants.rs | 5 +- crates/salsa/tests/interned.rs | 14 +--- crates/salsa/tests/on_demand_inputs.rs | 6 +- crates/salsa/tests/panic_safely.rs | 4 +- .../parallel/parallel_cycle_none_recover.rs | 6 +- crates/salsa/tests/parallel/stress.rs | 22 ++---- crates/salsa/tests/parallel/true_parallel.rs | 31 ++++---- 21 files changed, 128 insertions(+), 365 deletions(-) diff --git a/crates/salsa/src/derived.rs b/crates/salsa/src/derived.rs index ede4230a7f..404e10e196 100644 --- a/crates/salsa/src/derived.rs +++ b/crates/salsa/src/derived.rs @@ -108,9 +108,7 @@ where query_index: Q::QUERY_INDEX, key_index, }; - entry - .or_insert_with(|| Arc::new(Slot::new(key.clone(), database_key_index))) - .clone() + entry.or_insert_with(|| Arc::new(Slot::new(key.clone(), database_key_index))).clone() } } @@ -152,13 +150,7 @@ where assert_eq!(input.group_index, self.group_index); 
assert_eq!(input.query_index, Q::QUERY_INDEX); debug_assert!(revision < db.salsa_runtime().current_revision()); - let slot = self - .slot_map - .read() - .get_index(input.key_index as usize) - .unwrap() - .1 - .clone(); + let slot = self.slot_map.read().get_index(input.key_index as usize).unwrap().1.clone(); slot.maybe_changed_after(db, revision) } @@ -166,22 +158,17 @@ where db.unwind_if_cancelled(); let slot = self.slot(key); - let StampedValue { - value, - durability, - changed_at, - } = slot.read(db); + let StampedValue { value, durability, changed_at } = slot.read(db); if let Some(evicted) = self.lru_list.record_use(&slot) { evicted.evict(); } - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - slot.database_key_index(), - durability, - changed_at, - ); + db.salsa_runtime().report_query_read_and_unwind_if_cycle_resulted( + slot.database_key_index(), + durability, + changed_at, + ); value } @@ -195,10 +182,7 @@ where C: std::iter::FromIterator>, { let slot_map = self.slot_map.read(); - slot_map - .values() - .filter_map(|slot| slot.as_table_entry()) - .collect() + slot_map.values().filter_map(|slot| slot.as_table_entry()).collect() } } diff --git a/crates/salsa/src/derived/slot.rs b/crates/salsa/src/derived/slot.rs index 19d75f0775..957d628984 100644 --- a/crates/salsa/src/derived/slot.rs +++ b/crates/salsa/src/derived/slot.rs @@ -209,14 +209,7 @@ where } } - self.execute( - db, - runtime, - revision_now, - active_query, - panic_guard, - old_memo, - ) + self.execute(db, runtime, revision_now, active_query, panic_guard, old_memo) } fn execute( @@ -232,9 +225,7 @@ where db.salsa_event(Event { runtime_id: db.salsa_runtime().id(), - kind: EventKind::WillExecute { - database_key: self.database_key_index, - }, + kind: EventKind::WillExecute { database_key: self.database_key_index }, }); // Query was not previously executed, or value is potentially @@ -310,22 +301,12 @@ where changed_at: revisions.changed_at, }; - let memo_value = if 
self.should_memoize_value(&self.key) { - Some(new_value.value.clone()) - } else { - None - }; + let memo_value = + if self.should_memoize_value(&self.key) { Some(new_value.value.clone()) } else { None }; - debug!( - "read_upgrade({:?}): result.revisions = {:#?}", - self, revisions, - ); + debug!("read_upgrade({:?}): result.revisions = {:#?}", self, revisions,); - panic_guard.proceed(Some(Memo { - value: memo_value, - verified_at: revision_now, - revisions, - })); + panic_guard.proceed(Some(Memo { value: memo_value, verified_at: revision_now, revisions })); new_value } @@ -388,10 +369,7 @@ where value: value.clone(), }; - info!( - "{:?}: returning memoized value changed at {:?}", - self, value.changed_at - ); + info!("{:?}: returning memoized value changed at {:?}", self, value.changed_at); ProbeState::UpToDate(value) } else { @@ -503,11 +481,9 @@ where // If we know when value last changed, we can return right away. // Note that we don't need the actual value to be available. ProbeState::NoValue(_, changed_at) - | ProbeState::UpToDate(StampedValue { - value: _, - durability: _, - changed_at, - }) => MaybeChangedSinceProbeState::ChangedAt(changed_at), + | ProbeState::UpToDate(StampedValue { value: _, durability: _, changed_at }) => { + MaybeChangedSinceProbeState::ChangedAt(changed_at) + } // If we have nothing cached, then value may have changed. ProbeState::NotComputed(_) => MaybeChangedSinceProbeState::ChangedAt(revision_now), @@ -561,14 +537,8 @@ where // We found that this memoized value may have changed // but we have an old value. We can re-run the code and // actually *check* if it has changed. - let StampedValue { changed_at, .. } = self.execute( - db, - runtime, - revision_now, - active_query, - panic_guard, - Some(old_memo), - ); + let StampedValue { changed_at, .. 
} = + self.execute(db, runtime, revision_now, active_query, panic_guard, Some(old_memo)); changed_at > revision } else { // We found that inputs to this memoized value may have chanced @@ -605,10 +575,7 @@ where Q: QueryFunction, { fn in_progress(id: RuntimeId) -> Self { - QueryState::InProgress { - id, - anyone_waiting: Default::default(), - } + QueryState::InProgress { id, anyone_waiting: Default::default() } } } @@ -632,11 +599,7 @@ where slot: &'me Slot, runtime: &'me Runtime, ) -> Self { - Self { - database_key_index, - slot, - runtime, - } + Self { database_key_index, slot, runtime } } /// Indicates that we have concluded normally (without panicking). @@ -674,8 +637,7 @@ where // acquire a mutex; the mutex will guarantee that all writes // we are interested in are visible. if anyone_waiting.load(Ordering::Relaxed) { - self.runtime - .unblock_queries_blocked_on(self.database_key_index, wait_result); + self.runtime.unblock_queries_blocked_on(self.database_key_index, wait_result); } } _ => panic!( @@ -784,9 +746,8 @@ where // are only interested in finding out whether the // input changed *again*. 
QueryInputs::Tracked { inputs } => { - let changed_input = inputs - .iter() - .find(|&&input| db.maybe_changed_after(input, verified_at)); + let changed_input = + inputs.iter().find(|&&input| db.maybe_changed_after(input, verified_at)); if let Some(input) = changed_input { debug!("validate_memoized_value: `{:?}` may have changed", input); diff --git a/crates/salsa/src/input.rs b/crates/salsa/src/input.rs index edcff7e6b0..037e45b908 100644 --- a/crates/salsa/src/input.rs +++ b/crates/salsa/src/input.rs @@ -50,10 +50,7 @@ where const CYCLE_STRATEGY: crate::plumbing::CycleRecoveryStrategy = CycleRecoveryStrategy::Panic; fn new(group_index: u16) -> Self { - InputStorage { - group_index, - slots: Default::default(), - } + InputStorage { group_index, slots: Default::default() } } fn fmt_index( @@ -91,18 +88,13 @@ where .get(key) .unwrap_or_else(|| panic!("no value set for {:?}({:?})", Q::default(), key)); - let StampedValue { - value, + let StampedValue { value, durability, changed_at } = slot.stamped_value.read().clone(); + + db.salsa_runtime().report_query_read_and_unwind_if_cycle_resulted( + slot.database_key_index, durability, changed_at, - } = slot.stamped_value.read().clone(); - - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - slot.database_key_index, - durability, - changed_at, - ); + ); value } @@ -133,10 +125,7 @@ where Q: Query, { fn maybe_changed_after(&self, _db: &>::DynDb, revision: Revision) -> bool { - debug!( - "maybe_changed_after(slot={:?}, revision={:?})", - self, revision, - ); + debug!("maybe_changed_after(slot={:?}, revision={:?})", self, revision,); let changed_at = self.stamped_value.read().changed_at; @@ -160,13 +149,7 @@ where Q: Query, { fn set(&self, runtime: &mut Runtime, key: &Q::Key, value: Q::Value, durability: Durability) { - tracing::debug!( - "{:?}({:?}) = {:?} ({:?})", - Q::default(), - key, - value, - durability - ); + tracing::debug!("{:?}({:?}) = {:?} ({:?})", Q::default(), key, value, durability); // The 
value is changing, so we need a new revision (*). We also // need to update the 'last changed' revision by invoking @@ -190,11 +173,7 @@ where // racing with somebody else to modify this same cell. // (Otherwise, someone else might write a *newer* revision // into the same cell while we block on the lock.) - let stamped_value = StampedValue { - value, - durability, - changed_at: next_revision, - }; + let stamped_value = StampedValue { value, durability, changed_at: next_revision }; match slots.entry(key.clone()) { Entry::Occupied(entry) => { diff --git a/crates/salsa/src/intern_id.rs b/crates/salsa/src/intern_id.rs index f003706266..b060d8aab6 100644 --- a/crates/salsa/src/intern_id.rs +++ b/crates/salsa/src/intern_id.rs @@ -63,9 +63,7 @@ impl InternId { /// `value` must be less than `MAX` pub const unsafe fn new_unchecked(value: u32) -> Self { debug_assert!(value < InternId::MAX); - InternId { - value: NonZeroU32::new_unchecked(value + 1), - } + InternId { value: NonZeroU32::new_unchecked(value + 1) } } /// Convert this raw-id into a u32 value. 
diff --git a/crates/salsa/src/interned.rs b/crates/salsa/src/interned.rs index dd986bc5d1..392534ea0b 100644 --- a/crates/salsa/src/interned.rs +++ b/crates/salsa/src/interned.rs @@ -110,10 +110,7 @@ where K: Eq + Hash, { fn default() -> Self { - Self { - map: Default::default(), - values: Default::default(), - } + Self { map: Default::default(), values: Default::default() } } } @@ -159,11 +156,7 @@ where query_index: Q::QUERY_INDEX, key_index: index.as_u32(), }; - Arc::new(Slot { - database_key_index, - value: owned_key2, - interned_at: revision_now, - }) + Arc::new(Slot { database_key_index, value: owned_key2, interned_at: revision_now }) }; let (slot, index); @@ -194,10 +187,7 @@ where const CYCLE_STRATEGY: crate::plumbing::CycleRecoveryStrategy = CycleRecoveryStrategy::Panic; fn new(group_index: u16) -> Self { - InternedStorage { - group_index, - tables: RwLock::new(InternTables::default()), - } + InternedStorage { group_index, tables: RwLock::new(InternTables::default()) } } fn fmt_index( @@ -231,12 +221,11 @@ where db.unwind_if_cancelled(); let (slot, index) = self.intern_index(db, key); let changed_at = slot.interned_at; - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - slot.database_key_index, - INTERN_DURABILITY, - changed_at, - ); + db.salsa_runtime().report_query_read_and_unwind_if_cycle_resulted( + slot.database_key_index, + INTERN_DURABILITY, + changed_at, + ); ::from_intern_id(index) } @@ -313,9 +302,7 @@ where const CYCLE_STRATEGY: CycleRecoveryStrategy = CycleRecoveryStrategy::Panic; fn new(_group_index: u16) -> Self { - LookupInternedStorage { - phantom: std::marker::PhantomData, - } + LookupInternedStorage { phantom: std::marker::PhantomData } } fn fmt_index( @@ -350,12 +337,11 @@ where let slot = interned_storage.lookup_value(index); let value = slot.value.clone(); let interned_at = slot.interned_at; - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - slot.database_key_index, - INTERN_DURABILITY, - 
interned_at, - ); + db.salsa_runtime().report_query_read_and_unwind_if_cycle_resulted( + slot.database_key_index, + INTERN_DURABILITY, + interned_at, + ); value } diff --git a/crates/salsa/src/lib.rs b/crates/salsa/src/lib.rs index 2ed5d0d131..19a9fd2571 100644 --- a/crates/salsa/src/lib.rs +++ b/crates/salsa/src/lib.rs @@ -216,18 +216,14 @@ impl fmt::Debug for EventKind { .debug_struct("DidValidateMemoizedValue") .field("database_key", database_key) .finish(), - EventKind::WillBlockOn { - other_runtime_id, - database_key, - } => fmt + EventKind::WillBlockOn { other_runtime_id, database_key } => fmt .debug_struct("WillBlockOn") .field("other_runtime_id", other_runtime_id) .field("database_key", database_key) .finish(), - EventKind::WillExecute { database_key } => fmt - .debug_struct("WillExecute") - .field("database_key", database_key) - .finish(), + EventKind::WillExecute { database_key } => { + fmt.debug_struct("WillExecute").field("database_key", database_key).finish() + } EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(), } } @@ -251,10 +247,7 @@ where .debug_struct("DidValidateMemoizedValue") .field("database_key", &database_key.debug(self.db)) .finish(), - EventKind::WillBlockOn { - other_runtime_id, - database_key, - } => fmt + EventKind::WillBlockOn { other_runtime_id, database_key } => fmt .debug_struct("WillBlockOn") .field("other_runtime_id", &other_runtime_id) .field("database_key", &database_key.debug(self.db)) @@ -707,9 +700,7 @@ impl Cycle { /// Returns a vector with the debug information for /// all the participants in the cycle. 
pub fn all_participants(&self, db: &DB) -> Vec { - self.participant_keys() - .map(|d| format!("{:?}", d.debug(db))) - .collect() + self.participant_keys().map(|d| format!("{:?}", d.debug(db))).collect() } /// Returns a vector with the debug information for @@ -733,18 +724,12 @@ impl Cycle { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fmt.debug_struct("UnexpectedCycle") .field("all_participants", &self.c.all_participants(self.db)) - .field( - "unexpected_participants", - &self.c.unexpected_participants(self.db), - ) + .field("unexpected_participants", &self.c.unexpected_participants(self.db)) .finish() } } - UnexpectedCycleDebug { - c: self, - db: db.ops_database(), - } + UnexpectedCycleDebug { c: self, db: db.ops_database() } } } diff --git a/crates/salsa/src/lru.rs b/crates/salsa/src/lru.rs index 39dd9dfe8c..bd54168ca2 100644 --- a/crates/salsa/src/lru.rs +++ b/crates/salsa/src/lru.rs @@ -68,10 +68,7 @@ where #[cfg_attr(not(test), allow(dead_code))] fn with_seed(seed: &str) -> Self { - Lru { - green_zone: AtomicUsize::new(0), - data: Mutex::new(LruData::with_seed(seed)), - } + Lru { green_zone: AtomicUsize::new(0), data: Mutex::new(LruData::with_seed(seed)) } } /// Adjust the total number of nodes permitted to have a value at @@ -143,13 +140,7 @@ where } fn with_rng(rng: Rand64) -> Self { - LruData { - end_yellow_zone: 0, - end_green_zone: 0, - end_red_zone: 0, - entries: Vec::new(), - rng, - } + LruData { end_yellow_zone: 0, end_green_zone: 0, end_red_zone: 0, entries: Vec::new(), rng } } fn green_zone(&self) -> std::ops::Range { @@ -294,9 +285,7 @@ where impl Default for LruIndex { fn default() -> Self { - Self { - index: AtomicUsize::new(std::usize::MAX), - } + Self { index: AtomicUsize::new(std::usize::MAX) } } } diff --git a/crates/salsa/src/plumbing.rs b/crates/salsa/src/plumbing.rs index c047b9f12a..f09d6d7337 100644 --- a/crates/salsa/src/plumbing.rs +++ b/crates/salsa/src/plumbing.rs @@ -78,10 +78,7 @@ pub trait QueryFunction: 
Query { key: &Self::Key, ) -> Self::Value { let _ = (db, cycle, key); - panic!( - "query `{:?}` doesn't support cycle fallback", - Self::default() - ) + panic!("query `{:?}` doesn't support cycle fallback", Self::default()) } } diff --git a/crates/salsa/src/revision.rs b/crates/salsa/src/revision.rs index f97295ced6..61d38a3bcc 100644 --- a/crates/salsa/src/revision.rs +++ b/crates/salsa/src/revision.rs @@ -21,9 +21,7 @@ impl Revision { } pub(crate) fn from(g: u32) -> Self { - Self { - generation: NonZeroU32::new(g).unwrap(), - } + Self { generation: NonZeroU32::new(g).unwrap() } } pub(crate) fn next(self) -> Revision { @@ -48,9 +46,7 @@ pub(crate) struct AtomicRevision { impl AtomicRevision { pub(crate) fn start() -> Self { - Self { - data: AtomicU32::new(START), - } + Self { data: AtomicU32::new(START) } } pub(crate) fn load(&self) -> Revision { diff --git a/crates/salsa/src/runtime.rs b/crates/salsa/src/runtime.rs index 29fe68bd3d..29c5afa37a 100644 --- a/crates/salsa/src/runtime.rs +++ b/crates/salsa/src/runtime.rs @@ -85,9 +85,7 @@ impl Runtime { let revision_guard = RevisionGuard::new(&self.shared_state); - let id = RuntimeId { - counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst), - }; + let id = RuntimeId { counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst) }; Runtime { id, @@ -242,8 +240,7 @@ impl Runtime { /// Queries which report untracked reads will be re-executed in the next /// revision. pub fn report_untracked_read(&self) { - self.local_state - .report_untracked_read(self.current_revision()); + self.local_state.report_untracked_read(self.current_revision()); } /// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`. @@ -251,8 +248,7 @@ impl Runtime { /// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html). 
pub fn report_synthetic_read(&self, durability: Durability) { let changed_at = self.last_changed_revision(durability); - self.local_state - .report_synthetic_read(durability, changed_at); + self.local_state.report_synthetic_read(durability, changed_at); } /// Handles a cycle in the dependency graph that was detected when the @@ -270,10 +266,7 @@ impl Runtime { database_key_index: DatabaseKeyIndex, to_id: RuntimeId, ) { - debug!( - "unblock_cycle_and_maybe_throw(database_key={:?})", - database_key_index - ); + debug!("unblock_cycle_and_maybe_throw(database_key={:?})", database_key_index); let mut from_stack = self.local_state.take_query_stack(); let from_id = self.id(); @@ -312,11 +305,7 @@ impl Runtime { Cycle::new(Arc::new(v)) }; - debug!( - "cycle {:?}, cycle_query {:#?}", - cycle.debug(db), - cycle_query, - ); + debug!("cycle {:?}, cycle_query {:#?}", cycle.debug(db), cycle_query,); // We can remove the cycle participants from the list of dependencies; // they are a strongly connected component (SCC) and we only care about @@ -329,12 +318,10 @@ impl Runtime { // are going to be unwound so that fallback can occur. 
dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut() - .skip_while( - |aq| match db.cycle_recovery_strategy(aq.database_key_index) { - CycleRecoveryStrategy::Panic => true, - CycleRecoveryStrategy::Fallback => false, - }, - ) + .skip_while(|aq| match db.cycle_recovery_strategy(aq.database_key_index) { + CycleRecoveryStrategy::Panic => true, + CycleRecoveryStrategy::Fallback => false, + }) .for_each(|aq| { debug!("marking {:?} for fallback", aq.database_key_index.debug(db)); aq.take_inputs_from(&cycle_query); @@ -404,10 +391,7 @@ impl Runtime { db.salsa_event(Event { runtime_id: self.id(), - kind: EventKind::WillBlockOn { - other_runtime_id: other_id, - database_key, - }, + kind: EventKind::WillBlockOn { other_runtime_id: other_id, database_key }, }); let stack = self.local_state.take_query_stack(); @@ -585,18 +569,12 @@ impl ActiveQuery { if dependencies.is_empty() { QueryInputs::NoInputs } else { - QueryInputs::Tracked { - inputs: dependencies.iter().copied().collect(), - } + QueryInputs::Tracked { inputs: dependencies.iter().copied().collect() } } } }; - QueryRevisions { - changed_at: self.changed_at, - inputs, - durability: self.durability, - } + QueryRevisions { changed_at: self.changed_at, inputs, durability: self.durability } } /// Adds any dependencies from `other` into `self`. @@ -673,9 +651,7 @@ impl RevisionGuard { shared_state.query_lock.raw().lock_shared_recursive(); } - Self { - shared_state: shared_state.clone(), - } + Self { shared_state: shared_state.clone() } } } diff --git a/crates/salsa/src/runtime/dependency_graph.rs b/crates/salsa/src/runtime/dependency_graph.rs index f69524b7fb..9fa2851d0e 100644 --- a/crates/salsa/src/runtime/dependency_graph.rs +++ b/crates/salsa/src/runtime/dependency_graph.rs @@ -103,21 +103,14 @@ impl DependencyGraph { // load up the next thread (i.e., we start at B/QB2, // and then load up the dependency on C/QC2). 
let edge = self.edges.get_mut(&id).unwrap(); - let prefix = edge - .stack - .iter_mut() - .take_while(|p| p.database_key_index != key) - .count(); + let prefix = edge.stack.iter_mut().take_while(|p| p.database_key_index != key).count(); closure(&mut edge.stack[prefix..]); id = edge.blocked_on_id; key = edge.blocked_on_key; } // Finally, we copy in the results from `from_stack`. - let prefix = from_stack - .iter_mut() - .take_while(|p| p.database_key_index != key) - .count(); + let prefix = from_stack.iter_mut().take_while(|p| p.database_key_index != key).count(); closure(&mut from_stack[prefix..]); } @@ -141,24 +134,13 @@ impl DependencyGraph { let mut others_unblocked = false; while id != from_id { let edge = self.edges.get(&id).unwrap(); - let prefix = edge - .stack - .iter() - .take_while(|p| p.database_key_index != key) - .count(); + let prefix = edge.stack.iter().take_while(|p| p.database_key_index != key).count(); let next_id = edge.blocked_on_id; let next_key = edge.blocked_on_key; - if let Some(cycle) = edge.stack[prefix..] 
- .iter() - .rev() - .find_map(|aq| aq.cycle.clone()) - { + if let Some(cycle) = edge.stack[prefix..].iter().rev().find_map(|aq| aq.cycle.clone()) { // Remove `id` from the list of runtimes blocked on `next_key`: - self.query_dependents - .get_mut(&next_key) - .unwrap() - .retain(|r| *r != id); + self.query_dependents.get_mut(&next_key).unwrap().retain(|r| *r != id); // Unblock runtime so that it can resume execution once lock is released: self.unblock_runtime(id, WaitResult::Cycle(cycle)); @@ -170,10 +152,7 @@ impl DependencyGraph { key = next_key; } - let prefix = from_stack - .iter() - .take_while(|p| p.database_key_index != key) - .count(); + let prefix = from_stack.iter().take_while(|p| p.database_key_index != key).count(); let this_unblocked = from_stack[prefix..].iter().any(|aq| aq.cycle.is_some()); (this_unblocked, others_unblocked) @@ -239,10 +218,7 @@ impl DependencyGraph { condvar: condvar.clone(), }, ); - self.query_dependents - .entry(database_key) - .or_default() - .push(from_id); + self.query_dependents.entry(database_key).or_default().push(from_id); condvar } @@ -253,10 +229,7 @@ impl DependencyGraph { database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { - let dependents = self - .query_dependents - .remove(&database_key) - .unwrap_or_default(); + let dependents = self.query_dependents.remove(&database_key).unwrap_or_default(); for from_id in dependents { self.unblock_runtime(from_id, wait_result.clone()); diff --git a/crates/salsa/src/runtime/local_state.rs b/crates/salsa/src/runtime/local_state.rs index b6c3573f00..b21f1ee4af 100644 --- a/crates/salsa/src/runtime/local_state.rs +++ b/crates/salsa/src/runtime/local_state.rs @@ -53,9 +53,7 @@ pub(crate) enum QueryInputs { impl Default for LocalState { fn default() -> Self { - LocalState { - query_stack: RefCell::new(Some(Vec::new())), - } + LocalState { query_stack: RefCell::new(Some(Vec::new())) } } } @@ -65,19 +63,11 @@ impl LocalState { let mut query_stack = self.query_stack.borrow_mut(); 
let query_stack = query_stack.as_mut().expect("local stack taken"); query_stack.push(ActiveQuery::new(database_key_index)); - ActiveQueryGuard { - local_state: self, - database_key_index, - push_len: query_stack.len(), - } + ActiveQueryGuard { local_state: self, database_key_index, push_len: query_stack.len() } } fn with_query_stack(&self, c: impl FnOnce(&mut Vec) -> R) -> R { - c(self - .query_stack - .borrow_mut() - .as_mut() - .expect("query stack taken")) + c(self.query_stack.borrow_mut().as_mut().expect("query stack taken")) } pub(super) fn query_in_progress(&self) -> bool { @@ -86,9 +76,7 @@ impl LocalState { pub(super) fn active_query(&self) -> Option { self.with_query_stack(|stack| { - stack - .last() - .map(|active_query| active_query.database_key_index) + stack.last().map(|active_query| active_query.database_key_index) }) } @@ -156,10 +144,7 @@ impl LocalState { /// the current thread is blocking. The stack must be restored /// with [`Self::restore_query_stack`] when the thread unblocks. pub(super) fn take_query_stack(&self) -> Vec { - assert!( - self.query_stack.borrow().is_some(), - "query stack already taken" - ); + assert!(self.query_stack.borrow().is_some(), "query stack already taken"); self.query_stack.take().unwrap() } @@ -188,10 +173,7 @@ impl ActiveQueryGuard<'_> { self.local_state.with_query_stack(|stack| { // Sanity check: pushes and pops should be balanced. assert_eq!(stack.len(), self.push_len); - debug_assert_eq!( - stack.last().unwrap().database_key_index, - self.database_key_index - ); + debug_assert_eq!(stack.last().unwrap().database_key_index, self.database_key_index); stack.pop().unwrap() }) } @@ -220,8 +202,7 @@ impl ActiveQueryGuard<'_> { /// If the active query is registered as a cycle participant, remove and /// return that cycle. 
pub(crate) fn take_cycle(&self) -> Option { - self.local_state - .with_query_stack(|stack| stack.last_mut()?.cycle.take()) + self.local_state.with_query_stack(|stack| stack.last_mut()?.cycle.take()) } } diff --git a/crates/salsa/src/storage.rs b/crates/salsa/src/storage.rs index 5e32633002..e0acf44041 100644 --- a/crates/salsa/src/storage.rs +++ b/crates/salsa/src/storage.rs @@ -12,10 +12,7 @@ pub struct Storage { impl Default for Storage { fn default() -> Self { - Self { - query_store: Default::default(), - runtime: Default::default(), - } + Self { query_store: Default::default(), runtime: Default::default() } } } @@ -51,9 +48,6 @@ impl Storage { /// thread. Using two database handles from the **same thread** can lead to /// deadlock. pub fn snapshot(&self) -> Self { - Storage { - query_store: self.query_store.clone(), - runtime: self.runtime.snapshot(), - } + Storage { query_store: self.query_store.clone(), runtime: self.runtime.snapshot() } } } diff --git a/crates/salsa/tests/cycles.rs b/crates/salsa/tests/cycles.rs index 004f866b83..4c3ec312f2 100644 --- a/crates/salsa/tests/cycles.rs +++ b/crates/salsa/tests/cycles.rs @@ -58,17 +58,13 @@ impl salsa::Database for DatabaseImpl {} impl ParallelDatabase for DatabaseImpl { fn snapshot(&self) -> Snapshot { - Snapshot::new(DatabaseImpl { - storage: self.storage.snapshot(), - }) + Snapshot::new(DatabaseImpl { storage: self.storage.snapshot() }) } } impl Default for DatabaseImpl { fn default() -> Self { - let res = DatabaseImpl { - storage: salsa::Storage::default(), - }; + let res = DatabaseImpl { storage: salsa::Storage::default() }; res } @@ -113,15 +109,11 @@ trait Database: salsa::Database { } fn recover_a(db: &dyn Database, cycle: &salsa::Cycle) -> Result<(), Error> { - Err(Error { - cycle: cycle.all_participants(db), - }) + Err(Error { cycle: cycle.all_participants(db) }) } fn recover_b(db: &dyn Database, cycle: &salsa::Cycle) -> Result<(), Error> { - Err(Error { - cycle: cycle.all_participants(db), - }) + 
Err(Error { cycle: cycle.all_participants(db) }) } fn memoized_a(db: &dyn Database) { diff --git a/crates/salsa/tests/incremental/constants.rs b/crates/salsa/tests/incremental/constants.rs index 30f42b136d..ea0eb81978 100644 --- a/crates/salsa/tests/incremental/constants.rs +++ b/crates/salsa/tests/incremental/constants.rs @@ -111,10 +111,7 @@ fn becomes_constant_with_change() { db.set_input_with_durability('b', 45, Durability::MEDIUM); assert_eq!(db.add('a', 'b'), 68); - assert_eq!( - Durability::MEDIUM, - AddQuery.in_db(&db).durability(('a', 'b')) - ); + assert_eq!(Durability::MEDIUM, AddQuery.in_db(&db).durability(('a', 'b'))); } // Test a subtle case in which an input changes from constant to diff --git a/crates/salsa/tests/interned.rs b/crates/salsa/tests/interned.rs index bf8683114c..b9b916d19a 100644 --- a/crates/salsa/tests/interned.rs +++ b/crates/salsa/tests/interned.rs @@ -12,9 +12,7 @@ impl salsa::Database for Database {} impl salsa::ParallelDatabase for Database { fn snapshot(&self) -> salsa::Snapshot { - salsa::Snapshot::new(Database { - storage: self.storage.snapshot(), - }) + salsa::Snapshot::new(Database { storage: self.storage.snapshot() }) } } @@ -71,14 +69,8 @@ fn test_intern2() { assert_eq!(bar0, bar1); assert_ne!(foo0, bar0); - assert_eq!( - ("x".to_string(), "foo".to_string()), - db.lookup_intern2(foo0) - ); - assert_eq!( - ("x".to_string(), "bar".to_string()), - db.lookup_intern2(bar0) - ); + assert_eq!(("x".to_string(), "foo".to_string()), db.lookup_intern2(foo0)); + assert_eq!(("x".to_string(), "bar".to_string()), db.lookup_intern2(bar0)); } #[test] diff --git a/crates/salsa/tests/on_demand_inputs.rs b/crates/salsa/tests/on_demand_inputs.rs index 990920256a..2ad4975eff 100644 --- a/crates/salsa/tests/on_demand_inputs.rs +++ b/crates/salsa/tests/on_demand_inputs.rs @@ -16,11 +16,7 @@ trait QueryGroup: salsa::Database + AsRef> { } fn a(db: &dyn QueryGroup, x: u32) -> u32 { - let durability = if x % 2 == 0 { - Durability::LOW - } else { - 
Durability::HIGH - }; + let durability = if x % 2 == 0 { Durability::LOW } else { Durability::HIGH }; db.salsa_runtime().report_synthetic_read(durability); let external_state: &HashMap = db.as_ref(); external_state[&x] diff --git a/crates/salsa/tests/panic_safely.rs b/crates/salsa/tests/panic_safely.rs index e51a74e1f1..c11ae9c214 100644 --- a/crates/salsa/tests/panic_safely.rs +++ b/crates/salsa/tests/panic_safely.rs @@ -33,9 +33,7 @@ impl salsa::Database for DatabaseStruct {} impl salsa::ParallelDatabase for DatabaseStruct { fn snapshot(&self) -> Snapshot { - Snapshot::new(DatabaseStruct { - storage: self.storage.snapshot(), - }) + Snapshot::new(DatabaseStruct { storage: self.storage.snapshot() }) } } diff --git a/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs b/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs index 65f1b6ea16..1467041422 100644 --- a/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs +++ b/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs @@ -37,11 +37,7 @@ fn parallel_cycle_none_recover() { // We expect A to propagate a panic, which causes us to use the sentinel // type `Canceled`. 
- assert!(thread_a - .join() - .unwrap_err() - .downcast_ref::() - .is_some()); + assert!(thread_a.join().unwrap_err().downcast_ref::().is_some()); } #[salsa::query_group(ParallelCycleNoneRecover)] diff --git a/crates/salsa/tests/parallel/stress.rs b/crates/salsa/tests/parallel/stress.rs index 16a1b79044..2fa317b2b9 100644 --- a/crates/salsa/tests/parallel/stress.rs +++ b/crates/salsa/tests/parallel/stress.rs @@ -38,9 +38,7 @@ impl salsa::Database for StressDatabaseImpl {} impl salsa::ParallelDatabase for StressDatabaseImpl { fn snapshot(&self) -> Snapshot { - Snapshot::new(StressDatabaseImpl { - storage: self.storage.snapshot(), - }) + Snapshot::new(StressDatabaseImpl { storage: self.storage.snapshot() }) } } @@ -53,10 +51,7 @@ enum Query { enum MutatorOp { WriteOp(WriteOp), - LaunchReader { - ops: Vec, - check_cancellation: bool, - }, + LaunchReader { ops: Vec, check_cancellation: bool }, } #[derive(Debug)] @@ -158,13 +153,12 @@ fn stress_test() { for op in write_ops { match op { MutatorOp::WriteOp(w) => w.execute(&mut db), - MutatorOp::LaunchReader { - ops, - check_cancellation, - } => all_threads.push(std::thread::spawn({ - let db = db.snapshot(); - move || Cancelled::catch(|| db_reader_thread(&db, ops, check_cancellation)) - })), + MutatorOp::LaunchReader { ops, check_cancellation } => { + all_threads.push(std::thread::spawn({ + let db = db.snapshot(); + move || Cancelled::catch(|| db_reader_thread(&db, ops, check_cancellation)) + })) + } } } diff --git a/crates/salsa/tests/parallel/true_parallel.rs b/crates/salsa/tests/parallel/true_parallel.rs index 03432dca97..d0e58efd1a 100644 --- a/crates/salsa/tests/parallel/true_parallel.rs +++ b/crates/salsa/tests/parallel/true_parallel.rs @@ -18,11 +18,10 @@ fn true_parallel_different_keys() { let thread1 = std::thread::spawn({ let db = db.snapshot(); move || { - let v = db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs() - .sum_wait_for_on_exit - .with_value(2, || db.sum("a")) - }); + let v = db + .knobs() 
+ .sum_signal_on_entry + .with_value(1, || db.knobs().sum_wait_for_on_exit.with_value(2, || db.sum("a"))); v } }); @@ -32,9 +31,10 @@ fn true_parallel_different_keys() { let thread2 = std::thread::spawn({ let db = db.snapshot(); move || { - let v = db.knobs().sum_wait_for_on_entry.with_value(1, || { - db.knobs().sum_signal_on_exit.with_value(2, || db.sum("b")) - }); + let v = db + .knobs() + .sum_wait_for_on_entry + .with_value(1, || db.knobs().sum_signal_on_exit.with_value(2, || db.sum("b"))); v } }); @@ -58,11 +58,10 @@ fn true_parallel_same_keys() { let thread1 = std::thread::spawn({ let db = db.snapshot(); move || { - let v = db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs() - .sum_wait_for_on_entry - .with_value(2, || db.sum("abc")) - }); + let v = db + .knobs() + .sum_signal_on_entry + .with_value(1, || db.knobs().sum_wait_for_on_entry.with_value(2, || db.sum("abc"))); v } }); @@ -99,9 +98,9 @@ fn true_parallel_propagate_panic() { let db = db.snapshot(); move || { let v = db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs().sum_wait_for_on_entry.with_value(2, || { - db.knobs().sum_should_panic.with_value(true, || db.sum("a")) - }) + db.knobs() + .sum_wait_for_on_entry + .with_value(2, || db.knobs().sum_should_panic.with_value(true, || db.sum("a"))) }); v } From bc5823d5bbc2387c92bc109daec6cb0c56f717d6 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 7 Feb 2024 16:30:07 +0100 Subject: [PATCH 041/122] Bump salsa dev-deps --- Cargo.lock | 58 ++++++++++++++++++++--------------------- crates/salsa/Cargo.toml | 12 ++++----- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60e2fa059b..aebff14048 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -476,13 +476,13 @@ checksum = "7ab85b9b05e3978cc9a9cf8fea7f01b494e1a09ed3037e16ba39edc7a29eb61a" [[package]] name = "getrandom" -version = "0.1.16" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", - "wasi 0.9.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -943,6 +943,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + [[package]] name = "libmimalloc-sys" version = "0.1.33" @@ -1128,7 +1134,7 @@ checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.42.0", ] @@ -1198,6 +1204,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", + "libm", +] + [[package]] name = "num_cpus" version = "1.15.0" @@ -1575,22 +1591,20 @@ dependencies = [ [[package]] name = "rand" -version = "0.7.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ - "getrandom", "libc", "rand_chacha", "rand_core", - "rand_hc", ] [[package]] name = "rand_chacha" -version = "0.2.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", @@ -1598,31 +1612,23 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.5.1" +version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "rand_distr" -version = "0.2.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ + "num-traits", "rand", ] -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core", -] - [[package]] name = "rayon" version = "1.8.0" @@ -2362,12 +2368,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" diff --git a/crates/salsa/Cargo.toml b/crates/salsa/Cargo.toml index 77b0c49c7e..a2c08b1252 100644 --- a/crates/salsa/Cargo.toml +++ b/crates/salsa/Cargo.toml @@ -25,9 +25,9 @@ triomphe = "0.1.11" salsa-macros = { version = "0.0.0", path = "salsa-macros" } [dev-dependencies] -diff = "0.1.0" -linked-hash-map = "0.5.2" -rand = "0.7" -rand_distr = "0.2.1" -test-log = "0.2.7" -insta = "1.8.0" +diff = "0.1.13" +linked-hash-map = "0.5.6" +rand = "0.8.5" +rand_distr = "0.4.3" +test-log = "0.2.14" +insta = "1.18.0" From def5a1d0c45b917e8d8bf6df274be5dcc51a9add Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 7 Feb 2024 16:30:13 +0100 Subject: [PATCH 042/122] unreachable-pub --- crates/salsa/salsa-macros/src/parenthesized.rs | 2 +- crates/salsa/src/lru.rs | 8 ++++---- 2 files changed, 5 
insertions(+), 5 deletions(-) diff --git a/crates/salsa/salsa-macros/src/parenthesized.rs b/crates/salsa/salsa-macros/src/parenthesized.rs index e981764460..b755879f19 100644 --- a/crates/salsa/salsa-macros/src/parenthesized.rs +++ b/crates/salsa/salsa-macros/src/parenthesized.rs @@ -1,4 +1,4 @@ -pub(crate) struct Parenthesized(pub T); +pub(crate) struct Parenthesized(pub(crate) T); impl syn::parse::Parse for Parenthesized where diff --git a/crates/salsa/src/lru.rs b/crates/salsa/src/lru.rs index bd54168ca2..18441ea004 100644 --- a/crates/salsa/src/lru.rs +++ b/crates/salsa/src/lru.rs @@ -62,7 +62,7 @@ where Node: LruNode, { /// Creates a new LRU list where LRU caching is disabled. - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self::with_seed(LRU_SEED) } @@ -73,7 +73,7 @@ where /// Adjust the total number of nodes permitted to have a value at /// once. If `len` is zero, this disables LRU caching completely. - pub fn set_lru_capacity(&self, len: usize) { + pub(crate) fn set_lru_capacity(&self, len: usize) { let mut data = self.data.lock(); // We require each zone to have at least 1 slot. Therefore, @@ -102,7 +102,7 @@ where } /// Records that `node` was used. This may displace an old node (if the LRU limits are - pub fn record_use(&self, node: &Arc) -> Option> { + pub(crate) fn record_use(&self, node: &Arc) -> Option> { tracing::debug!("record_use(node={:?})", node); // Load green zone length and check if the LRU cache is even enabled. 
@@ -125,7 +125,7 @@ where self.data.lock().record_use(node) } - pub fn purge(&self) { + pub(crate) fn purge(&self) { self.green_zone.store(0, Ordering::SeqCst); *self.data.lock() = LruData::with_seed(LRU_SEED); } From e339c65a1a8f06e153ca0bf81c732f6b441eef45 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 7 Feb 2024 16:30:20 +0100 Subject: [PATCH 043/122] Remove dev-dependency insta --- Cargo.lock | 145 +----------- crates/salsa/Cargo.toml | 3 +- .../salsa-macros/src/database_storage.rs | 7 +- .../salsa/salsa-macros/src/parenthesized.rs | 1 + crates/salsa/salsa-macros/src/query_group.rs | 36 ++- crates/salsa/src/derived.rs | 1 + crates/salsa/src/derived/slot.rs | 1 + crates/salsa/src/doctest.rs | 1 + crates/salsa/src/durability.rs | 1 + crates/salsa/src/hash.rs | 1 + crates/salsa/src/input.rs | 1 + crates/salsa/src/intern_id.rs | 1 + crates/salsa/src/interned.rs | 1 + crates/salsa/src/lib.rs | 1 + crates/salsa/src/lru.rs | 1 + crates/salsa/src/plumbing.rs | 1 + crates/salsa/src/revision.rs | 1 + crates/salsa/src/runtime.rs | 1 + crates/salsa/src/runtime/dependency_graph.rs | 1 + crates/salsa/src/runtime/local_state.rs | 1 + crates/salsa/src/storage.rs | 1 + crates/salsa/tests/cycles.rs | 214 +++++++++--------- crates/salsa/tests/on_demand_inputs.rs | 62 ++--- .../parallel/parallel_cycle_none_recover.rs | 14 +- 24 files changed, 188 insertions(+), 310 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aebff14048..4be85b1bc1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -221,18 +221,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "console" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "countme" version = "3.0.1" @@ -375,12 +363,6 @@ dependencies = [ "log", ] -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - [[package]] name = "env_logger" version = "0.10.2" @@ -847,19 +829,6 @@ dependencies = [ "libc", ] -[[package]] -name = "insta" -version = "1.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d64600be34b2fcfc267740a243fa7744441bb4947a619ac4e5bb6507f35fbfc" -dependencies = [ - "console", - "lazy_static", - "linked-hash-map", - "similar", - "yaml-rust", -] - [[package]] name = "intern" version = "0.0.0" @@ -943,12 +912,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "libm" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" - [[package]] name = "libmimalloc-sys" version = "0.1.33" @@ -1204,16 +1167,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "num-traits" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" -dependencies = [ - "autocfg", - "libm", -] - [[package]] name = "num_cpus" version = "1.15.0" @@ -1274,7 +1227,7 @@ dependencies = [ "libc", "redox_syscall 0.4.1", "smallvec", - "windows-targets 0.48.0", + "windows-targets", ] [[package]] @@ -1619,16 +1572,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "rand_distr" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" -dependencies = [ - "num-traits", - "rand", -] - [[package]] name = "rayon" version = "1.8.0" @@ -1769,14 +1712,13 @@ name = "salsa" version = "0.0.0" dependencies = [ "diff", + "expect-test", "indexmap", - "insta", "linked-hash-map", "lock_api", "oorandom", "parking_lot", "rand", - "rand_distr", "rustc-hash", "salsa-macros", "smallvec", @@ -1886,12 
+1828,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "similar" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" - [[package]] name = "smallvec" version = "1.12.0" @@ -2426,16 +2362,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.0", + "windows-targets", ] [[package]] @@ -2453,21 +2380,6 @@ dependencies = [ "windows_x86_64_msvc 0.48.0", ] -[[package]] -name = "windows-targets" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" -dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -2480,12 +2392,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -2498,12 +2404,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -2516,12 +2416,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" -[[package]] -name = "windows_i686_gnu" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -2534,12 +2428,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" -[[package]] -name = "windows_i686_msvc" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -2552,12 +2440,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" - [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -2570,12 +2452,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -2588,12 +2464,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" - [[package]] name = "write-json" version = "0.1.2" @@ -2643,15 +2513,6 @@ dependencies = [ "zip", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "zip" version = "0.6.6" diff --git a/crates/salsa/Cargo.toml b/crates/salsa/Cargo.toml index a2c08b1252..42885b47a6 100644 --- a/crates/salsa/Cargo.toml +++ b/crates/salsa/Cargo.toml @@ -28,6 +28,5 @@ salsa-macros = { version = "0.0.0", path = "salsa-macros" } diff = "0.1.13" linked-hash-map = "0.5.6" rand = "0.8.5" -rand_distr = "0.4.3" test-log = "0.2.14" -insta = "1.18.0" +expect-test = "1.4.0" diff --git a/crates/salsa/salsa-macros/src/database_storage.rs b/crates/salsa/salsa-macros/src/database_storage.rs index 52d424c5f8..26b8e61309 100644 --- a/crates/salsa/salsa-macros/src/database_storage.rs +++ b/crates/salsa/salsa-macros/src/database_storage.rs @@ -1,3 +1,4 @@ +//! 
use heck::ToSnakeCase; use proc_macro::TokenStream; use syn::parse::{Parse, ParseStream}; @@ -113,10 +114,8 @@ pub(crate) fn database(args: TokenStream, input: TokenStream) -> TokenStream { let mut maybe_changed_ops = proc_macro2::TokenStream::new(); let mut cycle_recovery_strategy_ops = proc_macro2::TokenStream::new(); let mut for_each_ops = proc_macro2::TokenStream::new(); - for ((QueryGroup { group_path }, group_storage), group_index) in query_groups - .iter() - .zip(&query_group_storage_names) - .zip(0_u16..) + for ((QueryGroup { group_path }, group_storage), group_index) in + query_groups.iter().zip(&query_group_storage_names).zip(0_u16..) { fmt_ops.extend(quote! { #group_index => { diff --git a/crates/salsa/salsa-macros/src/parenthesized.rs b/crates/salsa/salsa-macros/src/parenthesized.rs index b755879f19..d61166cecd 100644 --- a/crates/salsa/salsa-macros/src/parenthesized.rs +++ b/crates/salsa/salsa-macros/src/parenthesized.rs @@ -1,3 +1,4 @@ +//! pub(crate) struct Parenthesized(pub(crate) T); impl syn::parse::Parse for Parenthesized diff --git a/crates/salsa/salsa-macros/src/query_group.rs b/crates/salsa/salsa-macros/src/query_group.rs index 5cb6624501..0b1ee2d3b5 100644 --- a/crates/salsa/salsa-macros/src/query_group.rs +++ b/crates/salsa/salsa-macros/src/query_group.rs @@ -1,3 +1,4 @@ +//! 
use std::{convert::TryFrom, iter::FromIterator}; use crate::parenthesized::Parenthesized; @@ -20,12 +21,9 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream let input_span = input.span(); let (trait_attrs, salsa_attrs) = filter_attrs(input.attrs); if !salsa_attrs.is_empty() { - return Error::new( - input_span, - format!("unsupported attributes: {:?}", salsa_attrs), - ) - .to_compile_error() - .into(); + return Error::new(input_span, format!("unsupported attributes: {:?}", salsa_attrs)) + .to_compile_error() + .into(); } let trait_vis = input.vis; @@ -43,7 +41,8 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream let mut cycle = None; let mut invoke = None; - let mut query_type = format_ident!("{}Query", query_name.to_string().to_upper_camel_case()); + let mut query_type = + format_ident!("{}Query", query_name.to_string().to_upper_camel_case()); let mut num_storages = 0; // Extract attributes. @@ -175,9 +174,7 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream fn_name: lookup_fn_name, receiver: self_receiver.clone(), attrs: vec![], // FIXME -- some automatically generated docs on this method? 
- storage: QueryStorage::InternedLookup { - intern_query_type: query_type.clone(), - }, + storage: QueryStorage::InternedLookup { intern_query_type: query_type.clone() }, keys: lookup_keys, value: lookup_value, invoke: None, @@ -211,9 +208,9 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream let mut storage_fields = proc_macro2::TokenStream::new(); let mut queries_with_storage = vec![]; for query in &queries { - #[allow(clippy::map_identity)] // clippy is incorrect here, this is not the identity function due to match ergonomics - let (key_names, keys): (Vec<_>, Vec<_>) = - query.keys.iter().map(|(a, b)| (a, b)).unzip(); + #[allow(clippy::map_identity)] + // clippy is incorrect here, this is not the identity function due to match ergonomics + let (key_names, keys): (Vec<_>, Vec<_>) = query.keys.iter().map(|(a, b)| (a, b)).unzip(); let value = &query.value; let fn_name = &query.fn_name; let qt = &query.query_type; @@ -361,11 +358,8 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream } }); - let non_transparent_queries = || { - queries - .iter() - .filter(|q| !matches!(q.storage, QueryStorage::Transparent)) - }; + let non_transparent_queries = + || queries.iter().filter(|q| !matches!(q.storage, QueryStorage::Transparent)); // Emit the query types. for (query, query_index) in non_transparent_queries().zip(0_u16..) { @@ -681,11 +675,7 @@ impl TryFrom for SalsaAttr { } fn is_not_salsa_attr_path(path: &syn::Path) -> bool { - path.segments - .first() - .map(|s| s.ident != "salsa") - .unwrap_or(true) - || path.segments.len() != 2 + path.segments.first().map(|s| s.ident != "salsa").unwrap_or(true) || path.segments.len() != 2 } fn filter_attrs(attrs: Vec) -> (Vec, Vec) { diff --git a/crates/salsa/src/derived.rs b/crates/salsa/src/derived.rs index 404e10e196..c381e66e08 100644 --- a/crates/salsa/src/derived.rs +++ b/crates/salsa/src/derived.rs @@ -1,3 +1,4 @@ +//! 
use crate::debug::TableEntry; use crate::durability::Durability; use crate::hash::FxIndexMap; diff --git a/crates/salsa/src/derived/slot.rs b/crates/salsa/src/derived/slot.rs index 957d628984..4fad791a26 100644 --- a/crates/salsa/src/derived/slot.rs +++ b/crates/salsa/src/derived/slot.rs @@ -1,3 +1,4 @@ +//! use crate::debug::TableEntry; use crate::derived::MemoizationPolicy; use crate::durability::Durability; diff --git a/crates/salsa/src/doctest.rs b/crates/salsa/src/doctest.rs index 7bafff0d00..29a8066356 100644 --- a/crates/salsa/src/doctest.rs +++ b/crates/salsa/src/doctest.rs @@ -1,3 +1,4 @@ +//! #![allow(dead_code)] /// Test that a database with a key/value that is not `Send` will, diff --git a/crates/salsa/src/durability.rs b/crates/salsa/src/durability.rs index 58a81e3786..0c82f6345a 100644 --- a/crates/salsa/src/durability.rs +++ b/crates/salsa/src/durability.rs @@ -1,3 +1,4 @@ +//! /// Describes how likely a value is to change -- how "durable" it is. /// By default, inputs have `Durability::LOW` and interned values have /// `Durability::HIGH`. But inputs can be explicitly set with other diff --git a/crates/salsa/src/hash.rs b/crates/salsa/src/hash.rs index 3b2d7df3fb..47a2dd1ce0 100644 --- a/crates/salsa/src/hash.rs +++ b/crates/salsa/src/hash.rs @@ -1,3 +1,4 @@ +//! pub(crate) type FxHasher = std::hash::BuildHasherDefault; pub(crate) type FxIndexSet = indexmap::IndexSet; pub(crate) type FxIndexMap = indexmap::IndexMap; diff --git a/crates/salsa/src/input.rs b/crates/salsa/src/input.rs index 037e45b908..f6188d4a84 100644 --- a/crates/salsa/src/input.rs +++ b/crates/salsa/src/input.rs @@ -1,3 +1,4 @@ +//! use crate::debug::TableEntry; use crate::durability::Durability; use crate::hash::FxIndexMap; diff --git a/crates/salsa/src/intern_id.rs b/crates/salsa/src/intern_id.rs index b060d8aab6..a7bbc088f9 100644 --- a/crates/salsa/src/intern_id.rs +++ b/crates/salsa/src/intern_id.rs @@ -1,3 +1,4 @@ +//! 
use std::fmt; use std::num::NonZeroU32; diff --git a/crates/salsa/src/interned.rs b/crates/salsa/src/interned.rs index 392534ea0b..22f22e6112 100644 --- a/crates/salsa/src/interned.rs +++ b/crates/salsa/src/interned.rs @@ -1,3 +1,4 @@ +//! use crate::debug::TableEntry; use crate::durability::Durability; use crate::intern_id::InternId; diff --git a/crates/salsa/src/lib.rs b/crates/salsa/src/lib.rs index 19a9fd2571..575408f362 100644 --- a/crates/salsa/src/lib.rs +++ b/crates/salsa/src/lib.rs @@ -1,3 +1,4 @@ +//! #![allow(clippy::type_complexity)] #![allow(clippy::question_mark)] #![warn(rust_2018_idioms)] diff --git a/crates/salsa/src/lru.rs b/crates/salsa/src/lru.rs index 18441ea004..c6b9778f20 100644 --- a/crates/salsa/src/lru.rs +++ b/crates/salsa/src/lru.rs @@ -1,3 +1,4 @@ +//! use oorandom::Rand64; use parking_lot::Mutex; use std::fmt::Debug; diff --git a/crates/salsa/src/plumbing.rs b/crates/salsa/src/plumbing.rs index f09d6d7337..560a9b8315 100644 --- a/crates/salsa/src/plumbing.rs +++ b/crates/salsa/src/plumbing.rs @@ -1,3 +1,4 @@ +//! #![allow(missing_docs)] use crate::debug::TableEntry; diff --git a/crates/salsa/src/revision.rs b/crates/salsa/src/revision.rs index 61d38a3bcc..d97aaf9deb 100644 --- a/crates/salsa/src/revision.rs +++ b/crates/salsa/src/revision.rs @@ -1,3 +1,4 @@ +//! use std::num::NonZeroU32; use std::sync::atomic::{AtomicU32, Ordering}; diff --git a/crates/salsa/src/runtime.rs b/crates/salsa/src/runtime.rs index 29c5afa37a..40b8856991 100644 --- a/crates/salsa/src/runtime.rs +++ b/crates/salsa/src/runtime.rs @@ -1,3 +1,4 @@ +//! use crate::durability::Durability; use crate::hash::FxIndexSet; use crate::plumbing::CycleRecoveryStrategy; diff --git a/crates/salsa/src/runtime/dependency_graph.rs b/crates/salsa/src/runtime/dependency_graph.rs index 9fa2851d0e..e41eb280de 100644 --- a/crates/salsa/src/runtime/dependency_graph.rs +++ b/crates/salsa/src/runtime/dependency_graph.rs @@ -1,3 +1,4 @@ +//! 
use triomphe::Arc; use crate::{DatabaseKeyIndex, RuntimeId}; diff --git a/crates/salsa/src/runtime/local_state.rs b/crates/salsa/src/runtime/local_state.rs index b21f1ee4af..91b95dffe7 100644 --- a/crates/salsa/src/runtime/local_state.rs +++ b/crates/salsa/src/runtime/local_state.rs @@ -1,3 +1,4 @@ +//! use tracing::debug; use crate::durability::Durability; diff --git a/crates/salsa/src/storage.rs b/crates/salsa/src/storage.rs index e0acf44041..c0e6416f4a 100644 --- a/crates/salsa/src/storage.rs +++ b/crates/salsa/src/storage.rs @@ -1,3 +1,4 @@ +//! use crate::{plumbing::DatabaseStorageTypes, Runtime}; use triomphe::Arc; diff --git a/crates/salsa/tests/cycles.rs b/crates/salsa/tests/cycles.rs index 4c3ec312f2..0f89c6375f 100644 --- a/crates/salsa/tests/cycles.rs +++ b/crates/salsa/tests/cycles.rs @@ -1,5 +1,6 @@ use std::panic::UnwindSafe; +use expect_test::expect; use salsa::{Durability, ParallelDatabase, Snapshot}; use test_log::test; @@ -179,24 +180,26 @@ fn extract_cycle(f: impl FnOnce() + UnwindSafe) -> salsa::Cycle { fn cycle_memoized() { let db = DatabaseImpl::default(); let cycle = extract_cycle(|| db.memoized_a()); - insta::assert_debug_snapshot!(cycle.unexpected_participants(&db), @r###" - [ - "memoized_a(())", - "memoized_b(())", - ] - "###); + expect![[r#" + [ + "memoized_a(())", + "memoized_b(())", + ] + "#]] + .assert_debug_eq(&cycle.unexpected_participants(&db)); } #[test] fn cycle_volatile() { let db = DatabaseImpl::default(); let cycle = extract_cycle(|| db.volatile_a()); - insta::assert_debug_snapshot!(cycle.unexpected_participants(&db), @r###" - [ - "volatile_a(())", - "volatile_b(())", - ] - "###); + expect![[r#" + [ + "volatile_a(())", + "volatile_b(())", + ] + "#]] + .assert_debug_eq(&cycle.unexpected_participants(&db)); } #[test] @@ -228,12 +231,13 @@ fn inner_cycle() { let err = query.cycle_c(); assert!(err.is_err()); let cycle = err.unwrap_err().cycle; - insta::assert_debug_snapshot!(cycle, @r###" - [ - "cycle_a(())", - "cycle_b(())", - ] - 
"###); + expect![[r#" + [ + "cycle_a(())", + "cycle_b(())", + ] + "#]] + .assert_debug_eq(&cycle); } #[test] @@ -265,16 +269,17 @@ fn cycle_revalidate_unchanged_twice() { db.set_c_invokes(CycleQuery::A); // force new revisi5on // on this run - insta::assert_debug_snapshot!(db.cycle_a(), @r###" - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ) - "###); + expect![[r#" + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ) + "#]] + .assert_debug_eq(&db.cycle_a()); } #[test] @@ -346,16 +351,17 @@ fn cycle_mixed_1() { db.set_c_invokes(CycleQuery::B); let u = db.cycle_c(); - insta::assert_debug_snapshot!(u, @r###" - Err( - Error { - cycle: [ - "cycle_b(())", - "cycle_c(())", - ], - }, - ) - "###); + expect![[r#" + Err( + Error { + cycle: [ + "cycle_b(())", + "cycle_c(())", + ], + }, + ) + "#]] + .assert_debug_eq(&u); } #[test] @@ -372,17 +378,18 @@ fn cycle_mixed_2() { db.set_c_invokes(CycleQuery::A); let u = db.cycle_a(); - insta::assert_debug_snapshot!(u, @r###" - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - "cycle_c(())", - ], - }, - ) - "###); + expect![[r#" + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + "cycle_c(())", + ], + }, + ) + "#]] + .assert_debug_eq(&u); } #[test] @@ -399,26 +406,27 @@ fn cycle_deterministic_order() { }; let a = db().cycle_a(); let b = db().cycle_b(); - insta::assert_debug_snapshot!((a, b), @r###" - ( - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - ) - "###); + expect![[r#" + ( + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + ) + "#]] + .assert_debug_eq(&(a, b)); } #[test] @@ -443,34 +451,35 @@ fn cycle_multiple() { let c = db.cycle_c(); let b = db.cycle_b(); let a = db.cycle_a(); - insta::assert_debug_snapshot!((a, b, c), @r###" - ( - Err( - Error { 
- cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - ) - "###); + expect![[r#" + ( + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + Err( + Error { + cycle: [ + "cycle_a(())", + "cycle_b(())", + ], + }, + ), + ) + "#]] + .assert_debug_eq(&(a, b, c)); } #[test] @@ -485,9 +494,10 @@ fn cycle_recovery_set_but_not_participating() { // Here we expect C to panic and A not to recover: let r = extract_cycle(|| drop(db.cycle_a())); - insta::assert_debug_snapshot!(r.all_participants(&db), @r###" - [ - "cycle_c(())", - ] - "###); + expect![[r#" + [ + "cycle_c(())", + ] + "#]] + .assert_debug_eq(&r.all_participants(&db)); } diff --git a/crates/salsa/tests/on_demand_inputs.rs b/crates/salsa/tests/on_demand_inputs.rs index 2ad4975eff..61039ae7b2 100644 --- a/crates/salsa/tests/on_demand_inputs.rs +++ b/crates/salsa/tests/on_demand_inputs.rs @@ -100,16 +100,16 @@ fn on_demand_input_durability() { db.external_state.insert(2, 20); assert_eq!(db.b(1), 10); assert_eq!(db.b(2), 20); - insta::assert_debug_snapshot!(events, @r###" - RefCell { - value: [ - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: b(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: b(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", - ], - } - "###); + expect_test::expect![[r#" + RefCell { + value: [ + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: b(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, 
kind: WillExecute { database_key: b(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", + ], + } + "#]].assert_debug_eq(&events); eprintln!("------------------"); db.salsa_runtime_mut().synthetic_write(Durability::LOW); @@ -117,17 +117,17 @@ fn on_demand_input_durability() { assert_eq!(db.c(1), 10); assert_eq!(db.c(2), 20); // Re-execute `a(2)` because that has low durability, but not `a(1)` - insta::assert_debug_snapshot!(events, @r###" - RefCell { - value: [ - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(2) } }", - ], - } - "###); + expect_test::expect![[r#" + RefCell { + value: [ + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(2) } }", + ], + } + "#]].assert_debug_eq(&events); eprintln!("------------------"); db.salsa_runtime_mut().synthetic_write(Durability::HIGH); @@ -136,14 +136,14 @@ fn on_demand_input_durability() { assert_eq!(db.c(2), 20); // Re-execute both `a(1)` and `a(2)`, but we don't re-execute any `b` queries as the // result didn't actually change. 
- insta::assert_debug_snapshot!(events, @r###" - RefCell { - value: [ - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(2) } }", - ], - } - "###); + expect_test::expect![[r#" + RefCell { + value: [ + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(1) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", + "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(2) } }", + ], + } + "#]].assert_debug_eq(&events); } diff --git a/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs b/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs index 1467041422..35fe379118 100644 --- a/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs +++ b/crates/salsa/tests/parallel/parallel_cycle_none_recover.rs @@ -3,6 +3,7 @@ //! both intra and cross thread. use crate::setup::{Knobs, ParDatabaseImpl}; +use expect_test::expect; use salsa::ParallelDatabase; use test_log::test; @@ -25,12 +26,13 @@ fn parallel_cycle_none_recover() { // Right now, it panics with a string. 
let err_b = thread_b.join().unwrap_err(); if let Some(c) = err_b.downcast_ref::() { - insta::assert_debug_snapshot!(c.unexpected_participants(&db), @r###" - [ - "a(-1)", - "b(-1)", - ] - "###); + expect![[r#" + [ + "a(-1)", + "b(-1)", + ] + "#]] + .assert_debug_eq(&c.unexpected_participants(&db)); } else { panic!("b failed in an unexpected way: {:?}", err_b); } From 731b159f10af1c772a34accf1abe0c21ed1cbb73 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 7 Feb 2024 16:34:21 +0100 Subject: [PATCH 044/122] Remove dev-dependency diff --- Cargo.lock | 8 +------- crates/salsa/Cargo.toml | 2 +- crates/salsa/tests/incremental/implementation.rs | 8 ++++---- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4be85b1bc1..dc2bf3a769 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -324,12 +324,6 @@ dependencies = [ "syn", ] -[[package]] -name = "diff" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" - [[package]] name = "dissimilar" version = "1.0.7" @@ -1711,7 +1705,7 @@ checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" name = "salsa" version = "0.0.0" dependencies = [ - "diff", + "dissimilar", "expect-test", "indexmap", "linked-hash-map", diff --git a/crates/salsa/Cargo.toml b/crates/salsa/Cargo.toml index 42885b47a6..5cd351c0ca 100644 --- a/crates/salsa/Cargo.toml +++ b/crates/salsa/Cargo.toml @@ -25,8 +25,8 @@ triomphe = "0.1.11" salsa-macros = { version = "0.0.0", path = "salsa-macros" } [dev-dependencies] -diff = "0.1.13" linked-hash-map = "0.5.6" rand = "0.8.5" test-log = "0.2.14" expect-test = "1.4.0" +dissimilar = "1.0.7" diff --git a/crates/salsa/tests/incremental/implementation.rs b/crates/salsa/tests/incremental/implementation.rs index a9c0a4a018..445154e85a 100644 --- a/crates/salsa/tests/incremental/implementation.rs +++ b/crates/salsa/tests/incremental/implementation.rs @@ 
-33,11 +33,11 @@ impl TestContextImpl { return; } - for diff in diff::lines(expected_text, actual_text) { + for diff in dissimilar::diff(expected_text, actual_text) { match diff { - diff::Result::Left(l) => println!("-{}", l), - diff::Result::Both(l, _) => println!(" {}", l), - diff::Result::Right(r) => println!("+{}", r), + dissimilar::Chunk::Delete(l) => println!("-{}", l), + dissimilar::Chunk::Equal(l) => println!(" {}", l), + dissimilar::Chunk::Insert(r) => println!("+{}", r), } } From 3688064ff5cf72a3568bca496c4241ae373948be Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 7 Feb 2024 16:53:27 +0100 Subject: [PATCH 045/122] Clippy --- crates/salsa/Cargo.toml | 3 +++ crates/salsa/salsa-macros/Cargo.toml | 3 +++ crates/salsa/salsa-macros/src/database_storage.rs | 12 +++--------- crates/salsa/salsa-macros/src/lib.rs | 2 -- crates/salsa/salsa-macros/src/parenthesized.rs | 2 +- crates/salsa/salsa-macros/src/query_group.rs | 7 ------- crates/salsa/tests/cycles.rs | 12 +----------- crates/salsa/tests/incremental/implementation.rs | 1 + crates/salsa/tests/no_send_sync.rs | 2 -- crates/salsa/tests/on_demand_inputs.rs | 6 ++---- crates/salsa/tests/parallel/setup.rs | 11 +++-------- 11 files changed, 17 insertions(+), 44 deletions(-) diff --git a/crates/salsa/Cargo.toml b/crates/salsa/Cargo.toml index 5cd351c0ca..4ccbc3de84 100644 --- a/crates/salsa/Cargo.toml +++ b/crates/salsa/Cargo.toml @@ -30,3 +30,6 @@ rand = "0.8.5" test-log = "0.2.14" expect-test = "1.4.0" dissimilar = "1.0.7" + +[lints] +workspace = true diff --git a/crates/salsa/salsa-macros/Cargo.toml b/crates/salsa/salsa-macros/Cargo.toml index 8d60a3244d..791d2f6e9f 100644 --- a/crates/salsa/salsa-macros/Cargo.toml +++ b/crates/salsa/salsa-macros/Cargo.toml @@ -18,3 +18,6 @@ heck = "0.4" proc-macro2 = "1.0" quote = "1.0" syn = { version = "2.0", features = ["full", "extra-traits"] } + +[lints] +workspace = true diff --git a/crates/salsa/salsa-macros/src/database_storage.rs 
b/crates/salsa/salsa-macros/src/database_storage.rs index 26b8e61309..0ec75bb043 100644 --- a/crates/salsa/salsa-macros/src/database_storage.rs +++ b/crates/salsa/salsa-macros/src/database_storage.rs @@ -203,12 +203,6 @@ pub(crate) fn database(args: TokenStream, input: TokenStream) -> TokenStream { output.extend(has_group_impls); - if std::env::var("SALSA_DUMP").is_ok() { - println!("~~~ database_storage"); - println!("{}", output); - println!("~~~ database_storage"); - } - output.into() } @@ -218,7 +212,7 @@ struct QueryGroupList { } impl Parse for QueryGroupList { - fn parse(input: ParseStream) -> syn::Result { + fn parse(input: ParseStream<'_>) -> syn::Result { let query_groups: PunctuatedQueryGroups = input.parse_terminated(QueryGroup::parse, Token![,])?; Ok(QueryGroupList { query_groups }) @@ -241,7 +235,7 @@ impl Parse for QueryGroup { /// ```ignore /// impl HelloWorldDatabase; /// ``` - fn parse(input: ParseStream) -> syn::Result { + fn parse(input: ParseStream<'_>) -> syn::Result { let group_path: Path = input.parse()?; Ok(QueryGroup { group_path }) } @@ -250,7 +244,7 @@ impl Parse for QueryGroup { struct Nothing; impl Parse for Nothing { - fn parse(_input: ParseStream) -> syn::Result { + fn parse(_input: ParseStream<'_>) -> syn::Result { Ok(Nothing) } } diff --git a/crates/salsa/salsa-macros/src/lib.rs b/crates/salsa/salsa-macros/src/lib.rs index e50236fe7b..8af48b1e3f 100644 --- a/crates/salsa/salsa-macros/src/lib.rs +++ b/crates/salsa/salsa-macros/src/lib.rs @@ -2,8 +2,6 @@ #![recursion_limit = "256"] -extern crate proc_macro; -extern crate proc_macro2; #[macro_use] extern crate quote; diff --git a/crates/salsa/salsa-macros/src/parenthesized.rs b/crates/salsa/salsa-macros/src/parenthesized.rs index d61166cecd..9df41e03c1 100644 --- a/crates/salsa/salsa-macros/src/parenthesized.rs +++ b/crates/salsa/salsa-macros/src/parenthesized.rs @@ -5,7 +5,7 @@ impl syn::parse::Parse for Parenthesized where T: syn::parse::Parse, { - fn parse(input: 
syn::parse::ParseStream) -> syn::Result { + fn parse(input: syn::parse::ParseStream<'_>) -> syn::Result { let content; syn::parenthesized!(content in input); content.parse::().map(Parenthesized) diff --git a/crates/salsa/salsa-macros/src/query_group.rs b/crates/salsa/salsa-macros/src/query_group.rs index 0b1ee2d3b5..7d0eac9f5d 100644 --- a/crates/salsa/salsa-macros/src/query_group.rs +++ b/crates/salsa/salsa-macros/src/query_group.rs @@ -625,13 +625,6 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream } // ANCHOR_END:group_storage_methods }); - - if std::env::var("SALSA_DUMP").is_ok() { - println!("~~~ query_group"); - println!("{}", output); - println!("~~~ query_group"); - } - output.into() } diff --git a/crates/salsa/tests/cycles.rs b/crates/salsa/tests/cycles.rs index 0f89c6375f..00ca533244 100644 --- a/crates/salsa/tests/cycles.rs +++ b/crates/salsa/tests/cycles.rs @@ -51,6 +51,7 @@ struct Error { } #[salsa::database(GroupStruct)] +#[derive(Default)] struct DatabaseImpl { storage: salsa::Storage, } @@ -63,14 +64,6 @@ impl ParallelDatabase for DatabaseImpl { } } -impl Default for DatabaseImpl { - fn default() -> Self { - let res = DatabaseImpl { storage: salsa::Storage::default() }; - - res - } -} - /// The queries A, B, and C in `Database` can be configured /// to invoke one another in arbitrary ways using this /// enum. 
@@ -151,17 +144,14 @@ impl CycleQuery { } fn cycle_a(db: &dyn Database) -> Result<(), Error> { - dbg!("cycle_a"); db.a_invokes().invoke(db) } fn cycle_b(db: &dyn Database) -> Result<(), Error> { - dbg!("cycle_b"); db.b_invokes().invoke(db) } fn cycle_c(db: &dyn Database) -> Result<(), Error> { - dbg!("cycle_c"); db.c_invokes().invoke(db) } diff --git a/crates/salsa/tests/incremental/implementation.rs b/crates/salsa/tests/incremental/implementation.rs index 445154e85a..19752bba00 100644 --- a/crates/salsa/tests/incremental/implementation.rs +++ b/crates/salsa/tests/incremental/implementation.rs @@ -33,6 +33,7 @@ impl TestContextImpl { return; } + #[allow(clippy::print_stdout)] for diff in dissimilar::diff(expected_text, actual_text) { match diff { dissimilar::Chunk::Delete(l) => println!("-{}", l), diff --git a/crates/salsa/tests/no_send_sync.rs b/crates/salsa/tests/no_send_sync.rs index 2648f2b7a4..2a25c437c3 100644 --- a/crates/salsa/tests/no_send_sync.rs +++ b/crates/salsa/tests/no_send_sync.rs @@ -1,5 +1,3 @@ -extern crate salsa; - use std::rc::Rc; #[salsa::query_group(NoSendSyncStorage)] diff --git a/crates/salsa/tests/on_demand_inputs.rs b/crates/salsa/tests/on_demand_inputs.rs index 61039ae7b2..5d0e486644 100644 --- a/crates/salsa/tests/on_demand_inputs.rs +++ b/crates/salsa/tests/on_demand_inputs.rs @@ -4,6 +4,8 @@ //! via a b query with zero inputs, which uses `add_synthetic_read` to //! tweak durability and `invalidate` to clear the input. 
+#![allow(clippy::disallowed_types, clippy::type_complexity)] + use std::{cell::RefCell, collections::HashMap, rc::Rc}; use salsa::{Database as _, Durability, EventKind}; @@ -40,8 +42,6 @@ struct Database { impl salsa::Database for Database { fn salsa_event(&self, event: salsa::Event) { - dbg!(event.debug(self)); - if let Some(cb) = &self.on_event { cb(self, event) } @@ -111,7 +111,6 @@ fn on_demand_input_durability() { } "#]].assert_debug_eq(&events); - eprintln!("------------------"); db.salsa_runtime_mut().synthetic_write(Durability::LOW); events.replace(vec![]); assert_eq!(db.c(1), 10); @@ -129,7 +128,6 @@ fn on_demand_input_durability() { } "#]].assert_debug_eq(&events); - eprintln!("------------------"); db.salsa_runtime_mut().synthetic_write(Durability::HIGH); events.replace(vec![]); assert_eq!(db.c(1), 10); diff --git a/crates/salsa/tests/parallel/setup.rs b/crates/salsa/tests/parallel/setup.rs index 0f7d06542f..0a35902b43 100644 --- a/crates/salsa/tests/parallel/setup.rs +++ b/crates/salsa/tests/parallel/setup.rs @@ -46,7 +46,7 @@ impl WithValue for Cell { fn with_value(&self, value: T, closure: impl FnOnce() -> R) -> R { let old_value = self.replace(value); - let result = catch_unwind(AssertUnwindSafe(|| closure())); + let result = catch_unwind(AssertUnwindSafe(closure)); self.set(old_value); @@ -57,18 +57,13 @@ impl WithValue for Cell { } } -#[derive(Clone, Copy, PartialEq, Eq)] +#[derive(Default, Clone, Copy, PartialEq, Eq)] pub(crate) enum CancellationFlag { + #[default] Down, Panic, } -impl Default for CancellationFlag { - fn default() -> CancellationFlag { - CancellationFlag::Down - } -} - /// Various "knobs" that can be used to customize how the queries /// behave on one specific thread. Note that this state is /// intentionally thread-local (apart from `signal`). 
From 97cd68097ac84b83848908acdfea9df1a4015128 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 7 Feb 2024 18:02:41 +0100 Subject: [PATCH 046/122] Optimize input queries that take no arguments --- crates/salsa/salsa-macros/src/query_group.rs | 3 + crates/salsa/src/input.rs | 210 ++++++++++++++++--- crates/salsa/src/plumbing.rs | 2 +- 3 files changed, 181 insertions(+), 34 deletions(-) diff --git a/crates/salsa/salsa-macros/src/query_group.rs b/crates/salsa/salsa-macros/src/query_group.rs index 7d0eac9f5d..e535d7ed04 100644 --- a/crates/salsa/salsa-macros/src/query_group.rs +++ b/crates/salsa/salsa-macros/src/query_group.rs @@ -371,6 +371,9 @@ pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream QueryStorage::Dependencies => { quote!(salsa::plumbing::DependencyStorage) } + QueryStorage::Input if query.keys.is_empty() => { + quote!(salsa::plumbing::UnitInputStorage) + } QueryStorage::Input => quote!(salsa::plumbing::InputStorage), QueryStorage::Interned => quote!(salsa::plumbing::InternedStorage), QueryStorage::InternedLookup { intern_query_type } => { diff --git a/crates/salsa/src/input.rs b/crates/salsa/src/input.rs index f6188d4a84..4e8fca6149 100644 --- a/crates/salsa/src/input.rs +++ b/crates/salsa/src/input.rs @@ -15,6 +15,7 @@ use crate::{DatabaseKeyIndex, QueryDb}; use indexmap::map::Entry; use parking_lot::RwLock; use std::convert::TryFrom; +use std::iter; use tracing::debug; /// Input queries store the result plus a list of the other queries @@ -25,15 +26,12 @@ where Q: Query, { group_index: u16, - slots: RwLock>>, + slots: RwLock>>, } -struct Slot -where - Q: Query, -{ +struct Slot { database_key_index: DatabaseKeyIndex, - stamped_value: RwLock>, + stamped_value: RwLock>, } impl std::panic::RefUnwindSafe for InputStorage @@ -78,7 +76,14 @@ where debug_assert!(revision < db.salsa_runtime().current_revision()); let slots = &self.slots.read(); let slot = slots.get_index(input.key_index as usize).unwrap().1; - 
slot.maybe_changed_after(db, revision) + + debug!("maybe_changed_after(slot={:?}, revision={:?})", Q::default(), revision,); + + let changed_at = slot.stamped_value.read().changed_at; + + debug!("maybe_changed_after: changed_at = {:?}", changed_at); + + changed_at > revision } fn fetch(&self, db: &>::DynDb, key: &Q::Key) -> Q::Value { @@ -121,21 +126,6 @@ where } } -impl Slot -where - Q: Query, -{ - fn maybe_changed_after(&self, _db: &>::DynDb, revision: Revision) -> bool { - debug!("maybe_changed_after(slot={:?}, revision={:?})", self, revision,); - - let changed_at = self.stamped_value.read().changed_at; - - debug!("maybe_changed_after: changed_at = {:?}", changed_at); - - changed_at > revision - } -} - impl QueryStorageMassOps for InputStorage where Q: Query, @@ -202,6 +192,167 @@ where } } +/// Same as `InputStorage`, but optimized for queries that take no inputs. +pub struct UnitInputStorage +where + Q: Query, +{ + group_index: u16, + slot: UnitSlot, +} + +struct UnitSlot { + database_key_index: DatabaseKeyIndex, + stamped_value: RwLock>>, +} + +impl std::panic::RefUnwindSafe for UnitInputStorage +where + Q: Query, + Q::Key: std::panic::RefUnwindSafe, + Q::Value: std::panic::RefUnwindSafe, +{ +} + +impl QueryStorageOps for UnitInputStorage +where + Q: Query, +{ + const CYCLE_STRATEGY: crate::plumbing::CycleRecoveryStrategy = CycleRecoveryStrategy::Panic; + + fn new(group_index: u16) -> Self { + let database_key_index = + DatabaseKeyIndex { group_index, query_index: Q::QUERY_INDEX, key_index: 0 }; + UnitInputStorage { + group_index, + slot: UnitSlot { database_key_index, stamped_value: RwLock::new(None) }, + } + } + + fn fmt_index( + &self, + _db: &>::DynDb, + index: DatabaseKeyIndex, + fmt: &mut std::fmt::Formatter<'_>, + ) -> std::fmt::Result { + assert_eq!(index.group_index, self.group_index); + assert_eq!(index.query_index, Q::QUERY_INDEX); + write!(fmt, "{}", Q::QUERY_NAME) + } + + fn maybe_changed_after( + &self, + db: &>::DynDb, + input: 
DatabaseKeyIndex, + revision: Revision, + ) -> bool { + assert_eq!(input.group_index, self.group_index); + assert_eq!(input.query_index, Q::QUERY_INDEX); + debug_assert!(revision < db.salsa_runtime().current_revision()); + + debug!("maybe_changed_after(slot={:?}, revision={:?})", Q::default(), revision,); + + let changed_at = self.slot.stamped_value.read().as_ref().unwrap().changed_at; + + debug!("maybe_changed_after: changed_at = {:?}", changed_at); + + changed_at > revision + } + + fn fetch(&self, db: &>::DynDb, &(): &Q::Key) -> Q::Value { + db.unwind_if_cancelled(); + + let StampedValue { value, durability, changed_at } = self + .slot + .stamped_value + .read() + .clone() + .unwrap_or_else(|| panic!("no value set for {:?}", Q::default())); + + db.salsa_runtime().report_query_read_and_unwind_if_cycle_resulted( + self.slot.database_key_index, + durability, + changed_at, + ); + + value + } + + fn durability(&self, _db: &>::DynDb, &(): &Q::Key) -> Durability { + match &*self.slot.stamped_value.read() { + Some(stamped_value) => stamped_value.durability, + None => panic!("no value set for {:?}", Q::default(),), + } + } + + fn entries(&self, _db: &>::DynDb) -> C + where + C: std::iter::FromIterator>, + { + iter::once(TableEntry::new( + (), + self.slot.stamped_value.read().as_ref().map(|it| it.value.clone()), + )) + .collect() + } +} + +impl QueryStorageMassOps for UnitInputStorage +where + Q: Query, +{ + fn purge(&self) { + *self.slot.stamped_value.write() = Default::default(); + } +} + +impl InputQueryStorageOps for UnitInputStorage +where + Q: Query, +{ + fn set(&self, runtime: &mut Runtime, (): &Q::Key, value: Q::Value, durability: Durability) { + tracing::debug!("{:?} = {:?} ({:?})", Q::default(), value, durability); + + // The value is changing, so we need a new revision (*). We also + // need to update the 'last changed' revision by invoking + // `guard.mark_durability_as_changed`. 
+ // + // CAREFUL: This will block until the global revision lock can + // be acquired. If there are still queries executing, they may + // need to read from this input. Therefore, we wait to acquire + // the lock on `map` until we also hold the global query write + // lock. + // + // (*) Technically, since you can't presently access an input + // for a non-existent key, and you can't enumerate the set of + // keys, we only need a new revision if the key used to + // exist. But we may add such methods in the future and this + // case doesn't generally seem worth optimizing for. + runtime.with_incremented_revision(|next_revision| { + let mut stamped_value_slot = self.slot.stamped_value.write(); + + // Do this *after* we acquire the lock, so that we are not + // racing with somebody else to modify this same cell. + // (Otherwise, someone else might write a *newer* revision + // into the same cell while we block on the lock.) + let stamped_value = StampedValue { value, durability, changed_at: next_revision }; + + match &mut *stamped_value_slot { + Some(slot_stamped_value) => { + let old_durability = slot_stamped_value.durability; + *slot_stamped_value = stamped_value; + Some(old_durability) + } + + stamped_value_slot @ None => { + *stamped_value_slot = Some(stamped_value); + None + } + } + }); + } +} + /// Check that `Slot: Send + Sync` as long as /// `DB::DatabaseData: Send + Sync`, which in turn implies that /// `Q::Key: Send + Sync`, `Q::Value: Send + Sync`. 
@@ -213,7 +364,8 @@ where Q::Value: Send + Sync, { fn is_send_sync() {} - is_send_sync::>(); + is_send_sync::>(); + is_send_sync::>(); } /// Check that `Slot: 'static` as long as @@ -227,14 +379,6 @@ where Q::Value: 'static, { fn is_static() {} - is_static::>(); -} - -impl std::fmt::Debug for Slot -where - Q: Query, -{ - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(fmt, "{:?}", Q::default()) - } + is_static::>(); + is_static::>(); } diff --git a/crates/salsa/src/plumbing.rs b/crates/salsa/src/plumbing.rs index 560a9b8315..71332e39ca 100644 --- a/crates/salsa/src/plumbing.rs +++ b/crates/salsa/src/plumbing.rs @@ -15,7 +15,7 @@ use triomphe::Arc; pub use crate::derived::DependencyStorage; pub use crate::derived::MemoizedStorage; -pub use crate::input::InputStorage; +pub use crate::input::{InputStorage, UnitInputStorage}; pub use crate::interned::InternedStorage; pub use crate::interned::LookupInternedStorage; pub use crate::{revision::Revision, DatabaseKeyIndex, QueryDb, Runtime}; From aa9bc01824a43ebd29c18eb0d18d41634a5876aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lauren=C8=9Biu=20Nicola?= Date: Wed, 7 Feb 2024 19:24:35 +0200 Subject: [PATCH 047/122] Bump paths-filter --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0140bce13a..964be478fa 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -28,7 +28,7 @@ jobs: proc_macros: ${{ steps.filter.outputs.proc_macros }} steps: - uses: actions/checkout@v3 - - uses: dorny/paths-filter@4067d885736b84de7c414f582ac45897079b0a78 + - uses: dorny/paths-filter@1441771bbfdd59dcd748680ee64ebd8faab1a242 id: filter with: filters: | From 79e41114428f238f5c0265e1e237d1afb5c66654 Mon Sep 17 00:00:00 2001 From: clubby789 Date: Wed, 7 Feb 2024 19:42:07 +0000 Subject: [PATCH 048/122] Remove `ffi_returns_twice` references --- crates/hir-def/src/attr/builtin.rs | 3 --- 
crates/ide-db/src/generated/lints.rs | 11 ----------- 2 files changed, 14 deletions(-) diff --git a/crates/hir-def/src/attr/builtin.rs b/crates/hir-def/src/attr/builtin.rs index 48a596f7f5..b20ee9e5bf 100644 --- a/crates/hir-def/src/attr/builtin.rs +++ b/crates/hir-def/src/attr/builtin.rs @@ -283,9 +283,6 @@ pub const INERT_ATTRIBUTES: &[BuiltinAttribute] = &[ experimental!(optimize), ), - gated!( - ffi_returns_twice, Normal, template!(Word), WarnFollowing, experimental!(ffi_returns_twice) - ), gated!(ffi_pure, Normal, template!(Word), WarnFollowing, experimental!(ffi_pure)), gated!(ffi_const, Normal, template!(Word), WarnFollowing, experimental!(ffi_const)), gated!( diff --git a/crates/ide-db/src/generated/lints.rs b/crates/ide-db/src/generated/lints.rs index 677c8fd54c..2fc0793320 100644 --- a/crates/ide-db/src/generated/lints.rs +++ b/crates/ide-db/src/generated/lints.rs @@ -5044,17 +5044,6 @@ against are compatible with those of the `#[ffi_pure]`. [ARM C/C++ compiler]: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0491c/Cacigdac.html [GCC]: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute [IBM ILE C/C++]: https://www.ibm.com/support/knowledgecenter/fr/ssw_ibm_i_71/rzarg/fn_attrib_pure.htm -"##, - }, - Lint { - label: "ffi_returns_twice", - description: r##"# `ffi_returns_twice` - -The tracking issue for this feature is: [#58314] - -[#58314]: https://github.com/rust-lang/rust/issues/58314 - ------------------------- "##, }, Lint { From 81ea48a573ea1dfd729a31c35e74c46af2598769 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 8 Feb 2024 10:05:28 +0100 Subject: [PATCH 049/122] fix: Fix tuple structs not rendering visibility in their fields --- crates/hir/src/display.rs | 4 +- crates/ide/src/hover/tests.rs | 75 +++++++++++++++++++++++++++++++++-- 2 files changed, 74 insertions(+), 5 deletions(-) diff --git a/crates/hir/src/display.rs b/crates/hir/src/display.rs index 9b99b141fc..30f402a79f 100644 
--- a/crates/hir/src/display.rs +++ b/crates/hir/src/display.rs @@ -158,7 +158,8 @@ impl HirDisplay for Adt { impl HirDisplay for Struct { fn hir_fmt(&self, f: &mut HirFormatter<'_>) -> Result<(), HirDisplayError> { - write_visibility(self.module(f.db).id, self.visibility(f.db), f)?; + let module_id = self.module(f.db).id; + write_visibility(module_id, self.visibility(f.db), f)?; f.write_str("struct ")?; write!(f, "{}", self.name(f.db).display(f.db.upcast()))?; let def_id = GenericDefId::AdtId(AdtId::StructId(self.id)); @@ -171,6 +172,7 @@ impl HirDisplay for Struct { while let Some((id, _)) = it.next() { let field = Field { parent: (*self).into(), id }; + write_visibility(module_id, field.visibility(f.db), f)?; field.ty(f.db).hir_fmt(f)?; if it.peek().is_some() { f.write_str(", ")?; diff --git a/crates/ide/src/hover/tests.rs b/crates/ide/src/hover/tests.rs index 9f4427090e..78efa9cac8 100644 --- a/crates/ide/src/hover/tests.rs +++ b/crates/ide/src/hover/tests.rs @@ -702,7 +702,7 @@ fn hover_shows_struct_field_info() { // Hovering over the field when instantiating check( r#" -struct Foo { field_a: u32 } +struct Foo { pub field_a: u32 } fn main() { let foo = Foo { field_a$0: 0, }; @@ -717,7 +717,7 @@ fn main() { ```rust // size = 4, align = 4, offset = 0 - field_a: u32 + pub field_a: u32 ``` "#]], ); @@ -725,7 +725,7 @@ fn main() { // Hovering over the field in the definition check( r#" -struct Foo { field_a$0: u32 } +struct Foo { pub field_a$0: u32 } fn main() { let foo = Foo { field_a: 0 }; @@ -740,7 +740,74 @@ fn main() { ```rust // size = 4, align = 4, offset = 0 - field_a: u32 + pub field_a: u32 + ``` + "#]], + ); +} + +#[test] +fn hover_shows_tuple_struct_field_info() { + check( + r#" +struct Foo(pub u32) + +fn main() { + let foo = Foo { 0$0: 0, }; +} +"#, + expect![[r#" + *0* + + ```rust + test::Foo + ``` + + ```rust + // size = 4, align = 4, offset = 0 + pub 0: u32 + ``` + "#]], + ); + check( + r#" +struct Foo(pub u32) + +fn foo(foo: Foo) { + foo.0$0; +} 
+"#, + expect![[r#" + *0* + + ```rust + test::Foo + ``` + + ```rust + // size = 4, align = 4, offset = 0 + pub 0: u32 + ``` + "#]], + ); +} + +#[test] +fn hover_tuple_struct() { + check( + r#" +struct Foo$0(pub u32) +"#, + expect![[r#" + *Foo* + + ```rust + test + ``` + + ```rust + // size = 4, align = 4 + struct Foo(pub u32); ``` "#]], ); From 0258f60cfeb2a3355dd050fce9ed51b55d8cb613 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 8 Feb 2024 10:40:42 +0100 Subject: [PATCH 050/122] feat: Allow cargo check to run on only the current package --- crates/flycheck/src/lib.rs | 25 +++++++--- crates/rust-analyzer/src/config.rs | 7 +++ .../src/handlers/notification.rs | 49 ++++++++++++------- crates/rust-analyzer/src/main_loop.rs | 2 +- crates/rust-analyzer/src/reload.rs | 2 +- docs/user/generated_config.adoc | 6 +++ editors/code/package.json | 5 ++ 7 files changed, 68 insertions(+), 28 deletions(-) diff --git a/crates/flycheck/src/lib.rs b/crates/flycheck/src/lib.rs index ef1404487e..c59aff2a8b 100644 --- a/crates/flycheck/src/lib.rs +++ b/crates/flycheck/src/lib.rs @@ -100,9 +100,14 @@ impl FlycheckHandle { FlycheckHandle { id, sender, _thread: thread } } - /// Schedule a re-start of the cargo check worker. - pub fn restart(&self) { - self.sender.send(StateChange::Restart).unwrap(); + /// Schedule a re-start of the cargo check worker to do a workspace wide check. + pub fn restart_workspace(&self) { + self.sender.send(StateChange::Restart(None)).unwrap(); + } + + /// Schedule a re-start of the cargo check worker to do a package wide check. + pub fn restart_for_package(&self, package: String) { + self.sender.send(StateChange::Restart(Some(package))).unwrap(); } /// Stop this cargo check worker. 
@@ -153,7 +158,7 @@ pub enum Progress { } enum StateChange { - Restart, + Restart(Option), Cancel, } @@ -213,7 +218,7 @@ impl FlycheckActor { tracing::debug!(flycheck_id = self.id, "flycheck cancelled"); self.cancel_check_process(); } - Event::RequestStateChange(StateChange::Restart) => { + Event::RequestStateChange(StateChange::Restart(package)) => { // Cancel the previously spawned process self.cancel_check_process(); while let Ok(restart) = inbox.recv_timeout(Duration::from_millis(50)) { @@ -223,7 +228,7 @@ impl FlycheckActor { } } - let command = self.check_command(); + let command = self.check_command(package.as_deref()); let formatted_command = format!("{:?}", command); tracing::debug!(?command, "will restart flycheck"); @@ -297,7 +302,7 @@ impl FlycheckActor { } } - fn check_command(&self) -> Command { + fn check_command(&self, package: Option<&str>) -> Command { let (mut cmd, args) = match &self.config { FlycheckConfig::CargoCommand { command, @@ -314,7 +319,11 @@ impl FlycheckActor { let mut cmd = Command::new(toolchain::cargo()); cmd.arg(command); cmd.current_dir(&self.root); - cmd.arg("--workspace"); + + match package { + Some(pkg) => cmd.arg("-p").arg(pkg), + None => cmd.arg("--workspace"), + }; cmd.arg(if *ansi_color_output { "--message-format=json-diagnostic-rendered-ansi" diff --git a/crates/rust-analyzer/src/config.rs b/crates/rust-analyzer/src/config.rs index 815f6ea12e..ff81c4dc5a 100644 --- a/crates/rust-analyzer/src/config.rs +++ b/crates/rust-analyzer/src/config.rs @@ -223,6 +223,9 @@ config_data! { /// /// Aliased as `"checkOnSave.targets"`. check_targets | checkOnSave_targets | checkOnSave_target: Option = "null", + /// Whether `--workspace` should be passed to `cargo check`. + /// If false, `-p ` will be passed instead. + check_workspace: bool = "true", /// Toggles the additional completions that automatically add imports when completed. 
/// Note that your client must specify the `additionalTextEdits` LSP client capability to truly have this feature enabled. @@ -1323,6 +1326,10 @@ impl Config { } } + pub fn flycheck_workspace(&self) -> bool { + self.data.check_workspace + } + pub fn flycheck(&self) -> FlycheckConfig { match &self.data.check_overrideCommand { Some(args) if !args.is_empty() => { diff --git a/crates/rust-analyzer/src/handlers/notification.rs b/crates/rust-analyzer/src/handlers/notification.rs index 1f24e95010..268fe00979 100644 --- a/crates/rust-analyzer/src/handlers/notification.rs +++ b/crates/rust-analyzer/src/handlers/notification.rs @@ -160,7 +160,7 @@ pub(crate) fn handle_did_save_text_document( } else if state.config.check_on_save() { // No specific flycheck was triggered, so let's trigger all of them. for flycheck in state.flycheck.iter() { - flycheck.restart(); + flycheck.restart_workspace(); } } Ok(()) @@ -281,27 +281,40 @@ fn run_flycheck(state: &mut GlobalState, vfs_path: VfsPath) -> bool { let crate_root_paths: Vec<_> = crate_root_paths.iter().map(Deref::deref).collect(); // Find all workspaces that have at least one target containing the saved file - let workspace_ids = world.workspaces.iter().enumerate().filter(|(_, ws)| match ws { - project_model::ProjectWorkspace::Cargo { cargo, .. } => { - cargo.packages().any(|pkg| { - cargo[pkg] - .targets - .iter() - .any(|&it| crate_root_paths.contains(&cargo[it].root.as_path())) - }) - } - project_model::ProjectWorkspace::Json { project, .. } => { - project.crates().any(|(c, _)| crate_ids.iter().any(|&crate_id| crate_id == c)) - } - project_model::ProjectWorkspace::DetachedFiles { .. } => false, + let workspace_ids = world.workspaces.iter().enumerate().filter_map(|(idx, ws)| { + let package = match ws { + project_model::ProjectWorkspace::Cargo { cargo, .. 
} => { + cargo.packages().find_map(|pkg| { + let has_target_with_root = cargo[pkg] + .targets + .iter() + .any(|&it| crate_root_paths.contains(&cargo[it].root.as_path())); + has_target_with_root.then(|| cargo[pkg].name.clone()) + }) + } + project_model::ProjectWorkspace::Json { project, .. } => { + if !project + .crates() + .any(|(c, _)| crate_ids.iter().any(|&crate_id| crate_id == c)) + { + return None; + } + None + } + project_model::ProjectWorkspace::DetachedFiles { .. } => return None, + }; + Some((idx, package)) }); // Find and trigger corresponding flychecks for flycheck in world.flycheck.iter() { - for (id, _) in workspace_ids.clone() { + for (id, package) in workspace_ids.clone() { if id == flycheck.id() { updated = true; - flycheck.restart(); + match package.filter(|_| !world.config.flycheck_workspace()) { + Some(package) => flycheck.restart_for_package(package), + None => flycheck.restart_workspace(), + } continue; } } @@ -309,7 +322,7 @@ fn run_flycheck(state: &mut GlobalState, vfs_path: VfsPath) -> bool { // No specific flycheck was triggered, so let's trigger all of them. if !updated { for flycheck in world.flycheck.iter() { - flycheck.restart(); + flycheck.restart_workspace(); } } Ok(()) @@ -351,7 +364,7 @@ pub(crate) fn handle_run_flycheck( } // No specific flycheck was triggered, so let's trigger all of them. 
for flycheck in state.flycheck.iter() { - flycheck.restart(); + flycheck.restart_workspace(); } Ok(()) } diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs index f3ead6d04f..dc15a4c0a4 100644 --- a/crates/rust-analyzer/src/main_loop.rs +++ b/crates/rust-analyzer/src/main_loop.rs @@ -314,7 +314,7 @@ impl GlobalState { if became_quiescent { if self.config.check_on_save() { // Project has loaded properly, kick off initial flycheck - self.flycheck.iter().for_each(FlycheckHandle::restart); + self.flycheck.iter().for_each(FlycheckHandle::restart_workspace); } if self.config.prefill_caches() { self.prime_caches_queue.request_op("became quiescent".to_string(), ()); diff --git a/crates/rust-analyzer/src/reload.rs b/crates/rust-analyzer/src/reload.rs index 65c00cc08d..2a0478d62f 100644 --- a/crates/rust-analyzer/src/reload.rs +++ b/crates/rust-analyzer/src/reload.rs @@ -503,7 +503,7 @@ impl GlobalState { let mut crate_graph_file_dependencies = FxHashSet::default(); let mut load = |path: &AbsPath| { - let _p = tracing::span!(tracing::Level::INFO, "switch_workspaces::load").entered(); + let _p = tracing::span!(tracing::Level::DEBUG, "switch_workspaces::load").entered(); let vfs_path = vfs::VfsPath::from(path.to_path_buf()); crate_graph_file_dependencies.insert(vfs_path.clone()); match vfs.file_id(&vfs_path) { diff --git a/docs/user/generated_config.adoc b/docs/user/generated_config.adoc index cfa7503d73..3ff2229346 100644 --- a/docs/user/generated_config.adoc +++ b/docs/user/generated_config.adoc @@ -251,6 +251,12 @@ Can be a single target, e.g. `"x86_64-unknown-linux-gnu"` or a list of targets, Aliased as `"checkOnSave.targets"`. -- +[[rust-analyzer.check.workspace]]rust-analyzer.check.workspace (default: `true`):: ++ +-- +Whether `--workspace` should be passed to `cargo check`. +If false, `-p ` will be passed instead. 
+-- [[rust-analyzer.completion.autoimport.enable]]rust-analyzer.completion.autoimport.enable (default: `true`):: + -- diff --git a/editors/code/package.json b/editors/code/package.json index 841e364ed8..7ad5a3abbc 100644 --- a/editors/code/package.json +++ b/editors/code/package.json @@ -803,6 +803,11 @@ } ] }, + "rust-analyzer.check.workspace": { + "markdownDescription": "Whether `--workspace` should be passed to `cargo check`.\nIf false, `-p ` will be passed instead.", + "default": true, + "type": "boolean" + }, "rust-analyzer.completion.autoimport.enable": { "markdownDescription": "Toggles the additional completions that automatically add imports when completed.\nNote that your client must specify the `additionalTextEdits` LSP client capability to truly have this feature enabled.", "default": true, From e865d45904e1f2a60f81fa84288b15c696feee4c Mon Sep 17 00:00:00 2001 From: Yutaro Ohno Date: Fri, 8 Dec 2023 19:05:27 +0900 Subject: [PATCH 051/122] fix: Recover from missing argument in call expressions Previously, when parsing an argument list with a missing argument (e.g., `(a, , b)` in `foo(a, , b)`), the parser would stop upon an unexpected token (at the second comma in the example), resulting in an incorrect parse tree. This commit improves error handling in such cases, ensuring a more accurate parse tree is built. --- crates/hir-expand/src/fixup.rs | 14 -------- crates/parser/src/grammar/expressions.rs | 33 ++++++++++++++----- .../inline/err/0015_arg_list_recovery.rast | 27 +++++++++++++++ .../inline/err/0015_arg_list_recovery.rs | 1 + 4 files changed, 53 insertions(+), 22 deletions(-) diff --git a/crates/hir-expand/src/fixup.rs b/crates/hir-expand/src/fixup.rs index a3b2c700ff..ed29411a6b 100644 --- a/crates/hir-expand/src/fixup.rs +++ b/crates/hir-expand/src/fixup.rs @@ -631,20 +631,6 @@ fn foo () {a . 
b ; bar () ;} ) } - #[test] - fn extraneous_comma() { - check( - r#" -fn foo() { - bar(,); -} -"#, - expect![[r#" -fn foo () {__ra_fixup ;} -"#]], - ) - } - #[test] fn fixup_if_1() { check( diff --git a/crates/parser/src/grammar/expressions.rs b/crates/parser/src/grammar/expressions.rs index e99c111d39..52a08c4fb1 100644 --- a/crates/parser/src/grammar/expressions.rs +++ b/crates/parser/src/grammar/expressions.rs @@ -611,6 +611,7 @@ fn cast_expr(p: &mut Parser<'_>, lhs: CompletedMarker) -> CompletedMarker { // foo(bar::); // foo(bar:); // foo(bar+); +// foo(a, , b); // } fn arg_list(p: &mut Parser<'_>) { assert!(p.at(T!['('])); @@ -619,14 +620,30 @@ fn arg_list(p: &mut Parser<'_>) { // fn main() { // foo(#[attr] 92) // } - delimited( - p, - T!['('], - T![')'], - T![,], - EXPR_FIRST.union(ATTRIBUTE_FIRST), - |p: &mut Parser<'_>| expr(p).is_some(), - ); + p.bump(T!['(']); + while !p.at(T![')']) && !p.at(EOF) { + if p.at(T![,]) { + // Recover if an argument is missing and only got a delimiter, + // e.g. `(a, , b)`. 
+ p.error("expected expression"); + p.bump(T![,]); + continue; + } + + if expr(p).is_none() { + break; + } + if !p.at(T![,]) { + if p.at_ts(EXPR_FIRST.union(ATTRIBUTE_FIRST)) { + p.error(format!("expected {:?}", T![,])); + } else { + break; + } + } else { + p.bump(T![,]); + } + } + p.expect(T![')']); m.complete(p, ARG_LIST); } diff --git a/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast b/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast index 5d0fe859c2..85def9e148 100644 --- a/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast +++ b/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast @@ -68,6 +68,32 @@ SOURCE_FILE PLUS "+" R_PAREN ")" SEMICOLON ";" + WHITESPACE "\n " + EXPR_STMT + CALL_EXPR + PATH_EXPR + PATH + PATH_SEGMENT + NAME_REF + IDENT "foo" + ARG_LIST + L_PAREN "(" + PATH_EXPR + PATH + PATH_SEGMENT + NAME_REF + IDENT "a" + COMMA "," + WHITESPACE " " + COMMA "," + WHITESPACE " " + PATH_EXPR + PATH + PATH_SEGMENT + NAME_REF + IDENT "b" + R_PAREN ")" + SEMICOLON ";" WHITESPACE "\n" R_CURLY "}" WHITESPACE "\n" @@ -75,3 +101,4 @@ error 25: expected identifier error 39: expected COMMA error 39: expected expression error 55: expected expression +error 68: expected expression diff --git a/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rs b/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rs index 0e7ac9cc30..175a31f8b5 100644 --- a/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rs +++ b/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rs @@ -2,4 +2,5 @@ fn main() { foo(bar::); foo(bar:); foo(bar+); + foo(a, , b); } From 974e69b0c5e6455fb72061316b93026763375d86 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 8 Feb 2024 11:14:33 +0100 Subject: [PATCH 052/122] Recover from missing slots in delimited parsing --- crates/hir-expand/src/fixup.rs | 14 ++++++ crates/parser/src/grammar.rs | 15 ++++++ 
crates/parser/src/grammar/expressions.rs | 33 ++++--------- crates/parser/src/grammar/generic_args.rs | 12 ++++- crates/parser/src/grammar/generic_params.rs | 25 +++++++--- crates/parser/src/grammar/items/adt.rs | 47 ++++++++++++------- crates/parser/src/grammar/items/use_item.rs | 13 +++-- .../inline/err/0015_arg_list_recovery.rast | 5 +- .../err/0026_use_tree_list_err_recovery.rast | 25 ++++++++++ .../err/0026_use_tree_list_err_recovery.rs | 1 + .../err/0029_tuple_field_list_recovery.rast | 44 +++++++++++++++++ .../err/0029_tuple_field_list_recovery.rs | 2 + .../err/0030_generic_arg_list_recover.rast | 33 +++++++++++++ .../err/0030_generic_arg_list_recover.rs | 1 + .../err/0031_generic_param_list_recover.rast | 45 ++++++++++++++++++ .../err/0031_generic_param_list_recover.rs | 1 + 16 files changed, 261 insertions(+), 55 deletions(-) create mode 100644 crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rast create mode 100644 crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rs create mode 100644 crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rast create mode 100644 crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rs create mode 100644 crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rast create mode 100644 crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rs diff --git a/crates/hir-expand/src/fixup.rs b/crates/hir-expand/src/fixup.rs index ed29411a6b..a3b2c700ff 100644 --- a/crates/hir-expand/src/fixup.rs +++ b/crates/hir-expand/src/fixup.rs @@ -631,6 +631,20 @@ fn foo () {a . 
b ; bar () ;} ) } + #[test] + fn extraneous_comma() { + check( + r#" +fn foo() { + bar(,); +} +"#, + expect![[r#" +fn foo () {__ra_fixup ;} +"#]], + ) + } + #[test] fn fixup_if_1() { check( diff --git a/crates/parser/src/grammar.rs b/crates/parser/src/grammar.rs index 53fda3ae4f..34715628f1 100644 --- a/crates/parser/src/grammar.rs +++ b/crates/parser/src/grammar.rs @@ -393,11 +393,26 @@ fn delimited( bra: SyntaxKind, ket: SyntaxKind, delim: SyntaxKind, + unexpected_delim_message: impl Fn() -> String, first_set: TokenSet, mut parser: impl FnMut(&mut Parser<'_>) -> bool, ) { p.bump(bra); while !p.at(ket) && !p.at(EOF) { + if p.at(delim) { + // Recover if an argument is missing and only got a delimiter, + // e.g. `(a, , b)`. + + // Wrap the erroneous delimiter in an error node so that fixup logic gets rid of it. + // FIXME: Ideally this should be handled in fixup in a structured way, but our list + // nodes currently have no concept of a missing node between two delimiters. + // So doing it this way is easier. + let m = p.start(); + p.error(unexpected_delim_message()); + p.bump(delim); + m.complete(p, ERROR); + continue; + } if !parser(p) { break; } diff --git a/crates/parser/src/grammar/expressions.rs b/crates/parser/src/grammar/expressions.rs index 52a08c4fb1..f40c515fa0 100644 --- a/crates/parser/src/grammar/expressions.rs +++ b/crates/parser/src/grammar/expressions.rs @@ -620,30 +620,15 @@ fn arg_list(p: &mut Parser<'_>) { // fn main() { // foo(#[attr] 92) // } - p.bump(T!['(']); - while !p.at(T![')']) && !p.at(EOF) { - if p.at(T![,]) { - // Recover if an argument is missing and only got a delimiter, - // e.g. `(a, , b)`. 
- p.error("expected expression"); - p.bump(T![,]); - continue; - } - - if expr(p).is_none() { - break; - } - if !p.at(T![,]) { - if p.at_ts(EXPR_FIRST.union(ATTRIBUTE_FIRST)) { - p.error(format!("expected {:?}", T![,])); - } else { - break; - } - } else { - p.bump(T![,]); - } - } - p.expect(T![')']); + delimited( + p, + T!['('], + T![')'], + T![,], + || "expected expression".into(), + EXPR_FIRST.union(ATTRIBUTE_FIRST), + |p| expr(p).is_some(), + ); m.complete(p, ARG_LIST); } diff --git a/crates/parser/src/grammar/generic_args.rs b/crates/parser/src/grammar/generic_args.rs index 211af98e6e..249be2a333 100644 --- a/crates/parser/src/grammar/generic_args.rs +++ b/crates/parser/src/grammar/generic_args.rs @@ -1,5 +1,7 @@ use super::*; +// test_err generic_arg_list_recover +// type T = T<0, ,T>; pub(super) fn opt_generic_arg_list(p: &mut Parser<'_>, colon_colon_required: bool) { let m; if p.at(T![::]) && p.nth(2) == T![<] { @@ -11,7 +13,15 @@ pub(super) fn opt_generic_arg_list(p: &mut Parser<'_>, colon_colon_required: boo return; } - delimited(p, T![<], T![>], T![,], GENERIC_ARG_FIRST, generic_arg); + delimited( + p, + T![<], + T![>], + T![,], + || "expected generic argument".into(), + GENERIC_ARG_FIRST, + generic_arg, + ); m.complete(p, GENERIC_ARG_LIST); } diff --git a/crates/parser/src/grammar/generic_params.rs b/crates/parser/src/grammar/generic_params.rs index 29d9b05d3f..3c577aa3cb 100644 --- a/crates/parser/src/grammar/generic_params.rs +++ b/crates/parser/src/grammar/generic_params.rs @@ -10,16 +10,27 @@ pub(super) fn opt_generic_param_list(p: &mut Parser<'_>) { // test generic_param_list // fn f() {} + +// test_err generic_param_list_recover +// fn f() {} fn generic_param_list(p: &mut Parser<'_>) { assert!(p.at(T![<])); let m = p.start(); - delimited(p, T![<], T![>], T![,], GENERIC_PARAM_FIRST.union(ATTRIBUTE_FIRST), |p| { - // test generic_param_attribute - // fn foo<#[lt_attr] 'a, #[t_attr] T>() {} - let m = p.start(); - attributes::outer_attrs(p); - 
generic_param(p, m) - }); + delimited( + p, + T![<], + T![>], + T![,], + || "expected generic parameter".into(), + GENERIC_PARAM_FIRST.union(ATTRIBUTE_FIRST), + |p| { + // test generic_param_attribute + // fn foo<#[lt_attr] 'a, #[t_attr] T>() {} + let m = p.start(); + attributes::outer_attrs(p); + generic_param(p, m) + }, + ); m.complete(p, GENERIC_PARAM_LIST); } diff --git a/crates/parser/src/grammar/items/adt.rs b/crates/parser/src/grammar/items/adt.rs index 17f41b8e13..21078175c0 100644 --- a/crates/parser/src/grammar/items/adt.rs +++ b/crates/parser/src/grammar/items/adt.rs @@ -146,28 +146,39 @@ pub(crate) fn record_field_list(p: &mut Parser<'_>) { const TUPLE_FIELD_FIRST: TokenSet = types::TYPE_FIRST.union(ATTRIBUTE_FIRST).union(VISIBILITY_FIRST); +// test_err tuple_field_list_recovery +// struct S(struct S; +// struct S(A,,B); fn tuple_field_list(p: &mut Parser<'_>) { assert!(p.at(T!['('])); let m = p.start(); - delimited(p, T!['('], T![')'], T![,], TUPLE_FIELD_FIRST, |p| { - let m = p.start(); - // test tuple_field_attrs - // struct S (#[attr] f32); - attributes::outer_attrs(p); - let has_vis = opt_visibility(p, true); - if !p.at_ts(types::TYPE_FIRST) { - p.error("expected a type"); - if has_vis { - m.complete(p, ERROR); - } else { - m.abandon(p); + delimited( + p, + T!['('], + T![')'], + T![,], + || "expected tuple field".into(), + TUPLE_FIELD_FIRST, + |p| { + let m = p.start(); + // test tuple_field_attrs + // struct S (#[attr] f32); + attributes::outer_attrs(p); + let has_vis = opt_visibility(p, true); + if !p.at_ts(types::TYPE_FIRST) { + p.error("expected a type"); + if has_vis { + m.complete(p, ERROR); + } else { + m.abandon(p); + } + return false; } - return false; - } - types::type_(p); - m.complete(p, TUPLE_FIELD); - true - }); + types::type_(p); + m.complete(p, TUPLE_FIELD); + true + }, + ); m.complete(p, TUPLE_FIELD_LIST); } diff --git a/crates/parser/src/grammar/items/use_item.rs b/crates/parser/src/grammar/items/use_item.rs index 
f689c06b31..675a1fd465 100644 --- a/crates/parser/src/grammar/items/use_item.rs +++ b/crates/parser/src/grammar/items/use_item.rs @@ -93,9 +93,16 @@ pub(crate) fn use_tree_list(p: &mut Parser<'_>) { // use b; // struct T; // fn test() {} - delimited(p, T!['{'], T!['}'], T![,], USE_TREE_LIST_FIRST_SET, |p: &mut Parser<'_>| { - use_tree(p, false) || p.at_ts(USE_TREE_LIST_RECOVERY_SET) - }); + // use {a ,, b}; + delimited( + p, + T!['{'], + T!['}'], + T![,], + || "expected use tree".into(), + USE_TREE_LIST_FIRST_SET, + |p: &mut Parser<'_>| use_tree(p, false) || p.at_ts(USE_TREE_LIST_RECOVERY_SET), + ); m.complete(p, USE_TREE_LIST); } diff --git a/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast b/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast index 85def9e148..cd5aa680c6 100644 --- a/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast +++ b/crates/parser/test_data/parser/inline/err/0015_arg_list_recovery.rast @@ -85,7 +85,8 @@ SOURCE_FILE IDENT "a" COMMA "," WHITESPACE " " - COMMA "," + ERROR + COMMA "," WHITESPACE " " PATH_EXPR PATH @@ -101,4 +102,4 @@ error 25: expected identifier error 39: expected COMMA error 39: expected expression error 55: expected expression -error 68: expected expression +error 69: expected expression diff --git a/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rast b/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rast index cb90b093ba..b576d872e1 100644 --- a/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rast +++ b/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rast @@ -43,4 +43,29 @@ SOURCE_FILE L_CURLY "{" R_CURLY "}" WHITESPACE "\n" + USE + USE_KW "use" + WHITESPACE " " + USE_TREE + USE_TREE_LIST + L_CURLY "{" + USE_TREE + PATH + PATH_SEGMENT + NAME_REF + IDENT "a" + WHITESPACE " " + COMMA "," + ERROR + COMMA "," + WHITESPACE " " + USE_TREE + PATH + PATH_SEGMENT + 
NAME_REF + IDENT "b" + R_CURLY "}" + SEMICOLON ";" + WHITESPACE "\n" error 6: expected R_CURLY +error 46: expected use tree diff --git a/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rs b/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rs index f16959c25f..9885e6ab27 100644 --- a/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rs +++ b/crates/parser/test_data/parser/inline/err/0026_use_tree_list_err_recovery.rs @@ -2,3 +2,4 @@ use {a; use b; struct T; fn test() {} +use {a ,, b}; diff --git a/crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rast b/crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rast new file mode 100644 index 0000000000..6b0bfa007e --- /dev/null +++ b/crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rast @@ -0,0 +1,44 @@ +SOURCE_FILE + STRUCT + STRUCT_KW "struct" + WHITESPACE " " + NAME + IDENT "S" + TUPLE_FIELD_LIST + L_PAREN "(" + STRUCT + STRUCT_KW "struct" + WHITESPACE " " + NAME + IDENT "S" + SEMICOLON ";" + WHITESPACE "\n" + STRUCT + STRUCT_KW "struct" + WHITESPACE " " + NAME + IDENT "S" + TUPLE_FIELD_LIST + L_PAREN "(" + TUPLE_FIELD + PATH_TYPE + PATH + PATH_SEGMENT + NAME_REF + IDENT "A" + COMMA "," + ERROR + COMMA "," + TUPLE_FIELD + PATH_TYPE + PATH + PATH_SEGMENT + NAME_REF + IDENT "B" + R_PAREN ")" + SEMICOLON ";" + WHITESPACE "\n" +error 9: expected a type +error 9: expected R_PAREN +error 9: expected SEMICOLON +error 30: expected tuple field diff --git a/crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rs b/crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rs new file mode 100644 index 0000000000..ecb4d8bda1 --- /dev/null +++ b/crates/parser/test_data/parser/inline/err/0029_tuple_field_list_recovery.rs @@ -0,0 +1,2 @@ +struct S(struct S; +struct S(A,,B); diff --git 
a/crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rast b/crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rast new file mode 100644 index 0000000000..4cf5a3386b --- /dev/null +++ b/crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rast @@ -0,0 +1,33 @@ +SOURCE_FILE + TYPE_ALIAS + TYPE_KW "type" + WHITESPACE " " + NAME + IDENT "T" + WHITESPACE " " + EQ "=" + WHITESPACE " " + PATH_TYPE + PATH + PATH_SEGMENT + NAME_REF + IDENT "T" + GENERIC_ARG_LIST + L_ANGLE "<" + CONST_ARG + LITERAL + INT_NUMBER "0" + COMMA "," + WHITESPACE " " + ERROR + COMMA "," + TYPE_ARG + PATH_TYPE + PATH + PATH_SEGMENT + NAME_REF + IDENT "T" + R_ANGLE ">" + SEMICOLON ";" + WHITESPACE "\n" +error 14: expected generic argument diff --git a/crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rs b/crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rs new file mode 100644 index 0000000000..7d849aa1be --- /dev/null +++ b/crates/parser/test_data/parser/inline/err/0030_generic_arg_list_recover.rs @@ -0,0 +1 @@ +type T = T<0, ,T>; diff --git a/crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rast b/crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rast new file mode 100644 index 0000000000..0a1ed01fbe --- /dev/null +++ b/crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rast @@ -0,0 +1,45 @@ +SOURCE_FILE + FN + FN_KW "fn" + WHITESPACE " " + NAME + IDENT "f" + GENERIC_PARAM_LIST + L_ANGLE "<" + TYPE_PARAM + NAME + IDENT "T" + COLON ":" + WHITESPACE " " + TYPE_BOUND_LIST + TYPE_BOUND + PATH_TYPE + PATH + PATH_SEGMENT + NAME_REF + IDENT "Clone" + COMMA "," + ERROR + COMMA "," + WHITESPACE " " + TYPE_PARAM + NAME + IDENT "U" + COLON ":" + TYPE_BOUND_LIST + COMMA "," + WHITESPACE " " + TYPE_PARAM + NAME + IDENT "V" + R_ANGLE ">" + PARAM_LIST + L_PAREN "(" + R_PAREN ")" + WHITESPACE " " + BLOCK_EXPR + STMT_LIST + L_CURLY "{" + 
R_CURLY "}" + WHITESPACE "\n" +error 14: expected generic parameter diff --git a/crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rs b/crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rs new file mode 100644 index 0000000000..2b5149bb0d --- /dev/null +++ b/crates/parser/test_data/parser/inline/err/0031_generic_param_list_recover.rs @@ -0,0 +1 @@ +fn f() {} From 4bf6b160a90721aa5c11c6306ae4b1144afff4c6 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 31 Jan 2024 10:34:40 +0100 Subject: [PATCH 053/122] Fix incorrect range uses in missing_fields quick fixes --- .../src/handlers/missing_fields.rs | 29 +++++++++---------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/crates/ide-diagnostics/src/handlers/missing_fields.rs b/crates/ide-diagnostics/src/handlers/missing_fields.rs index 37ac912f06..feaf1aa727 100644 --- a/crates/ide-diagnostics/src/handlers/missing_fields.rs +++ b/crates/ide-diagnostics/src/handlers/missing_fields.rs @@ -13,7 +13,7 @@ use syntax::{ ast::{self, make}, AstNode, SyntaxNode, SyntaxNodePtr, }; -use text_edit::TextEdit; +use text_edit::{TextEdit, TextRange}; use crate::{fix, Diagnostic, DiagnosticCode, DiagnosticsContext}; @@ -59,9 +59,15 @@ fn fixes(ctx: &DiagnosticsContext<'_>, d: &hir::MissingFields) -> Option, d: &hir::MissingFields) -> Option, d: &hir::MissingFields) -> Option, d: &hir::MissingFields) -> Option { let missing_fields = ctx.sema.record_pattern_missing_fields(field_list_parent); @@ -160,11 +161,7 @@ fn fixes(ctx: &DiagnosticsContext<'_>, d: &hir::MissingFields) -> Option Date: Wed, 31 Jan 2024 10:54:52 +0100 Subject: [PATCH 054/122] Better error message for when proc-macros have not yet been built --- .../ide-diagnostics/src/handlers/unresolved_proc_macro.rs | 2 +- crates/rust-analyzer/src/reload.rs | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/ide-diagnostics/src/handlers/unresolved_proc_macro.rs 
b/crates/ide-diagnostics/src/handlers/unresolved_proc_macro.rs index 015a3d6b2c..340f77feea 100644 --- a/crates/ide-diagnostics/src/handlers/unresolved_proc_macro.rs +++ b/crates/ide-diagnostics/src/handlers/unresolved_proc_macro.rs @@ -32,7 +32,7 @@ pub(crate) fn unresolved_proc_macro( let severity = if config_enabled { Severity::Error } else { Severity::WeakWarning }; let def_map = ctx.sema.db.crate_def_map(d.krate); let message = if config_enabled { - def_map.proc_macro_loading_error().unwrap_or("proc macro not found in the built dylib") + def_map.proc_macro_loading_error().unwrap_or("internal error") } else { match d.kind { hir::MacroKind::Attr if proc_macros_enabled => "attribute macro expansion is disabled", diff --git a/crates/rust-analyzer/src/reload.rs b/crates/rust-analyzer/src/reload.rs index 2a0478d62f..dcf6089ca7 100644 --- a/crates/rust-analyzer/src/reload.rs +++ b/crates/rust-analyzer/src/reload.rs @@ -528,10 +528,16 @@ impl GlobalState { (crate_graph, proc_macros, crate_graph_file_dependencies) }; + let mut change = Change::new(); if self.config.expand_proc_macros() { + change.set_proc_macros( + crate_graph + .iter() + .map(|id| (id, Err("Proc-macros have not been built yet".to_owned()))) + .collect(), + ); self.fetch_proc_macros_queue.request_op(cause, proc_macro_paths); } - let mut change = Change::new(); change.set_crate_graph(crate_graph); self.analysis_host.apply_change(change); self.crate_graph_file_dependencies = crate_graph_file_dependencies; From 545382db2532801bd6fb05532d71114f8fb509fd Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 8 Feb 2024 14:54:52 +0100 Subject: [PATCH 055/122] Fix warnings --- crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs | 2 +- crates/ide-diagnostics/src/handlers/missing_fields.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs b/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs index 51be8960b8..712842372b 
100644 --- a/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs +++ b/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs @@ -359,7 +359,7 @@ impl<'p> TypeCx for MatchCheckCtx<'p> { &'a self, ctor: &'a rustc_pattern_analysis::constructor::Constructor, ty: &'a Self::Ty, - ) -> impl Iterator + ExactSizeIterator + Captures<'a> { + ) -> impl ExactSizeIterator + Captures<'a> { let single = |ty| smallvec![ty]; let tys: SmallVec<[_; 2]> = match ctor { Struct | Variant(_) | UnionField => match ty.kind(Interner) { diff --git a/crates/ide-diagnostics/src/handlers/missing_fields.rs b/crates/ide-diagnostics/src/handlers/missing_fields.rs index feaf1aa727..3bc043c8fc 100644 --- a/crates/ide-diagnostics/src/handlers/missing_fields.rs +++ b/crates/ide-diagnostics/src/handlers/missing_fields.rs @@ -13,7 +13,7 @@ use syntax::{ ast::{self, make}, AstNode, SyntaxNode, SyntaxNodePtr, }; -use text_edit::{TextEdit, TextRange}; +use text_edit::TextEdit; use crate::{fix, Diagnostic, DiagnosticCode, DiagnosticsContext}; From 15bffe25bde3b61bc33356e7c8234de8b82d5759 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 30 Jan 2024 21:04:37 +0100 Subject: [PATCH 056/122] feat: Add break and return postfix keyword completions --- crates/ide-completion/src/completions/dot.rs | 17 +- crates/ide-completion/src/completions/expr.rs | 10 +- .../src/completions/item_list.rs | 4 +- .../ide-completion/src/completions/keyword.rs | 6 + .../ide-completion/src/completions/postfix.rs | 61 ++++++-- .../ide-completion/src/completions/record.rs | 7 +- .../ide-completion/src/completions/snippet.rs | 4 +- crates/ide-completion/src/context.rs | 146 ++++++++++-------- crates/ide-completion/src/context/analysis.rs | 55 ++++--- crates/ide-completion/src/render.rs | 4 + crates/ide-completion/src/tests/expression.rs | 33 ++++ .../ide-completion/src/tests/proc_macros.rs | 8 + crates/ide-completion/src/tests/special.rs | 2 + crates/ide-db/src/famous_defs.rs | 13 +- crates/test-utils/src/minicore.rs | 2 +- 
15 files changed, 256 insertions(+), 116 deletions(-) diff --git a/crates/ide-completion/src/completions/dot.rs b/crates/ide-completion/src/completions/dot.rs index 00135a6d20..24a1f9492e 100644 --- a/crates/ide-completion/src/completions/dot.rs +++ b/crates/ide-completion/src/completions/dot.rs @@ -4,7 +4,10 @@ use ide_db::FxHashSet; use syntax::SmolStr; use crate::{ - context::{CompletionContext, DotAccess, DotAccessKind, ExprCtx, PathCompletionCtx, Qualified}, + context::{ + CompletionContext, DotAccess, DotAccessExprCtx, DotAccessKind, PathCompletionCtx, + PathExprCtx, Qualified, + }, CompletionItem, CompletionItemKind, Completions, }; @@ -51,7 +54,7 @@ pub(crate) fn complete_undotted_self( acc: &mut Completions, ctx: &CompletionContext<'_>, path_ctx: &PathCompletionCtx, - expr_ctx: &ExprCtx, + expr_ctx: &PathExprCtx, ) { if !ctx.config.enable_self_on_the_fly { return; @@ -66,7 +69,7 @@ pub(crate) fn complete_undotted_self( return; } let self_param = match expr_ctx { - ExprCtx { self_param: Some(self_param), .. } => self_param, + PathExprCtx { self_param: Some(self_param), .. 
} => self_param, _ => return, }; @@ -82,6 +85,10 @@ pub(crate) fn complete_undotted_self( receiver: None, receiver_ty: None, kind: DotAccessKind::Field { receiver_is_ambiguous_float_literal: false }, + ctx: DotAccessExprCtx { + in_block_expr: expr_ctx.in_block_expr, + in_breakable: expr_ctx.in_breakable, + }, }, Some(hir::known::SELF_PARAM), field, @@ -99,6 +106,10 @@ pub(crate) fn complete_undotted_self( receiver: None, receiver_ty: None, kind: DotAccessKind::Method { has_parens: false }, + ctx: DotAccessExprCtx { + in_block_expr: expr_ctx.in_block_expr, + in_breakable: expr_ctx.in_breakable, + }, }, func, Some(hir::known::SELF_PARAM), diff --git a/crates/ide-completion/src/completions/expr.rs b/crates/ide-completion/src/completions/expr.rs index 1433216d61..77fd5dd98b 100644 --- a/crates/ide-completion/src/completions/expr.rs +++ b/crates/ide-completion/src/completions/expr.rs @@ -5,7 +5,7 @@ use syntax::ast; use crate::{ completions::record::add_default_update, - context::{ExprCtx, PathCompletionCtx, Qualified}, + context::{BreakableKind, PathCompletionCtx, PathExprCtx, Qualified}, CompletionContext, Completions, }; @@ -13,16 +13,16 @@ pub(crate) fn complete_expr_path( acc: &mut Completions, ctx: &CompletionContext<'_>, path_ctx @ PathCompletionCtx { qualified, .. 
}: &PathCompletionCtx, - expr_ctx: &ExprCtx, + expr_ctx: &PathExprCtx, ) { let _p = tracing::span!(tracing::Level::INFO, "complete_expr_path").entered(); if !ctx.qualifier_ctx.none() { return; } - let &ExprCtx { + let &PathExprCtx { in_block_expr, - in_loop_body, + in_breakable, after_if_expr, in_condition, incomplete_let, @@ -290,7 +290,7 @@ pub(crate) fn complete_expr_path( add_keyword("mut", "mut "); } - if in_loop_body { + if in_breakable != BreakableKind::None { if in_block_expr { add_keyword("continue", "continue;"); add_keyword("break", "break;"); diff --git a/crates/ide-completion/src/completions/item_list.rs b/crates/ide-completion/src/completions/item_list.rs index addd9dac1a..0a6a8633a2 100644 --- a/crates/ide-completion/src/completions/item_list.rs +++ b/crates/ide-completion/src/completions/item_list.rs @@ -1,7 +1,7 @@ //! Completion of paths and keywords at item list position. use crate::{ - context::{ExprCtx, ItemListKind, PathCompletionCtx, Qualified}, + context::{ItemListKind, PathCompletionCtx, PathExprCtx, Qualified}, CompletionContext, Completions, }; @@ -11,7 +11,7 @@ pub(crate) fn complete_item_list_in_expr( acc: &mut Completions, ctx: &CompletionContext<'_>, path_ctx: &PathCompletionCtx, - expr_ctx: &ExprCtx, + expr_ctx: &PathExprCtx, ) { if !expr_ctx.in_block_expr { return; diff --git a/crates/ide-completion/src/completions/keyword.rs b/crates/ide-completion/src/completions/keyword.rs index b9ab2afca2..ed32a5db23 100644 --- a/crates/ide-completion/src/completions/keyword.rs +++ b/crates/ide-completion/src/completions/keyword.rs @@ -81,11 +81,13 @@ fn foo(a: A) { a.$0 } sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); @@ -106,11 +108,13 @@ fn foo() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref 
&expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); @@ -133,11 +137,13 @@ fn foo(a: A) { a.$0 } sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); diff --git a/crates/ide-completion/src/completions/postfix.rs b/crates/ide-completion/src/completions/postfix.rs index af83d4104f..d34d2e7e98 100644 --- a/crates/ide-completion/src/completions/postfix.rs +++ b/crates/ide-completion/src/completions/postfix.rs @@ -2,6 +2,7 @@ mod format_like; +use hir::ItemInNs; use ide_db::{ documentation::{Documentation, HasDocs}, imports::insert_use::ImportScope, @@ -17,7 +18,7 @@ use text_edit::TextEdit; use crate::{ completions::postfix::format_like::add_format_like_completions, - context::{CompletionContext, DotAccess, DotAccessKind}, + context::{BreakableKind, CompletionContext, DotAccess, DotAccessKind}, item::{Builder, CompletionRelevancePostfixMatch}, CompletionItem, CompletionItemKind, CompletionRelevance, Completions, SnippetScope, }; @@ -44,6 +45,7 @@ pub(crate) fn complete_postfix( ), _ => return, }; + let expr_ctx = &dot_access.ctx; let receiver_text = get_receiver_text(dot_receiver, receiver_is_ambiguous_float_literal); @@ -59,16 +61,22 @@ pub(crate) fn complete_postfix( if let Some(drop_trait) = ctx.famous_defs().core_ops_Drop() { if receiver_ty.impls_trait(ctx.db, drop_trait, &[]) { - if let &[hir::AssocItem::Function(drop_fn)] = &*drop_trait.items(ctx.db) { - cov_mark::hit!(postfix_drop_completion); - // FIXME: check that `drop` is in scope, use fully qualified path if it isn't/if shadowed - let mut item = postfix_snippet( - "drop", - "fn drop(&mut self)", - &format!("drop($0{receiver_text})"), - ); - item.set_documentation(drop_fn.docs(ctx.db)); - item.add_to(acc, ctx.db); + if let Some(drop_fn) = ctx.famous_defs().core_mem_drop() { + if let Some(path) = ctx.module.find_use_path( + 
ctx.db, + ItemInNs::Values(drop_fn.into()), + ctx.config.prefer_no_std, + ctx.config.prefer_prelude, + ) { + cov_mark::hit!(postfix_drop_completion); + let mut item = postfix_snippet( + "drop", + "fn drop(&mut self)", + &format!("{path}($0{receiver_text})", path = path.display(ctx.db)), + ); + item.set_documentation(drop_fn.docs(ctx.db)); + item.add_to(acc, ctx.db); + } } } } @@ -140,6 +148,7 @@ pub(crate) fn complete_postfix( postfix_snippet("ref", "&expr", &format!("&{receiver_text}")).add_to(acc, ctx.db); postfix_snippet("refm", "&mut expr", &format!("&mut {receiver_text}")).add_to(acc, ctx.db); + postfix_snippet("deref", "*expr", &format!("*{receiver_text}")).add_to(acc, ctx.db); let mut unsafe_should_be_wrapped = true; if dot_receiver.syntax().kind() == BLOCK_EXPR { @@ -224,6 +233,28 @@ pub(crate) fn complete_postfix( add_format_like_completions(acc, ctx, &dot_receiver, cap, &literal_text); } } + + postfix_snippet( + "return", + "return expr", + &format!( + "return {receiver_text}{semi}", + semi = if expr_ctx.in_block_expr { ";" } else { "" } + ), + ) + .add_to(acc, ctx.db); + + if let BreakableKind::Block | BreakableKind::Loop = expr_ctx.in_breakable { + postfix_snippet( + "break", + "break expr", + &format!( + "break {receiver_text}{semi}", + semi = if expr_ctx.in_block_expr { ";" } else { "" } + ), + ) + .add_to(acc, ctx.db); + } } fn get_receiver_text(receiver: &ast::Expr, receiver_is_ambiguous_float_literal: bool) -> String { @@ -368,6 +399,7 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn if if expr {} sn let let sn letm let mut @@ -375,6 +407,7 @@ fn main() { sn not !expr sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} sn while while expr {} "#]], @@ -399,11 +432,13 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn if if expr {} sn match match expr {} sn not !expr sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe 
{} sn while while expr {} "#]], @@ -424,11 +459,13 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ) @@ -448,6 +485,7 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn if if expr {} sn let let sn letm let mut @@ -455,6 +493,7 @@ fn main() { sn not !expr sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} sn while while expr {} "#]], diff --git a/crates/ide-completion/src/completions/record.rs b/crates/ide-completion/src/completions/record.rs index e53d1cc632..1dcf41f8dd 100644 --- a/crates/ide-completion/src/completions/record.rs +++ b/crates/ide-completion/src/completions/record.rs @@ -6,7 +6,7 @@ use syntax::{ }; use crate::{ - context::{DotAccess, DotAccessKind, PatternContext}, + context::{DotAccess, DotAccessExprCtx, DotAccessKind, PatternContext}, CompletionContext, CompletionItem, CompletionItemKind, CompletionRelevance, CompletionRelevancePostfixMatch, Completions, }; @@ -118,12 +118,17 @@ fn complete_fields( missing_fields: Vec<(hir::Field, hir::Type)>, ) { for (field, ty) in missing_fields { + // This should call something else, we shouldn't be synthesizing a DotAccess here acc.add_field( ctx, &DotAccess { receiver: None, receiver_ty: None, kind: DotAccessKind::Field { receiver_is_ambiguous_float_literal: false }, + ctx: DotAccessExprCtx { + in_block_expr: false, + in_breakable: crate::context::BreakableKind::None, + }, }, None, field, diff --git a/crates/ide-completion/src/completions/snippet.rs b/crates/ide-completion/src/completions/snippet.rs index a019192205..e831113350 100644 --- a/crates/ide-completion/src/completions/snippet.rs +++ b/crates/ide-completion/src/completions/snippet.rs @@ -3,7 +3,7 @@ use ide_db::{documentation::Documentation, imports::insert_use::ImportScope, SnippetCap}; use crate::{ - 
context::{ExprCtx, ItemListKind, PathCompletionCtx, Qualified}, + context::{ItemListKind, PathCompletionCtx, PathExprCtx, Qualified}, item::Builder, CompletionContext, CompletionItem, CompletionItemKind, Completions, SnippetScope, }; @@ -12,7 +12,7 @@ pub(crate) fn complete_expr_snippet( acc: &mut Completions, ctx: &CompletionContext<'_>, path_ctx: &PathCompletionCtx, - &ExprCtx { in_block_expr, .. }: &ExprCtx, + &PathExprCtx { in_block_expr, .. }: &PathExprCtx, ) { if !matches!(path_ctx.qualified, Qualified::No) { return; diff --git a/crates/ide-completion/src/context.rs b/crates/ide-completion/src/context.rs index 2c0370c58f..e07937787c 100644 --- a/crates/ide-completion/src/context.rs +++ b/crates/ide-completion/src/context.rs @@ -45,13 +45,13 @@ pub(crate) enum Visible { /// Existing qualifiers for the thing we are currently completing. #[derive(Debug, Default)] -pub(super) struct QualifierCtx { - pub(super) unsafe_tok: Option, - pub(super) vis_node: Option, +pub(crate) struct QualifierCtx { + pub(crate) unsafe_tok: Option, + pub(crate) vis_node: Option, } impl QualifierCtx { - pub(super) fn none(&self) -> bool { + pub(crate) fn none(&self) -> bool { self.unsafe_tok.is_none() && self.vis_node.is_none() } } @@ -60,27 +60,27 @@ impl QualifierCtx { #[derive(Debug)] pub(crate) struct PathCompletionCtx { /// If this is a call with () already there (or {} in case of record patterns) - pub(super) has_call_parens: bool, + pub(crate) has_call_parens: bool, /// If this has a macro call bang ! - pub(super) has_macro_bang: bool, + pub(crate) has_macro_bang: bool, /// The qualifier of the current path. - pub(super) qualified: Qualified, + pub(crate) qualified: Qualified, /// The parent of the path we are completing. 
- pub(super) parent: Option, + pub(crate) parent: Option, #[allow(dead_code)] /// The path of which we are completing the segment - pub(super) path: ast::Path, + pub(crate) path: ast::Path, /// The path of which we are completing the segment in the original file pub(crate) original_path: Option, - pub(super) kind: PathKind, + pub(crate) kind: PathKind, /// Whether the path segment has type args or not. - pub(super) has_type_args: bool, + pub(crate) has_type_args: bool, /// Whether the qualifier comes from a use tree parent or not pub(crate) use_tree_parent: bool, } impl PathCompletionCtx { - pub(super) fn is_trivial_path(&self) -> bool { + pub(crate) fn is_trivial_path(&self) -> bool { matches!( self, PathCompletionCtx { @@ -97,9 +97,9 @@ impl PathCompletionCtx { /// The kind of path we are completing right now. #[derive(Debug, PartialEq, Eq)] -pub(super) enum PathKind { +pub(crate) enum PathKind { Expr { - expr_ctx: ExprCtx, + expr_ctx: PathExprCtx, }, Type { location: TypeLocation, @@ -132,9 +132,9 @@ pub(crate) struct AttrCtx { } #[derive(Debug, PartialEq, Eq)] -pub(crate) struct ExprCtx { +pub(crate) struct PathExprCtx { pub(crate) in_block_expr: bool, - pub(crate) in_loop_body: bool, + pub(crate) in_breakable: BreakableKind, pub(crate) after_if_expr: bool, /// Whether this expression is the direct condition of an if or while expression pub(crate) in_condition: bool, @@ -221,7 +221,7 @@ pub(crate) enum TypeAscriptionTarget { /// The kind of item list a [`PathKind::Item`] belongs to. #[derive(Debug, PartialEq, Eq)] -pub(super) enum ItemListKind { +pub(crate) enum ItemListKind { SourceFile, Module, Impl, @@ -231,7 +231,7 @@ pub(super) enum ItemListKind { } #[derive(Debug)] -pub(super) enum Qualified { +pub(crate) enum Qualified { No, With { path: ast::Path, @@ -259,37 +259,37 @@ pub(super) enum Qualified { /// The state of the pattern we are completing. 
#[derive(Debug, Clone, PartialEq, Eq)] -pub(super) struct PatternContext { - pub(super) refutability: PatternRefutability, - pub(super) param_ctx: Option, - pub(super) has_type_ascription: bool, - pub(super) parent_pat: Option, - pub(super) ref_token: Option, - pub(super) mut_token: Option, +pub(crate) struct PatternContext { + pub(crate) refutability: PatternRefutability, + pub(crate) param_ctx: Option, + pub(crate) has_type_ascription: bool, + pub(crate) parent_pat: Option, + pub(crate) ref_token: Option, + pub(crate) mut_token: Option, /// The record pattern this name or ref is a field of - pub(super) record_pat: Option, - pub(super) impl_: Option, + pub(crate) record_pat: Option, + pub(crate) impl_: Option, /// List of missing variants in a match expr - pub(super) missing_variants: Vec, + pub(crate) missing_variants: Vec, } #[derive(Debug, Clone, PartialEq, Eq)] -pub(super) struct ParamContext { - pub(super) param_list: ast::ParamList, - pub(super) param: ast::Param, - pub(super) kind: ParamKind, +pub(crate) struct ParamContext { + pub(crate) param_list: ast::ParamList, + pub(crate) param: ast::Param, + pub(crate) kind: ParamKind, } /// The state of the lifetime we are completing. #[derive(Debug)] -pub(super) struct LifetimeContext { - pub(super) lifetime: Option, - pub(super) kind: LifetimeKind, +pub(crate) struct LifetimeContext { + pub(crate) lifetime: Option, + pub(crate) kind: LifetimeKind, } /// The kind of lifetime we are completing. #[derive(Debug)] -pub(super) enum LifetimeKind { +pub(crate) enum LifetimeKind { LifetimeParam { is_decl: bool, param: ast::LifetimeParam }, Lifetime, LabelRef, @@ -298,16 +298,16 @@ pub(super) enum LifetimeKind { /// The state of the name we are completing. #[derive(Debug)] -pub(super) struct NameContext { +pub(crate) struct NameContext { #[allow(dead_code)] - pub(super) name: Option, - pub(super) kind: NameKind, + pub(crate) name: Option, + pub(crate) kind: NameKind, } /// The kind of the name we are completing. 
#[derive(Debug)] #[allow(dead_code)] -pub(super) enum NameKind { +pub(crate) enum NameKind { Const, ConstParam, Enum, @@ -331,15 +331,15 @@ pub(super) enum NameKind { /// The state of the NameRef we are completing. #[derive(Debug)] -pub(super) struct NameRefContext { +pub(crate) struct NameRefContext { /// NameRef syntax in the original file - pub(super) nameref: Option, - pub(super) kind: NameRefKind, + pub(crate) nameref: Option, + pub(crate) kind: NameRefKind, } /// The kind of the NameRef we are completing. #[derive(Debug)] -pub(super) enum NameRefKind { +pub(crate) enum NameRefKind { Path(PathCompletionCtx), DotAccess(DotAccess), /// Position where we are only interested in keyword completions @@ -355,7 +355,7 @@ pub(super) enum NameRefKind { /// The identifier we are currently completing. #[derive(Debug)] -pub(super) enum CompletionAnalysis { +pub(crate) enum CompletionAnalysis { Name(NameContext), NameRef(NameRefContext), Lifetime(LifetimeContext), @@ -376,14 +376,15 @@ pub(super) enum CompletionAnalysis { /// Information about the field or method access we are completing. 
#[derive(Debug)] -pub(super) struct DotAccess { - pub(super) receiver: Option, - pub(super) receiver_ty: Option, - pub(super) kind: DotAccessKind, +pub(crate) struct DotAccess { + pub(crate) receiver: Option, + pub(crate) receiver_ty: Option, + pub(crate) kind: DotAccessKind, + pub(crate) ctx: DotAccessExprCtx, } #[derive(Debug)] -pub(super) enum DotAccessKind { +pub(crate) enum DotAccessKind { Field { /// True if the receiver is an integer and there is no ident in the original file after it yet /// like `0.$0` @@ -394,6 +395,21 @@ pub(super) enum DotAccessKind { }, } +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct DotAccessExprCtx { + pub(crate) in_block_expr: bool, + pub(crate) in_breakable: BreakableKind, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub(crate) enum BreakableKind { + None, + Loop, + For, + While, + Block, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum ParamKind { Function(ast::Fn), @@ -404,39 +420,39 @@ pub(crate) enum ParamKind { /// exactly is the cursor, syntax-wise. #[derive(Debug)] pub(crate) struct CompletionContext<'a> { - pub(super) sema: Semantics<'a, RootDatabase>, - pub(super) scope: SemanticsScope<'a>, - pub(super) db: &'a RootDatabase, - pub(super) config: &'a CompletionConfig, - pub(super) position: FilePosition, + pub(crate) sema: Semantics<'a, RootDatabase>, + pub(crate) scope: SemanticsScope<'a>, + pub(crate) db: &'a RootDatabase, + pub(crate) config: &'a CompletionConfig, + pub(crate) position: FilePosition, /// The token before the cursor, in the original file. - pub(super) original_token: SyntaxToken, + pub(crate) original_token: SyntaxToken, /// The token before the cursor, in the macro-expanded file. - pub(super) token: SyntaxToken, + pub(crate) token: SyntaxToken, /// The crate of the current file. - pub(super) krate: hir::Crate, + pub(crate) krate: hir::Crate, /// The module of the `scope`. - pub(super) module: hir::Module, + pub(crate) module: hir::Module, /// Whether nightly toolchain is used. 
Cached since this is looked up a lot. is_nightly: bool, /// The expected name of what we are completing. /// This is usually the parameter name of the function argument we are completing. - pub(super) expected_name: Option, + pub(crate) expected_name: Option, /// The expected type of what we are completing. - pub(super) expected_type: Option, + pub(crate) expected_type: Option, - pub(super) qualifier_ctx: QualifierCtx, + pub(crate) qualifier_ctx: QualifierCtx, - pub(super) locals: FxHashMap, + pub(crate) locals: FxHashMap, /// The module depth of the current module of the cursor position. /// - crate-root /// - mod foo /// - mod bar /// Here depth will be 2 - pub(super) depth_from_crate_root: usize, + pub(crate) depth_from_crate_root: usize, } impl CompletionContext<'_> { @@ -634,7 +650,7 @@ impl CompletionContext<'_> { // CompletionContext construction impl<'a> CompletionContext<'a> { - pub(super) fn new( + pub(crate) fn new( db: &'a RootDatabase, position @ FilePosition { file_id, offset }: FilePosition, config: &'a CompletionConfig, diff --git a/crates/ide-completion/src/context/analysis.rs b/crates/ide-completion/src/context/analysis.rs index c06b64df1c..92af688977 100644 --- a/crates/ide-completion/src/context/analysis.rs +++ b/crates/ide-completion/src/context/analysis.rs @@ -11,10 +11,11 @@ use syntax::{ }; use crate::context::{ - AttrCtx, CompletionAnalysis, DotAccess, DotAccessKind, ExprCtx, ItemListKind, LifetimeContext, - LifetimeKind, NameContext, NameKind, NameRefContext, NameRefKind, ParamContext, ParamKind, - PathCompletionCtx, PathKind, PatternContext, PatternRefutability, Qualified, QualifierCtx, - TypeAscriptionTarget, TypeLocation, COMPLETION_MARKER, + AttrCtx, BreakableKind, CompletionAnalysis, DotAccess, DotAccessExprCtx, DotAccessKind, + ItemListKind, LifetimeContext, LifetimeKind, NameContext, NameKind, NameRefContext, + NameRefKind, ParamContext, ParamKind, PathCompletionCtx, PathExprCtx, PathKind, PatternContext, + PatternRefutability, 
Qualified, QualifierCtx, TypeAscriptionTarget, TypeLocation, + COMPLETION_MARKER, }; struct ExpansionResult { @@ -623,7 +624,8 @@ fn classify_name_ref( let kind = NameRefKind::DotAccess(DotAccess { receiver_ty: receiver.as_ref().and_then(|it| sema.type_of_expr(it)), kind: DotAccessKind::Field { receiver_is_ambiguous_float_literal }, - receiver + receiver, + ctx: DotAccessExprCtx { in_block_expr: is_in_block(field.syntax()), in_breakable: is_in_breakable(field.syntax()) } }); return Some(make_res(kind)); }, @@ -636,7 +638,8 @@ fn classify_name_ref( let kind = NameRefKind::DotAccess(DotAccess { receiver_ty: receiver.as_ref().and_then(|it| sema.type_of_expr(it)), kind: DotAccessKind::Method { has_parens: method.arg_list().map_or(false, |it| it.l_paren_token().is_some()) }, - receiver + receiver, + ctx: DotAccessExprCtx { in_block_expr: is_in_block(method.syntax()), in_breakable: is_in_breakable(method.syntax()) } }); return Some(make_res(kind)); }, @@ -659,13 +662,6 @@ fn classify_name_ref( use_tree_parent: false, }; - let is_in_block = |it: &SyntaxNode| { - it.parent() - .map(|node| { - ast::ExprStmt::can_cast(node.kind()) || ast::StmtList::can_cast(node.kind()) - }) - .unwrap_or(false) - }; let func_update_record = |syn: &SyntaxNode| { if let Some(record_expr) = syn.ancestors().nth(2).and_then(ast::RecordExpr::cast) { find_node_in_file_compensated(sema, original_file, &record_expr) @@ -932,7 +928,7 @@ fn classify_name_ref( let make_path_kind_expr = |expr: ast::Expr| { let it = expr.syntax(); let in_block_expr = is_in_block(it); - let in_loop_body = is_in_loop_body(it); + let in_loop_body = is_in_breakable(it); let after_if_expr = after_if_expr(it.clone()); let ref_expr_parent = path.as_single_name_ref().and_then(|_| it.parent()).and_then(ast::RefExpr::cast); @@ -998,9 +994,9 @@ fn classify_name_ref( }; PathKind::Expr { - expr_ctx: ExprCtx { + expr_ctx: PathExprCtx { in_block_expr, - in_loop_body, + in_breakable: in_loop_body, after_if_expr, in_condition, 
ref_expr_parent, @@ -1202,7 +1198,7 @@ fn classify_name_ref( if path_ctx.is_trivial_path() { // fetch the full expression that may have qualifiers attached to it let top_node = match path_ctx.kind { - PathKind::Expr { expr_ctx: ExprCtx { in_block_expr: true, .. } } => { + PathKind::Expr { expr_ctx: PathExprCtx { in_block_expr: true, .. } } => { parent.ancestors().find(|it| ast::PathExpr::can_cast(it.kind())).and_then(|p| { let parent = p.parent()?; if ast::StmtList::can_cast(parent.kind()) { @@ -1467,21 +1463,30 @@ fn is_in_token_of_for_loop(path: &ast::Path) -> bool { .unwrap_or(false) } -fn is_in_loop_body(node: &SyntaxNode) -> bool { +fn is_in_breakable(node: &SyntaxNode) -> BreakableKind { node.ancestors() .take_while(|it| it.kind() != SyntaxKind::FN && it.kind() != SyntaxKind::CLOSURE_EXPR) .find_map(|it| { - let loop_body = match_ast! { + let (breakable, loop_body) = match_ast! { match it { - ast::ForExpr(it) => it.loop_body(), - ast::WhileExpr(it) => it.loop_body(), - ast::LoopExpr(it) => it.loop_body(), - _ => None, + ast::ForExpr(it) => (BreakableKind::For, it.loop_body()), + ast::WhileExpr(it) => (BreakableKind::While, it.loop_body()), + ast::LoopExpr(it) => (BreakableKind::Loop, it.loop_body()), + ast::BlockExpr(it) => return it.label().map(|_| BreakableKind::Block), + _ => return None, } }; - loop_body.filter(|it| it.syntax().text_range().contains_range(node.text_range())) + loop_body + .filter(|it| it.syntax().text_range().contains_range(node.text_range())) + .map(|_| breakable) }) - .is_some() + .unwrap_or(BreakableKind::None) +} + +fn is_in_block(node: &SyntaxNode) -> bool { + node.parent() + .map(|node| ast::ExprStmt::can_cast(node.kind()) || ast::StmtList::can_cast(node.kind())) + .unwrap_or(false) } fn previous_non_trivia_token(e: impl Into) -> Option { diff --git a/crates/ide-completion/src/render.rs b/crates/ide-completion/src/render.rs index 4d49d2f498..5172829266 100644 --- a/crates/ide-completion/src/render.rs +++ 
b/crates/ide-completion/src/render.rs @@ -2199,12 +2199,14 @@ fn main() { sn while [] sn ref [] sn refm [] + sn deref [] sn unsafe [] sn match [] sn box [] sn dbg [] sn dbgr [] sn call [] + sn return [] "#]], ); } @@ -2227,6 +2229,7 @@ fn main() { me f() [] sn ref [] sn refm [] + sn deref [] sn unsafe [] sn match [] sn box [] @@ -2235,6 +2238,7 @@ fn main() { sn call [] sn let [] sn letm [] + sn return [] "#]], ); } diff --git a/crates/ide-completion/src/tests/expression.rs b/crates/ide-completion/src/tests/expression.rs index 758c254a88..78907a2896 100644 --- a/crates/ide-completion/src/tests/expression.rs +++ b/crates/ide-completion/src/tests/expression.rs @@ -362,6 +362,27 @@ fn completes_in_loop_ctx() { sn ppd "#]], ); + check_empty( + r"fn my() { loop { foo.$0 } }", + expect![[r#" + sn box Box::new(expr) + sn break break expr + sn call function(expr) + sn dbg dbg!(expr) + sn dbgr dbg!(&expr) + sn deref *expr + sn if if expr {} + sn let let + sn letm let mut + sn match match expr {} + sn not !expr + sn ref &expr + sn refm &mut expr + sn return return expr + sn unsafe unsafe {} + sn while while expr {} + "#]], + ); } #[test] @@ -1115,9 +1136,11 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); @@ -1139,9 +1162,11 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); @@ -1167,9 +1192,11 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); @@ -1191,9 +1218,11 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn 
unsafe unsafe {} "#]], ); @@ -1215,9 +1244,11 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); @@ -1238,11 +1269,13 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn if if expr {} sn match match expr {} sn not !expr sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} sn while while expr {} "#]], diff --git a/crates/ide-completion/src/tests/proc_macros.rs b/crates/ide-completion/src/tests/proc_macros.rs index 2d6234e310..613f33309f 100644 --- a/crates/ide-completion/src/tests/proc_macros.rs +++ b/crates/ide-completion/src/tests/proc_macros.rs @@ -29,11 +29,13 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ) @@ -60,11 +62,13 @@ fn main() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ) @@ -93,11 +97,13 @@ fn main() {} sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ) @@ -126,11 +132,13 @@ fn main() {} sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ) diff --git a/crates/ide-completion/src/tests/special.rs b/crates/ide-completion/src/tests/special.rs index f96fb71f28..a87d16c789 100644 --- a/crates/ide-completion/src/tests/special.rs +++ b/crates/ide-completion/src/tests/special.rs @@ -1157,11 +1157,13 
@@ fn here_we_go() { sn call function(expr) sn dbg dbg!(expr) sn dbgr dbg!(&expr) + sn deref *expr sn let let sn letm let mut sn match match expr {} sn ref &expr sn refm &mut expr + sn return return expr sn unsafe unsafe {} "#]], ); diff --git a/crates/ide-db/src/famous_defs.rs b/crates/ide-db/src/famous_defs.rs index 722517a767..4edfa37b32 100644 --- a/crates/ide-db/src/famous_defs.rs +++ b/crates/ide-db/src/famous_defs.rs @@ -1,7 +1,7 @@ //! See [`FamousDefs`]. use base_db::{CrateOrigin, LangCrateOrigin, SourceDatabase}; -use hir::{Crate, Enum, Macro, Module, ScopeDef, Semantics, Trait}; +use hir::{Crate, Enum, Function, Macro, Module, ScopeDef, Semantics, Trait}; use crate::RootDatabase; @@ -110,6 +110,10 @@ impl FamousDefs<'_, '_> { self.find_macro("core:macros:builtin:derive") } + pub fn core_mem_drop(&self) -> Option { + self.find_function("core:mem:drop") + } + pub fn builtin_crates(&self) -> impl Iterator { IntoIterator::into_iter([ self.std(), @@ -149,6 +153,13 @@ impl FamousDefs<'_, '_> { } } + fn find_function(&self, path: &str) -> Option { + match self.find_def(path)? 
{ + hir::ScopeDef::ModuleDef(hir::ModuleDef::Function(it)) => Some(it), + _ => None, + } + } + fn find_lang_crate(&self, origin: LangCrateOrigin) -> Option { let krate = self.1; let db = self.0.db; diff --git a/crates/test-utils/src/minicore.rs b/crates/test-utils/src/minicore.rs index 9c25d88cb8..23a3a7e0af 100644 --- a/crates/test-utils/src/minicore.rs +++ b/crates/test-utils/src/minicore.rs @@ -328,7 +328,6 @@ pub mod convert { } pub mod mem { - // region:drop // region:manually_drop #[lang = "manually_drop"] #[repr(transparent)] @@ -353,6 +352,7 @@ pub mod mem { // endregion:manually_drop + // region:drop pub fn drop(_x: T) {} pub const fn replace(dest: &mut T, src: T) -> T { unsafe { From ddddc9c0e20141dd3f123a1b5b1e25dd682a83d2 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 8 Feb 2024 14:57:21 +0100 Subject: [PATCH 057/122] Update test outputs --- crates/ide/src/hover/tests.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/ide/src/hover/tests.rs b/crates/ide/src/hover/tests.rs index 78efa9cac8..30bfe6ee9d 100644 --- a/crates/ide/src/hover/tests.rs +++ b/crates/ide/src/hover/tests.rs @@ -406,8 +406,8 @@ fn main() { file_id: FileId( 1, ), - full_range: 631..866, - focus_range: 692..698, + full_range: 632..867, + focus_range: 693..699, name: "FnOnce", kind: Trait, container_name: "function", @@ -7263,8 +7263,8 @@ impl Iterator for S { file_id: FileId( 1, ), - full_range: 6156..6364, - focus_range: 6221..6227, + full_range: 6157..6365, + focus_range: 6222..6228, name: "Future", kind: Trait, container_name: "future", @@ -7277,8 +7277,8 @@ impl Iterator for S { file_id: FileId( 1, ), - full_range: 6994..7460, - focus_range: 7038..7046, + full_range: 6995..7461, + focus_range: 7039..7047, name: "Iterator", kind: Trait, container_name: "iterator", From dad0fdb13f84326326ca15b116dda62c0c482858 Mon Sep 17 00:00:00 2001 From: austaras Date: Sun, 4 Feb 2024 11:35:27 +0800 Subject: [PATCH 058/122] fix: preserve where clause 
when builtin derive --- .../builtin_derive_macro.rs | 2 +- crates/hir-expand/src/builtin_derive_macro.rs | 22 ++++++++++--- crates/hir-ty/src/tests/macros.rs | 31 +++++++++++++++++++ 3 files changed, 50 insertions(+), 5 deletions(-) diff --git a/crates/hir-def/src/macro_expansion_tests/builtin_derive_macro.rs b/crates/hir-def/src/macro_expansion_tests/builtin_derive_macro.rs index 553c0b7953..86b4466153 100644 --- a/crates/hir-def/src/macro_expansion_tests/builtin_derive_macro.rs +++ b/crates/hir-def/src/macro_expansion_tests/builtin_derive_macro.rs @@ -157,7 +157,7 @@ where generic: Vec, } -impl $crate::clone::Clone for Foo where T: Trait, T::InFieldShorthand: $crate::clone::Clone, T::InGenericArg: $crate::clone::Clone, { +impl $crate::clone::Clone for Foo where ::InWc: Marker, T: Trait, T::InFieldShorthand: $crate::clone::Clone, T::InGenericArg: $crate::clone::Clone, { fn clone(&self ) -> Self { match self { Foo { diff --git a/crates/hir-expand/src/builtin_derive_macro.rs b/crates/hir-expand/src/builtin_derive_macro.rs index 024fb8c1f6..2795487514 100644 --- a/crates/hir-expand/src/builtin_derive_macro.rs +++ b/crates/hir-expand/src/builtin_derive_macro.rs @@ -194,6 +194,7 @@ struct BasicAdtInfo { /// second field is `Some(ty)` if it's a const param of type `ty`, `None` if it's a type param. 
/// third fields is where bounds, if any param_types: Vec<(tt::Subtree, Option, Option)>, + where_clause: Vec, associated_types: Vec, } @@ -202,10 +203,11 @@ fn parse_adt( adt: &ast::Adt, call_site: Span, ) -> Result { - let (name, generic_param_list, shape) = match adt { + let (name, generic_param_list, where_clause, shape) = match adt { ast::Adt::Struct(it) => ( it.name(), it.generic_param_list(), + it.where_clause(), AdtShape::Struct(VariantShape::from(tm, it.field_list())?), ), ast::Adt::Enum(it) => { @@ -217,6 +219,7 @@ fn parse_adt( ( it.name(), it.generic_param_list(), + it.where_clause(), AdtShape::Enum { default_variant, variants: it @@ -233,7 +236,9 @@ fn parse_adt( }, ) } - ast::Adt::Union(it) => (it.name(), it.generic_param_list(), AdtShape::Union), + ast::Adt::Union(it) => { + (it.name(), it.generic_param_list(), it.where_clause(), AdtShape::Union) + } }; let mut param_type_set: FxHashSet = FxHashSet::default(); @@ -274,6 +279,14 @@ fn parse_adt( }) .collect(); + let where_clause = if let Some(w) = where_clause { + w.predicates() + .map(|it| mbe::syntax_node_to_token_tree(it.syntax(), tm, call_site)) + .collect() + } else { + vec![] + }; + // For a generic parameter `T`, when shorthand associated type `T::Assoc` appears in field // types (of any variant for enums), we generate trait bound for it. It sounds reasonable to // also generate trait bound for qualified associated type `::Assoc`, but rustc @@ -301,7 +314,7 @@ fn parse_adt( .map(|it| mbe::syntax_node_to_token_tree(it.syntax(), tm, call_site)) .collect(); let name_token = name_to_token(tm, name)?; - Ok(BasicAdtInfo { name: name_token, shape, param_types, associated_types }) + Ok(BasicAdtInfo { name: name_token, shape, param_types, where_clause, associated_types }) } fn name_to_token( @@ -366,7 +379,8 @@ fn expand_simple_derive( } }; let trait_body = make_trait_body(&info); - let mut where_block = vec![]; + let mut where_block: Vec<_> = + info.where_clause.into_iter().map(|w| quote! 
{invoc_span => #w , }).collect(); let (params, args): (Vec<_>, Vec<_>) = info .param_types .into_iter() diff --git a/crates/hir-ty/src/tests/macros.rs b/crates/hir-ty/src/tests/macros.rs index b0a9361f1c..2f75338f99 100644 --- a/crates/hir-ty/src/tests/macros.rs +++ b/crates/hir-ty/src/tests/macros.rs @@ -1373,3 +1373,34 @@ pub fn attr_macro() {} "#, ); } + +#[test] +fn clone_with_type_bound() { + check_types( + r#" +//- minicore: derive, clone, builtin_impls +#[derive(Clone)] +struct Float; + +trait TensorKind: Clone { + /// The primitive type of the tensor. + type Primitive: Clone; +} + +impl TensorKind for Float { + type Primitive = f64; +} + +#[derive(Clone)] +struct Tensor where K: TensorKind +{ + primitive: K::Primitive, +} + +fn foo(t: Tensor) { + let x = t.clone(); + //^ Tensor +} +"#, + ); +} From 2987fac76fdc09056670180c5f2e2357d9af9247 Mon Sep 17 00:00:00 2001 From: davidsemakula Date: Tue, 30 Jan 2024 19:57:36 +0300 Subject: [PATCH 059/122] diagnostic to remove trailing return --- crates/hir-ty/src/diagnostics/expr.rs | 31 +++ crates/hir/src/diagnostics.rs | 18 ++ .../src/handlers/remove_trailing_return.rs | 262 ++++++++++++++++++ .../src/handlers/type_mismatch.rs | 7 +- crates/ide-diagnostics/src/lib.rs | 2 + 5 files changed, 318 insertions(+), 2 deletions(-) create mode 100644 crates/ide-diagnostics/src/handlers/remove_trailing_return.rs diff --git a/crates/hir-ty/src/diagnostics/expr.rs b/crates/hir-ty/src/diagnostics/expr.rs index c09351390a..af3ca3c082 100644 --- a/crates/hir-ty/src/diagnostics/expr.rs +++ b/crates/hir-ty/src/diagnostics/expr.rs @@ -44,6 +44,9 @@ pub enum BodyValidationDiagnostic { match_expr: ExprId, uncovered_patterns: String, }, + RemoveTrailingReturn { + return_expr: ExprId, + }, RemoveUnnecessaryElse { if_expr: ExprId, }, @@ -75,6 +78,10 @@ impl ExprValidator { let body = db.body(self.owner); let mut filter_map_next_checker = None; + if matches!(self.owner, DefWithBodyId::FunctionId(_)) { + 
self.check_for_trailing_return(body.body_expr, &body); + } + for (id, expr) in body.exprs.iter() { if let Some((variant, missed_fields, true)) = record_literal_missing_fields(db, &self.infer, id, expr) @@ -93,12 +100,16 @@ impl ExprValidator { Expr::Call { .. } | Expr::MethodCall { .. } => { self.validate_call(db, id, expr, &mut filter_map_next_checker); } + Expr::Closure { body: body_expr, .. } => { + self.check_for_trailing_return(*body_expr, &body); + } Expr::If { .. } => { self.check_for_unnecessary_else(id, expr, &body); } _ => {} } } + for (id, pat) in body.pats.iter() { if let Some((variant, missed_fields, true)) = record_pattern_missing_fields(db, &self.infer, id, pat) @@ -244,6 +255,26 @@ impl ExprValidator { pattern } + fn check_for_trailing_return(&mut self, body_expr: ExprId, body: &Body) { + match &body.exprs[body_expr] { + Expr::Block { statements, tail, .. } => { + let last_stmt = tail.or_else(|| match statements.last()? { + Statement::Expr { expr, .. } => Some(*expr), + _ => None, + }); + if let Some(last_stmt) = last_stmt { + self.check_for_trailing_return(last_stmt, body); + } + } + Expr::Return { .. 
} => { + self.diagnostics.push(BodyValidationDiagnostic::RemoveTrailingReturn { + return_expr: body_expr, + }); + } + _ => (), + } + } + fn check_for_unnecessary_else(&mut self, id: ExprId, expr: &Expr, body: &Body) { if let Expr::If { condition: _, then_branch, else_branch } = expr { if else_branch.is_none() { diff --git a/crates/hir/src/diagnostics.rs b/crates/hir/src/diagnostics.rs index 487e0c8f7a..cec6be8e89 100644 --- a/crates/hir/src/diagnostics.rs +++ b/crates/hir/src/diagnostics.rs @@ -68,6 +68,7 @@ diagnostics![ PrivateAssocItem, PrivateField, ReplaceFilterMapNextWithFindMap, + RemoveTrailingReturn, RemoveUnnecessaryElse, TraitImplIncorrectSafety, TraitImplMissingAssocItems, @@ -343,6 +344,12 @@ pub struct TraitImplRedundantAssocItems { pub assoc_item: (Name, AssocItem), } +#[derive(Debug)] +pub struct RemoveTrailingReturn { + pub file_id: HirFileId, + pub return_expr: AstPtr, +} + #[derive(Debug)] pub struct RemoveUnnecessaryElse { pub if_expr: InFile>, @@ -450,6 +457,17 @@ impl AnyDiagnostic { Err(SyntheticSyntax) => (), } } + BodyValidationDiagnostic::RemoveTrailingReturn { return_expr } => { + if let Ok(source_ptr) = source_map.expr_syntax(return_expr) { + return Some( + RemoveTrailingReturn { + file_id: source_ptr.file_id, + return_expr: source_ptr.value, + } + .into(), + ); + } + } BodyValidationDiagnostic::RemoveUnnecessaryElse { if_expr } => { if let Ok(source_ptr) = source_map.expr_syntax(if_expr) { if let Some(ptr) = source_ptr.value.cast::() { diff --git a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs new file mode 100644 index 0000000000..6cb5911096 --- /dev/null +++ b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs @@ -0,0 +1,262 @@ +use hir::{db::ExpandDatabase, diagnostics::RemoveTrailingReturn, HirFileIdExt, InFile}; +use ide_db::{assists::Assist, source_change::SourceChange}; +use syntax::{ast, AstNode, SyntaxNodePtr}; +use text_edit::TextEdit; + 
+use crate::{fix, Diagnostic, DiagnosticCode, DiagnosticsContext}; + +// Diagnostic: remove-trailing-return +// +// This diagnostic is triggered when there is a redundant `return` at the end of a function +// or closure. +pub(crate) fn remove_trailing_return( + ctx: &DiagnosticsContext<'_>, + d: &RemoveTrailingReturn, +) -> Diagnostic { + let display_range = ctx.sema.diagnostics_display_range(InFile { + file_id: d.file_id, + value: expr_stmt(ctx, d) + .as_ref() + .map(|stmt| SyntaxNodePtr::new(stmt.syntax())) + .unwrap_or_else(|| d.return_expr.into()), + }); + Diagnostic::new( + DiagnosticCode::Clippy("needless_return"), + "replace return ; with ", + display_range, + ) + .with_fixes(fixes(ctx, d)) +} + +fn fixes(ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn) -> Option> { + let return_expr = return_expr(ctx, d)?; + let stmt = expr_stmt(ctx, d); + + let range = stmt.as_ref().map_or(return_expr.syntax(), AstNode::syntax).text_range(); + let replacement = + return_expr.expr().map_or_else(String::new, |expr| format!("{}", expr.syntax().text())); + + let edit = TextEdit::replace(range, replacement); + let source_change = SourceChange::from_text_edit(d.file_id.original_file(ctx.sema.db), edit); + + Some(vec![fix( + "remove_trailing_return", + "Replace return ; with ", + source_change, + range, + )]) +} + +fn return_expr(ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn) -> Option { + let root = ctx.sema.db.parse_or_expand(d.file_id); + let expr = d.return_expr.to_node(&root); + ast::ReturnExpr::cast(expr.syntax().clone()) +} + +fn expr_stmt(ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn) -> Option { + let return_expr = return_expr(ctx, d)?; + return_expr.syntax().parent().and_then(ast::ExprStmt::cast) +} + +#[cfg(test)] +mod tests { + use crate::tests::{check_diagnostics, check_fix}; + + #[test] + fn remove_trailing_return() { + check_diagnostics( + r#" +fn foo() -> u8 { + return 2; +} //^^^^^^^^^ 💡 weak: replace return ; with +"#, + ); + } + + 
#[test] + fn remove_trailing_return_inner_function() { + check_diagnostics( + r#" +fn foo() -> u8 { + fn bar() -> u8 { + return 2; + } //^^^^^^^^^ 💡 weak: replace return ; with + bar() +} +"#, + ); + } + + #[test] + fn remove_trailing_return_closure() { + check_diagnostics( + r#" +fn foo() -> u8 { + let bar = || return 2; + bar() //^^^^^^^^ 💡 weak: replace return ; with +} +"#, + ); + check_diagnostics( + r#" +fn foo() -> u8 { + let bar = || { + return 2; + };//^^^^^^^^^ 💡 weak: replace return ; with + bar() +} +"#, + ); + } + + #[test] + fn remove_trailing_return_unit() { + check_diagnostics( + r#" +fn foo() { + return +} //^^^^^^ 💡 weak: replace return ; with +"#, + ); + } + + #[test] + fn remove_trailing_return_no_semi() { + check_diagnostics( + r#" +fn foo() -> u8 { + return 2 +} //^^^^^^^^ 💡 weak: replace return ; with +"#, + ); + } + + #[test] + fn no_diagnostic_if_no_return_keyword() { + check_diagnostics( + r#" +fn foo() -> u8 { + 3 +} +"#, + ); + } + + #[test] + fn no_diagnostic_if_not_last_statement() { + check_diagnostics( + r#" +fn foo() -> u8 { + if true { return 2; } + 3 +} +"#, + ); + } + + #[test] + fn replace_with_expr() { + check_fix( + r#" +fn foo() -> u8 { + return$0 2; +} +"#, + r#" +fn foo() -> u8 { + 2 +} +"#, + ); + } + + #[test] + fn replace_with_unit() { + check_fix( + r#" +fn foo() { + return$0/*ensure tidy is happy*/ +} +"#, + r#" +fn foo() { + /*ensure tidy is happy*/ +} +"#, + ); + } + + #[test] + fn replace_with_expr_no_semi() { + check_fix( + r#" +fn foo() -> u8 { + return$0 2 +} +"#, + r#" +fn foo() -> u8 { + 2 +} +"#, + ); + } + + #[test] + fn replace_in_inner_function() { + check_fix( + r#" +fn foo() -> u8 { + fn bar() -> u8 { + return$0 2; + } + bar() +} +"#, + r#" +fn foo() -> u8 { + fn bar() -> u8 { + 2 + } + bar() +} +"#, + ); + } + + #[test] + fn replace_in_closure() { + check_fix( + r#" +fn foo() -> u8 { + let bar = || return$0 2; + bar() +} +"#, + r#" +fn foo() -> u8 { + let bar = || 2; + bar() +} +"#, + ); + check_fix( + 
r#" +fn foo() -> u8 { + let bar = || { + return$0 2; + }; + bar() +} +"#, + r#" +fn foo() -> u8 { + let bar = || { + 2 + }; + bar() +} +"#, + ); + } +} diff --git a/crates/ide-diagnostics/src/handlers/type_mismatch.rs b/crates/ide-diagnostics/src/handlers/type_mismatch.rs index 750189beec..eec8efe785 100644 --- a/crates/ide-diagnostics/src/handlers/type_mismatch.rs +++ b/crates/ide-diagnostics/src/handlers/type_mismatch.rs @@ -186,7 +186,9 @@ fn str_ref_to_owned( #[cfg(test)] mod tests { - use crate::tests::{check_diagnostics, check_fix, check_no_fix}; + use crate::tests::{ + check_diagnostics, check_diagnostics_with_disabled, check_fix, check_no_fix, + }; #[test] fn missing_reference() { @@ -718,7 +720,7 @@ struct Bar { #[test] fn return_no_value() { - check_diagnostics( + check_diagnostics_with_disabled( r#" fn f() -> i32 { return; @@ -727,6 +729,7 @@ fn f() -> i32 { } fn g() { return; } "#, + std::iter::once("needless_return".to_string()), ); } diff --git a/crates/ide-diagnostics/src/lib.rs b/crates/ide-diagnostics/src/lib.rs index 7423de0be7..7c5cf67330 100644 --- a/crates/ide-diagnostics/src/lib.rs +++ b/crates/ide-diagnostics/src/lib.rs @@ -43,6 +43,7 @@ mod handlers { pub(crate) mod no_such_field; pub(crate) mod private_assoc_item; pub(crate) mod private_field; + pub(crate) mod remove_trailing_return; pub(crate) mod remove_unnecessary_else; pub(crate) mod replace_filter_map_next_with_find_map; pub(crate) mod trait_impl_incorrect_safety; @@ -383,6 +384,7 @@ pub fn diagnostics( AnyDiagnostic::UnusedVariable(d) => handlers::unused_variables::unused_variables(&ctx, &d), AnyDiagnostic::BreakOutsideOfLoop(d) => handlers::break_outside_of_loop::break_outside_of_loop(&ctx, &d), AnyDiagnostic::MismatchedTupleStructPatArgCount(d) => handlers::mismatched_arg_count::mismatched_tuple_struct_pat_arg_count(&ctx, &d), + AnyDiagnostic::RemoveTrailingReturn(d) => handlers::remove_trailing_return::remove_trailing_return(&ctx, &d), AnyDiagnostic::RemoveUnnecessaryElse(d) => 
handlers::remove_unnecessary_else::remove_unnecessary_else(&ctx, &d), }; res.push(d) From cad222ff1bdf438aaf638811f53e0be33b32fcf3 Mon Sep 17 00:00:00 2001 From: davidsemakula Date: Wed, 31 Jan 2024 06:46:48 +0300 Subject: [PATCH 060/122] remove trailing return in trailing if expression --- crates/hir-ty/src/diagnostics/expr.rs | 6 ++ .../src/handlers/remove_trailing_return.rs | 60 +++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/crates/hir-ty/src/diagnostics/expr.rs b/crates/hir-ty/src/diagnostics/expr.rs index af3ca3c082..50312af4f2 100644 --- a/crates/hir-ty/src/diagnostics/expr.rs +++ b/crates/hir-ty/src/diagnostics/expr.rs @@ -266,6 +266,12 @@ impl ExprValidator { self.check_for_trailing_return(last_stmt, body); } } + Expr::If { then_branch, else_branch, .. } => { + self.check_for_trailing_return(*then_branch, body); + if let Some(else_branch) = else_branch { + self.check_for_trailing_return(*else_branch, body); + } + } Expr::Return { .. } => { self.diagnostics.push(BodyValidationDiagnostic::RemoveTrailingReturn { return_expr: body_expr, diff --git a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs index 6cb5911096..ceef9da16c 100644 --- a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs +++ b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs @@ -131,6 +131,22 @@ fn foo() -> u8 { ); } + #[test] + fn remove_trailing_return_in_if() { + check_diagnostics( + r#" +fn foo(x: usize) -> u8 { + if x > 0 { + return 1; + //^^^^^^^^^ 💡 weak: replace return ; with + } else { + return 0; + } //^^^^^^^^^ 💡 weak: replace return ; with +} +"#, + ); + } + #[test] fn no_diagnostic_if_no_return_keyword() { check_diagnostics( @@ -256,6 +272,50 @@ fn foo() -> u8 { }; bar() } +"#, + ); + } + + #[test] + fn replace_in_if() { + check_fix( + r#" +fn foo(x: usize) -> u8 { + if x > 0 { + return$0 1; + } else { + 0 + } +} +"#, + r#" +fn foo(x: usize) -> u8 { + if x > 0 { 
+ 1 + } else { + 0 + } +} +"#, + ); + check_fix( + r#" +fn foo(x: usize) -> u8 { + if x > 0 { + 1 + } else { + return$0 0; + } +} +"#, + r#" +fn foo(x: usize) -> u8 { + if x > 0 { + 1 + } else { + 0 + } +} "#, ); } From 98e6f43a2f8073a4647d8609c9f23873090cd794 Mon Sep 17 00:00:00 2001 From: davidsemakula Date: Wed, 31 Jan 2024 07:04:49 +0300 Subject: [PATCH 061/122] remove trailing return in trailing match expression --- crates/hir-ty/src/diagnostics/expr.rs | 6 ++ crates/hir/src/diagnostics.rs | 19 ++++--- .../src/handlers/remove_trailing_return.rs | 55 +++++++++++++++++++ 3 files changed, 72 insertions(+), 8 deletions(-) diff --git a/crates/hir-ty/src/diagnostics/expr.rs b/crates/hir-ty/src/diagnostics/expr.rs index 50312af4f2..7f8fb7f4b5 100644 --- a/crates/hir-ty/src/diagnostics/expr.rs +++ b/crates/hir-ty/src/diagnostics/expr.rs @@ -272,6 +272,12 @@ impl ExprValidator { self.check_for_trailing_return(*else_branch, body); } } + Expr::Match { arms, .. } => { + for arm in arms.iter() { + let MatchArm { expr, .. } = arm; + self.check_for_trailing_return(*expr, body); + } + } Expr::Return { .. 
} => { self.diagnostics.push(BodyValidationDiagnostic::RemoveTrailingReturn { return_expr: body_expr, diff --git a/crates/hir/src/diagnostics.rs b/crates/hir/src/diagnostics.rs index cec6be8e89..b9f86f3415 100644 --- a/crates/hir/src/diagnostics.rs +++ b/crates/hir/src/diagnostics.rs @@ -11,7 +11,7 @@ use cfg::{CfgExpr, CfgOptions}; use either::Either; use hir_def::{body::SyntheticSyntax, hir::ExprOrPatId, path::ModPath, AssocItemId, DefWithBodyId}; use hir_expand::{name::Name, HirFileId, InFile}; -use syntax::{ast, AstPtr, SyntaxError, SyntaxNodePtr, TextRange}; +use syntax::{ast, AstNode, AstPtr, SyntaxError, SyntaxNodePtr, TextRange}; use crate::{AssocItem, Field, Local, MacroKind, Trait, Type}; @@ -459,13 +459,16 @@ impl AnyDiagnostic { } BodyValidationDiagnostic::RemoveTrailingReturn { return_expr } => { if let Ok(source_ptr) = source_map.expr_syntax(return_expr) { - return Some( - RemoveTrailingReturn { - file_id: source_ptr.file_id, - return_expr: source_ptr.value, - } - .into(), - ); + // Filters out desugared return expressions (e.g. desugared try operators). 
+ if ast::ReturnExpr::can_cast(source_ptr.value.kind()) { + return Some( + RemoveTrailingReturn { + file_id: source_ptr.file_id, + return_expr: source_ptr.value, + } + .into(), + ); + } } } BodyValidationDiagnostic::RemoveUnnecessaryElse { if_expr } => { diff --git a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs index ceef9da16c..6c4ec7c132 100644 --- a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs +++ b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs @@ -147,6 +147,21 @@ fn foo(x: usize) -> u8 { ); } + #[test] + fn remove_trailing_return_in_match() { + check_diagnostics( + r#" +fn foo(x: Result) -> u8 { + match x { + Ok(_) => return 1, + //^^^^^^^^ 💡 weak: replace return ; with + Err(_) => return 0, + } //^^^^^^^^ 💡 weak: replace return ; with +} +"#, + ); + } + #[test] fn no_diagnostic_if_no_return_keyword() { check_diagnostics( @@ -316,6 +331,46 @@ fn foo(x: usize) -> u8 { 0 } } +"#, + ); + } + + #[test] + fn replace_in_match() { + check_fix( + r#" +fn foo(x: Result) -> u8 { + match x { + Ok(_) => return$0 1, + Err(_) => 0, + } +} +"#, + r#" +fn foo(x: Result) -> u8 { + match x { + Ok(_) => 1, + Err(_) => 0, + } +} +"#, + ); + check_fix( + r#" +fn foo(x: Result) -> u8 { + match x { + Ok(_) => 1, + Err(_) => return$0 0, + } +} +"#, + r#" +fn foo(x: Result) -> u8 { + match x { + Ok(_) => 1, + Err(_) => 0, + } +} "#, ); } From a250c2dde032c90ca6b4e789921831c085b0bc85 Mon Sep 17 00:00:00 2001 From: davidsemakula Date: Wed, 31 Jan 2024 15:04:41 +0300 Subject: [PATCH 062/122] refactor remove trailing return diagnostic --- crates/hir/src/diagnostics.rs | 10 ++-- .../src/handlers/remove_trailing_return.rs | 46 ++++++++----------- 2 files changed, 24 insertions(+), 32 deletions(-) diff --git a/crates/hir/src/diagnostics.rs b/crates/hir/src/diagnostics.rs index b9f86f3415..e07c48aa8a 100644 --- a/crates/hir/src/diagnostics.rs +++ b/crates/hir/src/diagnostics.rs @@ 
-11,7 +11,7 @@ use cfg::{CfgExpr, CfgOptions}; use either::Either; use hir_def::{body::SyntheticSyntax, hir::ExprOrPatId, path::ModPath, AssocItemId, DefWithBodyId}; use hir_expand::{name::Name, HirFileId, InFile}; -use syntax::{ast, AstNode, AstPtr, SyntaxError, SyntaxNodePtr, TextRange}; +use syntax::{ast, AstPtr, SyntaxError, SyntaxNodePtr, TextRange}; use crate::{AssocItem, Field, Local, MacroKind, Trait, Type}; @@ -346,8 +346,7 @@ pub struct TraitImplRedundantAssocItems { #[derive(Debug)] pub struct RemoveTrailingReturn { - pub file_id: HirFileId, - pub return_expr: AstPtr, + pub return_expr: InFile>, } #[derive(Debug)] @@ -460,11 +459,10 @@ impl AnyDiagnostic { BodyValidationDiagnostic::RemoveTrailingReturn { return_expr } => { if let Ok(source_ptr) = source_map.expr_syntax(return_expr) { // Filters out desugared return expressions (e.g. desugared try operators). - if ast::ReturnExpr::can_cast(source_ptr.value.kind()) { + if let Some(ptr) = source_ptr.value.cast::() { return Some( RemoveTrailingReturn { - file_id: source_ptr.file_id, - return_expr: source_ptr.value, + return_expr: InFile::new(source_ptr.file_id, ptr), } .into(), ); diff --git a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs index 6c4ec7c132..276ac0d15d 100644 --- a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs +++ b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs @@ -1,9 +1,9 @@ -use hir::{db::ExpandDatabase, diagnostics::RemoveTrailingReturn, HirFileIdExt, InFile}; -use ide_db::{assists::Assist, source_change::SourceChange}; -use syntax::{ast, AstNode, SyntaxNodePtr}; +use hir::{db::ExpandDatabase, diagnostics::RemoveTrailingReturn}; +use ide_db::{assists::Assist, base_db::FileRange, source_change::SourceChange}; +use syntax::{ast, AstNode}; use text_edit::TextEdit; -use crate::{fix, Diagnostic, DiagnosticCode, DiagnosticsContext}; +use crate::{adjusted_display_range, fix, Diagnostic, 
DiagnosticCode, DiagnosticsContext}; // Diagnostic: remove-trailing-return // @@ -13,12 +13,12 @@ pub(crate) fn remove_trailing_return( ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn, ) -> Diagnostic { - let display_range = ctx.sema.diagnostics_display_range(InFile { - file_id: d.file_id, - value: expr_stmt(ctx, d) - .as_ref() - .map(|stmt| SyntaxNodePtr::new(stmt.syntax())) - .unwrap_or_else(|| d.return_expr.into()), + let display_range = adjusted_display_range(ctx, d.return_expr, &|return_expr| { + return_expr + .syntax() + .parent() + .and_then(ast::ExprStmt::cast) + .map(|stmt| stmt.syntax().text_range()) }); Diagnostic::new( DiagnosticCode::Clippy("needless_return"), @@ -29,15 +29,20 @@ pub(crate) fn remove_trailing_return( } fn fixes(ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn) -> Option> { - let return_expr = return_expr(ctx, d)?; - let stmt = expr_stmt(ctx, d); + let root = ctx.sema.db.parse_or_expand(d.return_expr.file_id); + let return_expr = d.return_expr.value.to_node(&root); + let stmt = return_expr.syntax().parent().and_then(ast::ExprStmt::cast); + + let FileRange { range, file_id } = + ctx.sema.original_range_opt(stmt.as_ref().map_or(return_expr.syntax(), AstNode::syntax))?; + if Some(file_id) != d.return_expr.file_id.file_id() { + return None; + } - let range = stmt.as_ref().map_or(return_expr.syntax(), AstNode::syntax).text_range(); let replacement = return_expr.expr().map_or_else(String::new, |expr| format!("{}", expr.syntax().text())); - let edit = TextEdit::replace(range, replacement); - let source_change = SourceChange::from_text_edit(d.file_id.original_file(ctx.sema.db), edit); + let source_change = SourceChange::from_text_edit(file_id, edit); Some(vec![fix( "remove_trailing_return", @@ -47,17 +52,6 @@ fn fixes(ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn) -> Option, d: &RemoveTrailingReturn) -> Option { - let root = ctx.sema.db.parse_or_expand(d.file_id); - let expr = d.return_expr.to_node(&root); - 
ast::ReturnExpr::cast(expr.syntax().clone()) -} - -fn expr_stmt(ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn) -> Option { - let return_expr = return_expr(ctx, d)?; - return_expr.syntax().parent().and_then(ast::ExprStmt::cast) -} - #[cfg(test)] mod tests { use crate::tests::{check_diagnostics, check_fix}; From 602acfcb70f5b0af1157268196eba440cf064b25 Mon Sep 17 00:00:00 2001 From: davidsemakula Date: Thu, 8 Feb 2024 19:29:27 +0300 Subject: [PATCH 063/122] fix conflicting "unnecessary else" and "trailing return" diagnostics tests --- crates/hir/src/diagnostics.rs | 2 +- .../src/handlers/remove_trailing_return.rs | 10 +++++-- .../src/handlers/remove_unnecessary_else.rs | 14 ++++++---- crates/ide-diagnostics/src/tests.rs | 28 +++++++++++++++++-- 4 files changed, 42 insertions(+), 12 deletions(-) diff --git a/crates/hir/src/diagnostics.rs b/crates/hir/src/diagnostics.rs index e07c48aa8a..08843a6c99 100644 --- a/crates/hir/src/diagnostics.rs +++ b/crates/hir/src/diagnostics.rs @@ -67,9 +67,9 @@ diagnostics![ NoSuchField, PrivateAssocItem, PrivateField, - ReplaceFilterMapNextWithFindMap, RemoveTrailingReturn, RemoveUnnecessaryElse, + ReplaceFilterMapNextWithFindMap, TraitImplIncorrectSafety, TraitImplMissingAssocItems, TraitImplOrphan, diff --git a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs index 276ac0d15d..605e8baba0 100644 --- a/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs +++ b/crates/ide-diagnostics/src/handlers/remove_trailing_return.rs @@ -54,7 +54,9 @@ fn fixes(ctx: &DiagnosticsContext<'_>, d: &RemoveTrailingReturn) -> Option u8 { #[test] fn remove_trailing_return_in_if() { - check_diagnostics( + check_diagnostics_with_disabled( r#" fn foo(x: usize) -> u8 { if x > 0 { @@ -138,6 +140,7 @@ fn foo(x: usize) -> u8 { } //^^^^^^^^^ 💡 weak: replace return ; with } "#, + std::iter::once("remove-unnecessary-else".to_string()), ); } @@ -287,7 +290,7 @@ fn foo() -> 
u8 { #[test] fn replace_in_if() { - check_fix( + check_fix_with_disabled( r#" fn foo(x: usize) -> u8 { if x > 0 { @@ -306,6 +309,7 @@ fn foo(x: usize) -> u8 { } } "#, + std::iter::once("remove-unnecessary-else".to_string()), ); check_fix( r#" diff --git a/crates/ide-diagnostics/src/handlers/remove_unnecessary_else.rs b/crates/ide-diagnostics/src/handlers/remove_unnecessary_else.rs index c6c85256f9..124086c8fa 100644 --- a/crates/ide-diagnostics/src/handlers/remove_unnecessary_else.rs +++ b/crates/ide-diagnostics/src/handlers/remove_unnecessary_else.rs @@ -87,11 +87,15 @@ fn fixes(ctx: &DiagnosticsContext<'_>, d: &RemoveUnnecessaryElse) -> Option) #[track_caller] fn check_nth_fix(nth: usize, ra_fixture_before: &str, ra_fixture_after: &str) { + let mut config = DiagnosticsConfig::test_sample(); + config.expr_fill_default = ExprFillDefaultMode::Default; + check_nth_fix_with_config(config, nth, ra_fixture_before, ra_fixture_after) +} + +#[track_caller] +pub(crate) fn check_fix_with_disabled( + ra_fixture_before: &str, + ra_fixture_after: &str, + disabled: impl Iterator, +) { + let mut config = DiagnosticsConfig::test_sample(); + config.expr_fill_default = ExprFillDefaultMode::Default; + config.disabled.extend(disabled); + check_nth_fix_with_config(config, 0, ra_fixture_before, ra_fixture_after) +} + +#[track_caller] +fn check_nth_fix_with_config( + config: DiagnosticsConfig, + nth: usize, + ra_fixture_before: &str, + ra_fixture_after: &str, +) { let after = trim_indent(ra_fixture_after); let (db, file_position) = RootDatabase::with_position(ra_fixture_before); - let mut conf = DiagnosticsConfig::test_sample(); - conf.expr_fill_default = ExprFillDefaultMode::Default; let diagnostic = - super::diagnostics(&db, &conf, &AssistResolveStrategy::All, file_position.file_id) + super::diagnostics(&db, &config, &AssistResolveStrategy::All, file_position.file_id) .pop() .expect("no diagnostics"); let fix = &diagnostic From 6330b028b3f4d10220170fa995aeeb0655f9e8dc Mon Sep 17 
00:00:00 2001 From: David Barsky Date: Tue, 7 Nov 2023 16:26:32 -0500 Subject: [PATCH 064/122] feature: Add a `UnindexedProject` notification and a corresponding setting. --- .../diagnostics/match_check/pat_analysis.rs | 2 +- crates/rust-analyzer/src/config.rs | 9 ++- crates/rust-analyzer/src/global_state.rs | 20 +++++- .../src/handlers/notification.rs | 8 +++ crates/rust-analyzer/src/lsp/ext.rs | 13 ++++ crates/rust-analyzer/src/main_loop.rs | 53 +++++++++++++++- crates/rust-analyzer/src/task_pool.rs | 11 ++++ crates/rust-analyzer/tests/slow-tests/main.rs | 62 ++++++++++++++++++- .../rust-analyzer/tests/slow-tests/support.rs | 38 +++++++++++- docs/dev/lsp-extensions.md | 21 ++++++- docs/user/generated_config.adoc | 5 ++ editors/code/package.json | 5 ++ editors/code/src/ctx.ts | 17 +++++ editors/code/src/lsp_ext.ts | 6 ++ 14 files changed, 262 insertions(+), 8 deletions(-) diff --git a/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs b/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs index 51be8960b8..712842372b 100644 --- a/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs +++ b/crates/hir-ty/src/diagnostics/match_check/pat_analysis.rs @@ -359,7 +359,7 @@ impl<'p> TypeCx for MatchCheckCtx<'p> { &'a self, ctor: &'a rustc_pattern_analysis::constructor::Constructor, ty: &'a Self::Ty, - ) -> impl Iterator + ExactSizeIterator + Captures<'a> { + ) -> impl ExactSizeIterator + Captures<'a> { let single = |ty| smallvec![ty]; let tys: SmallVec<[_; 2]> = match ctor { Struct | Variant(_) | UnionField => match ty.kind(Interner) { diff --git a/crates/rust-analyzer/src/config.rs b/crates/rust-analyzer/src/config.rs index 815f6ea12e..3b071d9d68 100644 --- a/crates/rust-analyzer/src/config.rs +++ b/crates/rust-analyzer/src/config.rs @@ -478,6 +478,9 @@ config_data! { /// Whether to show `can't find Cargo.toml` error message. notifications_cargoTomlNotFound: bool = "true", + /// Whether to send an UnindexedProject notification to the client. 
+ notifications_unindexedProject: bool = "false", + /// How many worker threads in the main loop. The default `null` means to pick automatically. numThreads: Option = "null", @@ -745,6 +748,7 @@ pub enum FilesWatcher { #[derive(Debug, Clone)] pub struct NotificationsConfig { pub cargo_toml_not_found: bool, + pub unindexed_project: bool, } #[derive(Debug, Clone)] @@ -1220,7 +1224,10 @@ impl Config { } pub fn notifications(&self) -> NotificationsConfig { - NotificationsConfig { cargo_toml_not_found: self.data.notifications_cargoTomlNotFound } + NotificationsConfig { + cargo_toml_not_found: self.data.notifications_cargoTomlNotFound, + unindexed_project: self.data.notifications_unindexedProject, + } } pub fn cargo_autoreload(&self) -> bool { diff --git a/crates/rust-analyzer/src/global_state.rs b/crates/rust-analyzer/src/global_state.rs index 2f226d0115..4b3bca8e4a 100644 --- a/crates/rust-analyzer/src/global_state.rs +++ b/crates/rust-analyzer/src/global_state.rs @@ -33,7 +33,7 @@ use crate::{ mem_docs::MemDocs, op_queue::OpQueue, reload, - task_pool::TaskPool, + task_pool::{TaskPool, TaskQueue}, }; // Enforces drop order @@ -126,6 +126,17 @@ pub(crate) struct GlobalState { OpQueue<(), (Arc>, Vec>)>, pub(crate) fetch_proc_macros_queue: OpQueue, bool>, pub(crate) prime_caches_queue: OpQueue, + + /// A deferred task queue. + /// + /// This queue is used for doing database-dependent work inside of sync + /// handlers, as accessing the database may block latency-sensitive + /// interactions and should be moved away from the main thread. + /// + /// For certain features, such as [`lsp_ext::UnindexedProjectParams`], + /// this queue should run only *after* [`GlobalState::process_changes`] has + /// been called. + pub(crate) deferred_task_queue: TaskQueue, } /// An immutable snapshot of the world's state at a point in time. 
@@ -165,6 +176,11 @@ impl GlobalState { Handle { handle, receiver } }; + let task_queue = { + let (sender, receiver) = unbounded(); + TaskQueue { sender, receiver } + }; + let mut analysis_host = AnalysisHost::new(config.lru_parse_query_capacity()); if let Some(capacities) = config.lru_query_capacities() { analysis_host.update_lru_capacities(capacities); @@ -208,6 +224,8 @@ impl GlobalState { fetch_proc_macros_queue: OpQueue::default(), prime_caches_queue: OpQueue::default(), + + deferred_task_queue: task_queue, }; // Apply any required database inputs from the config. this.update_configuration(config); diff --git a/crates/rust-analyzer/src/handlers/notification.rs b/crates/rust-analyzer/src/handlers/notification.rs index 1f24e95010..7416006fc4 100644 --- a/crates/rust-analyzer/src/handlers/notification.rs +++ b/crates/rust-analyzer/src/handlers/notification.rs @@ -70,7 +70,15 @@ pub(crate) fn handle_did_open_text_document( if already_exists { tracing::error!("duplicate DidOpenTextDocument: {}", path); } + state.vfs.write().0.set_file_contents(path, Some(params.text_document.text.into_bytes())); + if state.config.notifications().unindexed_project { + tracing::debug!("queuing task"); + let _ = state + .deferred_task_queue + .sender + .send(crate::main_loop::QueuedTask::CheckIfIndexed(params.text_document.uri)); + } } Ok(()) } diff --git a/crates/rust-analyzer/src/lsp/ext.rs b/crates/rust-analyzer/src/lsp/ext.rs index e23bb8e046..aa40728ce6 100644 --- a/crates/rust-analyzer/src/lsp/ext.rs +++ b/crates/rust-analyzer/src/lsp/ext.rs @@ -703,3 +703,16 @@ pub struct CompletionImport { pub struct ClientCommandOptions { pub commands: Vec, } + +pub enum UnindexedProject {} + +impl Notification for UnindexedProject { + type Params = UnindexedProjectParams; + const METHOD: &'static str = "rust-analyzer/unindexedProject"; +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct UnindexedProjectParams { + pub text_documents: Vec, +} diff 
--git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs index f3ead6d04f..39056c700a 100644 --- a/crates/rust-analyzer/src/main_loop.rs +++ b/crates/rust-analyzer/src/main_loop.rs @@ -1,5 +1,6 @@ //! The main loop of `rust-analyzer` responsible for dispatching LSP //! requests/replies and notifications back to the client. +use crate::lsp::ext; use std::{ fmt, time::{Duration, Instant}, @@ -56,6 +57,7 @@ pub fn main_loop(config: Config, connection: Connection) -> anyhow::Result<()> { enum Event { Lsp(lsp_server::Message), Task(Task), + QueuedTask(QueuedTask), Vfs(vfs::loader::Message), Flycheck(flycheck::Message), } @@ -67,13 +69,20 @@ impl fmt::Display for Event { Event::Task(_) => write!(f, "Event::Task"), Event::Vfs(_) => write!(f, "Event::Vfs"), Event::Flycheck(_) => write!(f, "Event::Flycheck"), + Event::QueuedTask(_) => write!(f, "Event::QueuedTask"), } } } +#[derive(Debug)] +pub(crate) enum QueuedTask { + CheckIfIndexed(lsp_types::Url), +} + #[derive(Debug)] pub(crate) enum Task { Response(lsp_server::Response), + ClientNotification(ext::UnindexedProjectParams), Retry(lsp_server::Request), Diagnostics(Vec<(FileId, Vec)>), PrimeCaches(PrimeCachesProgress), @@ -115,6 +124,7 @@ impl fmt::Debug for Event { match self { Event::Lsp(it) => fmt::Debug::fmt(it, f), Event::Task(it) => fmt::Debug::fmt(it, f), + Event::QueuedTask(it) => fmt::Debug::fmt(it, f), Event::Vfs(it) => fmt::Debug::fmt(it, f), Event::Flycheck(it) => fmt::Debug::fmt(it, f), } @@ -193,6 +203,9 @@ impl GlobalState { recv(self.task_pool.receiver) -> task => Some(Event::Task(task.unwrap())), + recv(self.deferred_task_queue.receiver) -> task => + Some(Event::QueuedTask(task.unwrap())), + recv(self.fmt_pool.receiver) -> task => Some(Event::Task(task.unwrap())), @@ -211,7 +224,7 @@ impl GlobalState { .entered(); let event_dbg_msg = format!("{event:?}"); - tracing::debug!("{:?} handle_event({})", loop_start, event_dbg_msg); + tracing::debug!(?loop_start, ?event, 
"handle_event"); if tracing::enabled!(tracing::Level::INFO) { let task_queue_len = self.task_pool.handle.len(); if task_queue_len > 0 { @@ -226,6 +239,16 @@ impl GlobalState { lsp_server::Message::Notification(not) => self.on_notification(not)?, lsp_server::Message::Response(resp) => self.complete_request(resp), }, + Event::QueuedTask(task) => { + let _p = + tracing::span!(tracing::Level::INFO, "GlobalState::handle_event/queued_task") + .entered(); + self.handle_queued_task(task); + // Coalesce multiple task events into one loop turn + while let Ok(task) = self.deferred_task_queue.receiver.try_recv() { + self.handle_queued_task(task); + } + } Event::Task(task) => { let _p = tracing::span!(tracing::Level::INFO, "GlobalState::handle_event/task") .entered(); @@ -498,6 +521,9 @@ impl GlobalState { fn handle_task(&mut self, prime_caches_progress: &mut Vec, task: Task) { match task { Task::Response(response) => self.respond(response), + Task::ClientNotification(params) => { + self.send_notification::(params) + } // Only retry requests that haven't been cancelled. Otherwise we do unnecessary work. 
Task::Retry(req) if !self.is_completed(&req) => self.on_request(req), Task::Retry(_) => (), @@ -638,6 +664,31 @@ impl GlobalState { } } + fn handle_queued_task(&mut self, task: QueuedTask) { + match task { + QueuedTask::CheckIfIndexed(uri) => { + let snap = self.snapshot(); + + self.task_pool.handle.spawn_with_sender(ThreadIntent::Worker, move |sender| { + let _p = tracing::span!(tracing::Level::INFO, "GlobalState::check_if_indexed") + .entered(); + tracing::debug!(?uri, "handling uri"); + let id = from_proto::file_id(&snap, &uri).expect("unable to get FileId"); + if let Ok(crates) = &snap.analysis.crates_for(id) { + if crates.is_empty() { + let params = ext::UnindexedProjectParams { + text_documents: vec![lsp_types::TextDocumentIdentifier { uri }], + }; + sender.send(Task::ClientNotification(params)).unwrap(); + } else { + tracing::debug!(?uri, "is indexed"); + } + } + }); + } + } + } + fn handle_flycheck_msg(&mut self, message: flycheck::Message) { match message { flycheck::Message::AddDiagnostic { id, workspace_root, diagnostic } => { diff --git a/crates/rust-analyzer/src/task_pool.rs b/crates/rust-analyzer/src/task_pool.rs index a5a10e8691..f7de5fb2ff 100644 --- a/crates/rust-analyzer/src/task_pool.rs +++ b/crates/rust-analyzer/src/task_pool.rs @@ -4,6 +4,8 @@ use crossbeam_channel::Sender; use stdx::thread::{Pool, ThreadIntent}; +use crate::main_loop::QueuedTask; + pub(crate) struct TaskPool { sender: Sender, pool: Pool, @@ -40,3 +42,12 @@ impl TaskPool { self.pool.len() } } + +/// `TaskQueue`, like its name suggests, queues tasks. +/// +/// This should only be used if a task must run after [`GlobalState::process_changes`] +/// has been called. 
+pub(crate) struct TaskQueue { + pub(crate) sender: crossbeam_channel::Sender, + pub(crate) receiver: crossbeam_channel::Receiver, +} diff --git a/crates/rust-analyzer/tests/slow-tests/main.rs b/crates/rust-analyzer/tests/slow-tests/main.rs index 7a2b7497e0..1ee1c1489a 100644 --- a/crates/rust-analyzer/tests/slow-tests/main.rs +++ b/crates/rust-analyzer/tests/slow-tests/main.rs @@ -30,7 +30,7 @@ use lsp_types::{ PartialResultParams, Position, Range, RenameFilesParams, TextDocumentItem, TextDocumentPositionParams, WorkDoneProgressParams, }; -use rust_analyzer::lsp::ext::{OnEnter, Runnables, RunnablesParams}; +use rust_analyzer::lsp::ext::{OnEnter, Runnables, RunnablesParams, UnindexedProject}; use serde_json::json; use stdx::format_to_acc; use test_utils::skip_slow_tests; @@ -587,6 +587,66 @@ fn main() {{}} ); } +#[test] +fn test_opening_a_file_outside_of_indexed_workspace() { + if skip_slow_tests() { + return; + } + + let tmp_dir = TestDir::new(); + let path = tmp_dir.path(); + + let project = json!({ + "roots": [path], + "crates": [ { + "root_module": path.join("src/crate_one/lib.rs"), + "deps": [], + "edition": "2015", + "cfg": [ "cfg_atom_1", "feature=\"cfg_1\""], + } ] + }); + + let code = format!( + r#" +//- /rust-project.json +{project} + +//- /src/crate_one/lib.rs +mod bar; + +fn main() {{}} +"#, + ); + + let server = Project::with_fixture(&code) + .tmp_dir(tmp_dir) + .with_config(serde_json::json!({ + "notifications": { + "unindexedProject": true + }, + })) + .server() + .wait_until_workspace_is_loaded(); + + let uri = server.doc_id("src/crate_two/lib.rs").uri; + server.notification::(DidOpenTextDocumentParams { + text_document: TextDocumentItem { + uri: uri.clone(), + language_id: "rust".to_string(), + version: 0, + text: "/// Docs\nfn foo() {}".to_string(), + }, + }); + let expected = json!({ + "textDocuments": [ + { + "uri": uri + } + ] + }); + server.expect_notification::(expected); +} + #[test] fn diagnostics_dont_block_typing() { if skip_slow_tests() 
{ diff --git a/crates/rust-analyzer/tests/slow-tests/support.rs b/crates/rust-analyzer/tests/slow-tests/support.rs index d699374f9c..c2bdefa217 100644 --- a/crates/rust-analyzer/tests/slow-tests/support.rs +++ b/crates/rust-analyzer/tests/slow-tests/support.rs @@ -9,7 +9,7 @@ use std::{ use crossbeam_channel::{after, select, Receiver}; use lsp_server::{Connection, Message, Notification, Request}; use lsp_types::{notification::Exit, request::Shutdown, TextDocumentIdentifier, Url}; -use rust_analyzer::{config::Config, lsp, main_loop, tracing}; +use rust_analyzer::{config::Config, lsp, main_loop}; use serde::Serialize; use serde_json::{json, to_string_pretty, Value}; use test_utils::FixtureWithProjectMeta; @@ -91,7 +91,7 @@ impl Project<'_> { static INIT: Once = Once::new(); INIT.call_once(|| { - let _ = tracing::Config { + let _ = rust_analyzer::tracing::Config { writer: TestWriter::default(), // Deliberately enable all `error` logs if the user has not set RA_LOG, as there is usually // useful information in there for debugging. 
@@ -214,6 +214,40 @@ impl Server { self.send_notification(r) } + pub(crate) fn expect_notification(&self, expected: Value) + where + N: lsp_types::notification::Notification, + N::Params: Serialize, + { + while let Some(Message::Notification(actual)) = + recv_timeout(&self.client.receiver).unwrap_or_else(|_| panic!("timed out")) + { + if actual.method == N::METHOD { + let actual = actual + .clone() + .extract::(N::METHOD) + .expect("was not able to extract notification"); + + tracing::debug!(?actual, "got notification"); + if let Some((expected_part, actual_part)) = find_mismatch(&expected, &actual) { + panic!( + "JSON mismatch\nExpected:\n{}\nWas:\n{}\nExpected part:\n{}\nActual part:\n{}\n", + to_string_pretty(&expected).unwrap(), + to_string_pretty(&actual).unwrap(), + to_string_pretty(expected_part).unwrap(), + to_string_pretty(actual_part).unwrap(), + ); + } else { + tracing::debug!("successfully matched notification"); + return; + } + } else { + continue; + } + } + panic!("never got expected notification"); + } + #[track_caller] pub(crate) fn request(&self, params: R::Params, expected_resp: Value) where diff --git a/docs/dev/lsp-extensions.md b/docs/dev/lsp-extensions.md index dd6f1be400..f3100ee194 100644 --- a/docs/dev/lsp-extensions.md +++ b/docs/dev/lsp-extensions.md @@ -1,5 +1,5 @@