diff --git a/crates/ide_completion/src/context.rs b/crates/ide_completion/src/context.rs
index 9c2e2e75de..3ba02d78b5 100644
--- a/crates/ide_completion/src/context.rs
+++ b/crates/ide_completion/src/context.rs
@@ -148,7 +148,11 @@ pub(crate) struct CompletionContext<'a> {
     pub(super) krate: hir::Crate,
     /// The module of the `scope`.
     pub(super) module: hir::Module,
+
+    /// The expected name of what we are completing.
+    /// This is usually the parameter name of the function argument we are completing.
     pub(super) expected_name: Option<NameOrNameRef>,
+    /// The expected type of what we are completing.
     pub(super) expected_type: Option<Type>,
 
     /// The parent function of the cursor position if it exists.
@@ -157,6 +161,7 @@
     pub(super) impl_def: Option<ast::Impl>,
     /// The NameLike under the cursor in the original file if it exists.
     pub(super) name_syntax: Option<ast::NameLike>,
+    /// Are we completing inside a let statement with a missing semicolon?
     pub(super) incomplete_let: bool,
 
     pub(super) completion_location: Option<ImmediateLocation>,
@@ -424,6 +429,7 @@ impl<'a> CompletionContext<'a> {
         let scope = sema.scope_at_offset(&token.parent()?, offset)?;
         let krate = scope.krate();
         let module = scope.module();
+
         let mut locals = FxHashMap::default();
         scope.process_all_names(&mut |name, scope| {
             if let ScopeDef::Local(local) = scope {
@@ -467,8 +473,9 @@
         Some(ctx)
     }
 
-    /// Do the attribute expansion at the current cursor position for both original file and fake file
-    /// as long as possible. As soon as one of the two expansions fail we stop to stay in sync.
+    /// Expand attributes and macro calls at the current cursor position for both the original file
+    /// and fake file repeatedly. As soon as one of the two expansions fails we stop so the original
+    /// and speculative states stay in sync.
     fn expand_and_fill(
         &mut self,
         mut original_file: SyntaxNode,
@@ -489,7 +496,9 @@
                 ),
                 |(a, b)| parent_item(a).zip(parent_item(b)),
             );
-            for (actual_item, item_with_fake_ident) in ancestor_items {
+
+            // first try to expand attributes as these are always the outermost macro calls
+            'ancestors: for (actual_item, item_with_fake_ident) in ancestor_items {
                 match (
                     self.sema.expand_attr_macro(&actual_item),
                     self.sema.speculative_expand_attr_macro(
@@ -498,12 +507,14 @@
                         &actual_item,
                         &item_with_fake_ident,
                         fake_ident_token.clone(),
                     ),
                 ) {
-                    // maybe parent items have attributes
-                    (None, None) => (),
+                    // maybe parent items have attributes, so continue walking the ancestors
+                    (None, None) => continue 'ancestors,
                     // successful expansions
                     (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
                         let new_offset = fake_mapped_token.text_range().start();
                         if new_offset > actual_expansion.text_range().end() {
+                            // offset outside of bounds from the original expansion,
+                            // stop here to prevent problems from happening
                             break 'expansion;
                         }
                         original_file = actual_expansion;
@@ -516,13 +527,15 @@
                     _ => break 'expansion,
                 }
             }
+
+            // No attributes have been expanded, so look for macro_call! token trees or derive token trees
             let orig_tt = match find_node_at_offset::<ast::TokenTree>(&original_file, offset) {
                 Some(it) => it,
-                None => break,
+                None => break 'expansion,
             };
             let spec_tt = match find_node_at_offset::<ast::TokenTree>(&speculative_file, offset) {
                 Some(it) => it,
-                None => break,
+                None => break 'expansion,
             };
 
             // Expand pseudo-derive expansion
@@ -530,7 +543,7 @@
                 orig_tt.syntax().parent().and_then(ast::Meta::cast).and_then(|it| it.parent_attr()),
                 spec_tt.syntax().parent().and_then(ast::Meta::cast).and_then(|it| it.parent_attr()),
             ) {
-                match (
+                if let (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) = (
                     self.sema.expand_derive_as_pseudo_attr_macro(&orig_attr),
                     self.sema.speculative_expand_derive_as_pseudo_attr_macro(
                         &orig_attr,
@@ -538,18 +551,15 @@
                         fake_ident_token.clone(),
                     ),
                 ) {
-                    // Clearly not a derive macro
-                    (None, None) => (),
-                    // successful expansions
-                    (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
-                        let new_offset = fake_mapped_token.text_range().start();
-                        derive_ctx =
-                            Some((actual_expansion, fake_expansion, new_offset, orig_attr));
-                        break 'expansion;
-                    }
-                    // exactly one expansion failed, inconsistent state so stop expanding completely
-                    _ => break 'expansion,
+                    derive_ctx = Some((
+                        actual_expansion,
+                        fake_expansion,
+                        fake_mapped_token.text_range().start(),
+                        orig_attr,
+                    ));
                 }
+                // at this point we won't have any more successful expansions, so stop
+                break 'expansion;
             }
 
             // Expand fn-like macro calls
@@ -560,12 +570,14 @@
                 let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text());
                 let mac_call_path1 =
                     macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text());
+
+                // inconsistent state, stop expanding
                 if mac_call_path0 != mac_call_path1 {
-                    break;
+                    break 'expansion;
                 }
                 let speculative_args = match macro_call_with_fake_ident.token_tree() {
                     Some(tt) => tt,
-                    None => break,
+                    None => break 'expansion,
                 };
 
                 match (
@@ -580,24 +592,30 @@
                     (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
                         let new_offset = fake_mapped_token.text_range().start();
                         if new_offset > actual_expansion.text_range().end() {
-                            break;
+                            // offset outside of bounds from the original expansion,
+                            // stop here to prevent problems from happening
+                            break 'expansion;
                         }
                         original_file = actual_expansion;
                         speculative_file = fake_expansion;
                         fake_ident_token = fake_mapped_token;
                         offset = new_offset;
-                        continue;
+                        continue 'expansion;
                     }
-                    _ => break,
+                    // at least one expansion failed, we won't have anything to expand from this point
+                    // onwards so break out
+                    _ => break 'expansion,
                 }
             }
 
-            break;
+            // none of our states have changed so stop the loop
+            break 'expansion;
         }
 
         self.fill(&original_file, speculative_file, offset, derive_ctx);
     }
 
+    /// Calculate the expected type and name of the cursor position.
     fn expected_type_and_name(&self) -> (Option<Type>, Option<NameOrNameRef>) {
         let mut node = match self.token.parent() {
             Some(it) => it,
@@ -734,6 +752,8 @@
         }
     }
 
+    /// Fill the completion context; this is what does semantic reasoning about the surrounding context
+    /// of the completion location.
     fn fill(
         &mut self,
         original_file: &SyntaxNode,
@@ -1067,6 +1087,7 @@ fn pattern_context_for(original_file: &SyntaxNode, pat: ast::Pat) -> PatternCont
     }
 }
 
+/// Attempts to find `node` inside `syntax` via `node`'s text range.
 fn find_node_in_file<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> {
     let syntax_range = syntax.text_range();
     let range = node.syntax().text_range();
@@ -1074,7 +1095,8 @@ fn find_node_in_file<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> {
     syntax.covering_element(intersection).ancestors().find_map(N::cast)
 }
 
-/// Compensates for the offset introduced by the fake ident
+/// Attempts to find `node` inside `syntax` via `node`'s text range while compensating
+/// for the offset introduced by the fake ident.
 /// This is wrong if `node` comes before the insertion point! Use `find_node_in_file` instead.
 fn find_node_in_file_compensated<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> {
     let syntax_range = syntax.text_range();
@@ -1143,6 +1165,7 @@ const OP_TRAIT_LANG_NAMES: &[&str] = &[
     "shr",
     "sub",
 ];
+
 #[cfg(test)]
 mod tests {
     use expect_test::{expect, Expect};
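Note (not part of the patch): the rewritten `expand_and_fill` loop above expands the real file and the speculative fake-ident file in lockstep and bails out as soon as either side fails, so the two syntax trees never drift apart. The following is a minimal, self-contained sketch of that pattern only; `expand_in_lockstep` and the toy numeric "expansion" step are hypothetical stand-ins and not rust-analyzer APIs.

// Sketch of the lockstep-expansion idea: advance two values with the same
// step function and stop as soon as either side can no longer be advanced,
// keeping the "real" and "speculative" states consistent with each other.
fn expand_in_lockstep<T>(
    mut real: T,
    mut speculative: T,
    expand: impl Fn(&T) -> Option<T>,
) -> (T, T) {
    loop {
        match (expand(&real), expand(&speculative)) {
            // both sides expanded successfully, keep going
            (Some(r), Some(s)) => {
                real = r;
                speculative = s;
            }
            // one (or both) expansions failed: stop here so the states stay in sync
            _ => break,
        }
    }
    (real, speculative)
}

fn main() {
    // Toy "expansion": double a number while it is below 100.
    let step = |n: &u32| (*n < 100).then(|| n * 2);
    let (real, speculative) = expand_in_lockstep(3, 4, step);
    // real stops at 96 because the speculative side (128) can no longer expand,
    // even though 96 on its own could still be doubled.
    assert_eq!((real, speculative), (96, 128));
}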