Mirror of https://github.com/rust-lang/rust-analyzer, synced 2024-12-27 05:23:24 +00:00

Merge pull request #18723 from ChayimFriedman2/tracing-complete

fix: Fix a case where completion was unable to expand a macro

Commit 15d2d509d0, 16 changed files with 604 additions and 368 deletions
@@ -153,13 +153,13 @@ fn syntax_context(db: &dyn ExpandDatabase, file: HirFileId) -> SyntaxContextId {
 /// This expands the given macro call, but with different arguments. This is
 /// used for completion, where we want to see what 'would happen' if we insert a
 /// token. The `token_to_map` mapped down into the expansion, with the mapped
-/// token returned.
+/// token(s) returned with their priority.
 pub fn expand_speculative(
     db: &dyn ExpandDatabase,
     actual_macro_call: MacroCallId,
     speculative_args: &SyntaxNode,
     token_to_map: SyntaxToken,
-) -> Option<(SyntaxNode, SyntaxToken)> {
+) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
     let loc = db.lookup_intern_macro_call(actual_macro_call);
     let (_, _, span) = db.macro_arg_considering_derives(actual_macro_call, &loc.kind);
 
@@ -303,17 +303,19 @@ pub fn expand_speculative(
         token_tree_to_syntax_node(&speculative_expansion.value, expand_to, loc.def.edition);
 
     let syntax_node = node.syntax_node();
-    let (token, _) = rev_tmap
+    let token = rev_tmap
         .ranges_with_span(span_map.span_for_range(token_to_map.text_range()))
         .filter_map(|(range, ctx)| syntax_node.covering_element(range).into_token().zip(Some(ctx)))
-        .min_by_key(|(t, ctx)| {
+        .map(|(t, ctx)| {
             // prefer tokens of the same kind and text, as well as non opaque marked ones
             // Note the inversion of the score here, as we want to prefer the first token in case
             // of all tokens having the same score
-            ctx.is_opaque(db) as u8
+            let ranking = ctx.is_opaque(db) as u8
                 + 2 * (t.kind() != token_to_map.kind()) as u8
-                + 4 * ((t.text() != token_to_map.text()) as u8)
-        })?;
+                + 4 * ((t.text() != token_to_map.text()) as u8);
+            (t, ranking)
+        })
+        .collect();
     Some((node.syntax_node(), token))
 }
 
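The ranking packs three binary preferences into a single `u8`: an opaque hygiene context costs 1, a differing token kind costs 2, and differing token text costs 4, so callers can later pick the best candidate with a plain `min_by_key`. A minimal standalone sketch of the scheme (simplified data types, not rust-analyzer code):

    #[derive(Debug)]
    struct Candidate {
        opaque_context: bool,
        kind_matches: bool,
        text_matches: bool,
    }

    /// Lower is better; `min_by_key` keeps the first of equally ranked
    /// candidates, matching the "prefer the first token" comment above.
    fn rank(c: &Candidate) -> u8 {
        c.opaque_context as u8
            + 2 * ((!c.kind_matches) as u8)
            + 4 * ((!c.text_matches) as u8)
    }

    fn main() {
        let candidates = [
            Candidate { opaque_context: false, kind_matches: true, text_matches: false }, // rank 4
            Candidate { opaque_context: true, kind_matches: true, text_matches: true },   // rank 1
            Candidate { opaque_context: false, kind_matches: true, text_matches: true },  // rank 0
        ];
        let best = candidates.iter().min_by_key(|c| rank(c)).unwrap();
        assert_eq!(rank(best), 0);
    }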
@@ -571,7 +571,7 @@ impl<'db> SemanticsImpl<'db> {
         actual_macro_call: &ast::MacroCall,
         speculative_args: &ast::TokenTree,
         token_to_map: SyntaxToken,
-    ) -> Option<(SyntaxNode, SyntaxToken)> {
+    ) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
         let SourceAnalyzer { file_id, resolver, .. } =
             self.analyze_no_infer(actual_macro_call.syntax())?;
         let macro_call = InFile::new(file_id, actual_macro_call);
@@ -592,7 +592,7 @@ impl<'db> SemanticsImpl<'db> {
         macro_file: MacroFileId,
         speculative_args: &SyntaxNode,
         token_to_map: SyntaxToken,
-    ) -> Option<(SyntaxNode, SyntaxToken)> {
+    ) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
         hir_expand::db::expand_speculative(
             self.db.upcast(),
             macro_file.macro_call_id,
@@ -608,7 +608,7 @@ impl<'db> SemanticsImpl<'db> {
         actual_macro_call: &ast::Item,
         speculative_args: &ast::Item,
         token_to_map: SyntaxToken,
-    ) -> Option<(SyntaxNode, SyntaxToken)> {
+    ) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
         let macro_call = self.wrap_node_infile(actual_macro_call.clone());
         let macro_call_id = self.with_ctx(|ctx| ctx.item_to_macro_call(macro_call.as_ref()))?;
         hir_expand::db::expand_speculative(
@@ -624,7 +624,7 @@ impl<'db> SemanticsImpl<'db> {
         actual_macro_call: &ast::Attr,
         speculative_args: &ast::Attr,
         token_to_map: SyntaxToken,
-    ) -> Option<(SyntaxNode, SyntaxToken)> {
+    ) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
         let attr = self.wrap_node_infile(actual_macro_call.clone());
         let adt = actual_macro_call.syntax().parent().and_then(ast::Adt::cast)?;
         let macro_call_id = self.with_ctx(|ctx| {
@@ -718,7 +718,7 @@ impl<'a> CompletionContext<'a> {
             expected: (expected_type, expected_name),
             qualifier_ctx,
             token,
-            offset,
+            original_offset,
         } = expand_and_analyze(
             &sema,
             original_file.syntax().clone(),
@@ -728,7 +728,7 @@ impl<'a> CompletionContext<'a> {
         )?;
 
         // adjust for macro input, this still fails if there is no token written yet
-        let scope = sema.scope_at_offset(&token.parent()?, offset)?;
+        let scope = sema.scope_at_offset(&token.parent()?, original_offset)?;
 
         let krate = scope.krate();
         let module = scope.module();
@@ -22,10 +22,14 @@ use crate::context::{
     COMPLETION_MARKER,
 };
 
+#[derive(Debug)]
 struct ExpansionResult {
     original_file: SyntaxNode,
     speculative_file: SyntaxNode,
-    offset: TextSize,
+    /// The offset in the original file.
+    original_offset: TextSize,
+    /// The offset in the speculatively expanded file.
+    speculative_offset: TextSize,
     fake_ident_token: SyntaxToken,
     derive_ctx: Option<(SyntaxNode, SyntaxNode, TextSize, ast::Attr)>,
 }
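`ExpansionResult` now tracks two offsets because the speculative file has the completion marker text spliced in at the cursor, so positions after the insertion point no longer line up between the original and speculative files. A minimal sketch of the divergence, assuming the marker text `intellijRulezz` (the value of `COMPLETION_MARKER` in these sources):

    const COMPLETION_MARKER: &str = "intellijRulezz"; // assumed marker text

    fn main() {
        let original = "m!(v); rest";
        let cursor = 4; // the `$0` position in `m!(v$0); rest`
        let speculative =
            format!("{}{}{}", &original[..cursor], COMPLETION_MARKER, &original[cursor..]);

        // Every position at or after the insertion point shifts by the marker
        // length in the speculative file, hence one offset per file.
        let semi_original = original.find(';').unwrap();
        let semi_speculative = speculative.find(';').unwrap();
        assert_eq!(semi_speculative, semi_original + COMPLETION_MARKER.len());
    }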
@@ -36,7 +40,8 @@ pub(super) struct AnalysisResult {
     pub(super) qualifier_ctx: QualifierCtx,
     /// the original token of the expanded file
     pub(super) token: SyntaxToken,
-    pub(super) offset: TextSize,
+    /// The offset in the original file.
+    pub(super) original_offset: TextSize,
 }
 
 pub(super) fn expand_and_analyze(
@@ -54,226 +59,344 @@ pub(super) fn expand_and_analyze(
     // make the offset point to the start of the original token, as that is what the
     // intermediate offsets calculated in expansion always points to
     let offset = offset - relative_offset;
-    let expansion =
-        expand(sema, original_file, speculative_file, offset, fake_ident_token, relative_offset);
+    let expansion = expand(
+        sema,
+        original_file.clone(),
+        speculative_file.clone(),
+        offset,
+        fake_ident_token.clone(),
+        relative_offset,
+    )
+    .unwrap_or(ExpansionResult {
+        original_file,
+        speculative_file,
+        original_offset: offset,
+        speculative_offset: fake_ident_token.text_range().start(),
+        fake_ident_token,
+        derive_ctx: None,
+    });
 
     // add the relative offset back, so that left_biased finds the proper token
-    let offset = expansion.offset + relative_offset;
-    let token = expansion.original_file.token_at_offset(offset).left_biased()?;
+    let original_offset = expansion.original_offset + relative_offset;
+    let token = expansion.original_file.token_at_offset(original_offset).left_biased()?;
 
     analyze(sema, expansion, original_token, &token).map(|(analysis, expected, qualifier_ctx)| {
-        AnalysisResult { analysis, expected, qualifier_ctx, token, offset }
+        AnalysisResult { analysis, expected, qualifier_ctx, token, original_offset }
     })
 }
 
 /// Expand attributes and macro calls at the current cursor position for both the original file
 /// and fake file repeatedly. As soon as one of the two expansions fail we stop so the original
 /// and speculative states stay in sync.
+///
+/// We do this by recursively expanding all macros and picking the best possible match. We cannot just
+/// choose the first expansion each time because macros can expand to something that does not include
+/// our completion marker, e.g.:
+/// ```
+/// macro_rules! helper { ($v:ident) => {} }
+/// macro_rules! my_macro {
+///     ($v:ident) => {
+///         helper!($v);
+///         $v
+///     };
+/// }
+///
+/// my_macro!(complete_me_here)
+/// ```
+/// If we would expand the first thing we encounter only (which in fact this method used to do), we would
+/// be unable to complete here, because we would be walking directly into the void. So we instead try
+/// *every* possible path.
+///
+/// This can also creates discrepancies between the speculative and real expansions: because we insert
+/// tokens, we insert characters, which means if we try the second occurrence it may not be at the same
+/// position in the original and speculative file. We take an educated guess here, and for each token
+/// that we check, we subtract `COMPLETION_MARKER.len()`. This may not be accurate because proc macros
+/// can insert the text of the completion marker in other places while removing the span, but this is
+/// the best we can do.
 fn expand(
     sema: &Semantics<'_, RootDatabase>,
-    mut original_file: SyntaxNode,
-    mut speculative_file: SyntaxNode,
-    mut offset: TextSize,
-    mut fake_ident_token: SyntaxToken,
+    original_file: SyntaxNode,
+    speculative_file: SyntaxNode,
+    original_offset: TextSize,
+    fake_ident_token: SyntaxToken,
     relative_offset: TextSize,
-) -> ExpansionResult {
+) -> Option<ExpansionResult> {
     let _p = tracing::info_span!("CompletionContext::expand").entered();
-    let mut derive_ctx = None;
 
-    'expansion: loop {
-        let parent_item =
-            |item: &ast::Item| item.syntax().ancestors().skip(1).find_map(ast::Item::cast);
-        let ancestor_items = iter::successors(
-            Option::zip(
-                find_node_at_offset::<ast::Item>(&original_file, offset),
-                find_node_at_offset::<ast::Item>(&speculative_file, offset),
-            ),
-            |(a, b)| parent_item(a).zip(parent_item(b)),
-        );
-
-        // first try to expand attributes as these are always the outermost macro calls
-        'ancestors: for (actual_item, item_with_fake_ident) in ancestor_items {
-            match (
-                sema.expand_attr_macro(&actual_item),
-                sema.speculative_expand_attr_macro(
-                    &actual_item,
-                    &item_with_fake_ident,
-                    fake_ident_token.clone(),
-                ),
-            ) {
-                // maybe parent items have attributes, so continue walking the ancestors
-                (None, None) => continue 'ancestors,
-                // successful expansions
-                (
-                    Some(ExpandResult { value: actual_expansion, err: _ }),
-                    Some((fake_expansion, fake_mapped_token)),
-                ) => {
-                    let new_offset = fake_mapped_token.text_range().start();
-                    if new_offset + relative_offset > actual_expansion.text_range().end() {
-                        // offset outside of bounds from the original expansion,
-                        // stop here to prevent problems from happening
-                        break 'expansion;
-                    }
-                    original_file = actual_expansion;
-                    speculative_file = fake_expansion;
-                    fake_ident_token = fake_mapped_token;
-                    offset = new_offset;
-                    continue 'expansion;
-                }
-                // exactly one expansion failed, inconsistent state so stop expanding completely
-                _ => break 'expansion,
-            }
-        }
-
-        // No attributes have been expanded, so look for macro_call! token trees or derive token trees
-        let orig_tt = match ancestors_at_offset(&original_file, offset)
-            .map_while(Either::<ast::TokenTree, ast::Meta>::cast)
-            .last()
-        {
-            Some(it) => it,
-            None => break 'expansion,
-        };
-        let spec_tt = match ancestors_at_offset(&speculative_file, offset)
-            .map_while(Either::<ast::TokenTree, ast::Meta>::cast)
-            .last()
-        {
-            Some(it) => it,
-            None => break 'expansion,
-        };
-
-        let (tts, attrs) = match (orig_tt, spec_tt) {
-            (Either::Left(orig_tt), Either::Left(spec_tt)) => {
-                let attrs = orig_tt
-                    .syntax()
-                    .parent()
-                    .and_then(ast::Meta::cast)
-                    .and_then(|it| it.parent_attr())
-                    .zip(
-                        spec_tt
-                            .syntax()
-                            .parent()
-                            .and_then(ast::Meta::cast)
-                            .and_then(|it| it.parent_attr()),
-                    );
-                (Some((orig_tt, spec_tt)), attrs)
-            }
-            (Either::Right(orig_path), Either::Right(spec_path)) => {
-                (None, orig_path.parent_attr().zip(spec_path.parent_attr()))
-            }
-            _ => break 'expansion,
-        };
-
-        // Expand pseudo-derive expansion aka `derive(Debug$0)`
-        if let Some((orig_attr, spec_attr)) = attrs {
-            if let (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) = (
-                sema.expand_derive_as_pseudo_attr_macro(&orig_attr),
-                sema.speculative_expand_derive_as_pseudo_attr_macro(
-                    &orig_attr,
-                    &spec_attr,
-                    fake_ident_token.clone(),
-                ),
-            ) {
-                derive_ctx = Some((
-                    actual_expansion,
-                    fake_expansion,
-                    fake_mapped_token.text_range().start(),
-                    orig_attr,
-                ));
-                break 'expansion;
-            }
-
-            if let Some(spec_adt) =
-                spec_attr.syntax().ancestors().find_map(ast::Item::cast).and_then(|it| match it {
-                    ast::Item::Struct(it) => Some(ast::Adt::Struct(it)),
-                    ast::Item::Enum(it) => Some(ast::Adt::Enum(it)),
-                    ast::Item::Union(it) => Some(ast::Adt::Union(it)),
-                    _ => None,
-                })
-            {
-                // might be the path of derive helper or a token tree inside of one
-                if let Some(helpers) = sema.derive_helper(&orig_attr) {
-                    for (_mac, file) in helpers {
-                        if let Some((fake_expansion, fake_mapped_token)) = sema
-                            .speculative_expand_raw(
-                                file,
-                                spec_adt.syntax(),
-                                fake_ident_token.clone(),
-                            )
-                        {
-                            // we are inside a derive helper token tree, treat this as being inside
-                            // the derive expansion
-                            let actual_expansion = sema.parse_or_expand(file.into());
-                            let new_offset = fake_mapped_token.text_range().start();
-                            if new_offset + relative_offset > actual_expansion.text_range().end() {
-                                // offset outside of bounds from the original expansion,
-                                // stop here to prevent problems from happening
-                                break 'expansion;
-                            }
-                            original_file = actual_expansion;
-                            speculative_file = fake_expansion;
-                            fake_ident_token = fake_mapped_token;
-                            offset = new_offset;
-                            continue 'expansion;
-                        }
-                    }
-                }
-            }
-            // at this point we won't have any more successful expansions, so stop
-            break 'expansion;
-        }
-
-        // Expand fn-like macro calls
-        let Some((orig_tt, spec_tt)) = tts else { break 'expansion };
-        if let (Some(actual_macro_call), Some(macro_call_with_fake_ident)) = (
-            orig_tt.syntax().parent().and_then(ast::MacroCall::cast),
-            spec_tt.syntax().parent().and_then(ast::MacroCall::cast),
-        ) {
-            let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text());
-            let mac_call_path1 =
-                macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text());
-
-            // inconsistent state, stop expanding
-            if mac_call_path0 != mac_call_path1 {
-                break 'expansion;
-            }
-            let speculative_args = match macro_call_with_fake_ident.token_tree() {
-                Some(tt) => tt,
-                None => break 'expansion,
-            };
-
-            match (
-                sema.expand_macro_call(&actual_macro_call),
-                sema.speculative_expand_macro_call(
-                    &actual_macro_call,
-                    &speculative_args,
-                    fake_ident_token.clone(),
-                ),
-            ) {
-                // successful expansions
-                (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
-                    let new_offset = fake_mapped_token.text_range().start();
-                    if new_offset + relative_offset > actual_expansion.text_range().end() {
-                        // offset outside of bounds from the original expansion,
-                        // stop here to prevent problems from happening
-                        break 'expansion;
-                    }
-                    original_file = actual_expansion;
-                    speculative_file = fake_expansion;
-                    fake_ident_token = fake_mapped_token;
-                    offset = new_offset;
-                    continue 'expansion;
-                }
-                // at least on expansion failed, we won't have anything to expand from this point
-                // onwards so break out
-                _ => break 'expansion,
-            }
-        }
-
-        // none of our states have changed so stop the loop
-        break 'expansion;
-    }
-
-    ExpansionResult { original_file, speculative_file, offset, fake_ident_token, derive_ctx }
+    if !sema.might_be_inside_macro_call(&fake_ident_token)
+        && original_file
+            .token_at_offset(original_offset + relative_offset)
+            .right_biased()
+            .is_some_and(|original_token| !sema.might_be_inside_macro_call(&original_token))
+    {
+        // Recursion base case.
+        return Some(ExpansionResult {
+            original_file,
+            speculative_file,
+            original_offset,
+            speculative_offset: fake_ident_token.text_range().start(),
+            fake_ident_token,
+            derive_ctx: None,
+        });
+    }
+
+    let parent_item =
+        |item: &ast::Item| item.syntax().ancestors().skip(1).find_map(ast::Item::cast);
+    let ancestor_items = iter::successors(
+        Option::zip(
+            find_node_at_offset::<ast::Item>(&original_file, original_offset),
+            find_node_at_offset::<ast::Item>(
+                &speculative_file,
+                fake_ident_token.text_range().start(),
+            ),
+        ),
+        |(a, b)| parent_item(a).zip(parent_item(b)),
+    );
+
+    // first try to expand attributes as these are always the outermost macro calls
+    'ancestors: for (actual_item, item_with_fake_ident) in ancestor_items {
+        match (
+            sema.expand_attr_macro(&actual_item),
+            sema.speculative_expand_attr_macro(
+                &actual_item,
+                &item_with_fake_ident,
+                fake_ident_token.clone(),
+            ),
+        ) {
+            // maybe parent items have attributes, so continue walking the ancestors
+            (None, None) => continue 'ancestors,
+            // successful expansions
+            (
+                Some(ExpandResult { value: actual_expansion, err: _ }),
+                Some((fake_expansion, fake_mapped_tokens)),
+            ) => {
+                let mut accumulated_offset_from_fake_tokens = 0;
+                let actual_range = actual_expansion.text_range().end();
+                let result = fake_mapped_tokens
+                    .into_iter()
+                    .filter_map(|(fake_mapped_token, rank)| {
+                        let accumulated_offset = accumulated_offset_from_fake_tokens;
+                        if !fake_mapped_token.text().contains(COMPLETION_MARKER) {
+                            // Proc macros can make the same span with different text, we don't
+                            // want them to participate in completion because the macro author probably
+                            // didn't intend them to.
+                            return None;
+                        }
+                        accumulated_offset_from_fake_tokens += COMPLETION_MARKER.len();
+
+                        let new_offset = fake_mapped_token.text_range().start()
+                            - TextSize::new(accumulated_offset as u32);
+                        if new_offset + relative_offset > actual_range {
+                            // offset outside of bounds from the original expansion,
+                            // stop here to prevent problems from happening
+                            return None;
+                        }
+                        let result = expand(
+                            sema,
+                            actual_expansion.clone(),
+                            fake_expansion.clone(),
+                            new_offset,
+                            fake_mapped_token,
+                            relative_offset,
+                        )?;
+                        Some((result, rank))
+                    })
+                    .min_by_key(|(_, rank)| *rank)
+                    .map(|(result, _)| result);
+                if result.is_some() {
+                    return result;
+                }
+            }
+            // exactly one expansion failed, inconsistent state so stop expanding completely
+            _ => break 'ancestors,
+        }
+    }
+
+    // No attributes have been expanded, so look for macro_call! token trees or derive token trees
+    let orig_tt = ancestors_at_offset(&original_file, original_offset)
+        .map_while(Either::<ast::TokenTree, ast::Meta>::cast)
+        .last()?;
+    let spec_tt = ancestors_at_offset(&speculative_file, fake_ident_token.text_range().start())
+        .map_while(Either::<ast::TokenTree, ast::Meta>::cast)
+        .last()?;
+
+    let (tts, attrs) = match (orig_tt, spec_tt) {
+        (Either::Left(orig_tt), Either::Left(spec_tt)) => {
+            let attrs = orig_tt
+                .syntax()
+                .parent()
+                .and_then(ast::Meta::cast)
+                .and_then(|it| it.parent_attr())
+                .zip(
+                    spec_tt
+                        .syntax()
+                        .parent()
+                        .and_then(ast::Meta::cast)
+                        .and_then(|it| it.parent_attr()),
+                );
+            (Some((orig_tt, spec_tt)), attrs)
+        }
+        (Either::Right(orig_path), Either::Right(spec_path)) => {
+            (None, orig_path.parent_attr().zip(spec_path.parent_attr()))
+        }
+        _ => return None,
+    };
+
+    // Expand pseudo-derive expansion aka `derive(Debug$0)`
+    if let Some((orig_attr, spec_attr)) = attrs {
+        if let (Some(actual_expansion), Some((fake_expansion, fake_mapped_tokens))) = (
+            sema.expand_derive_as_pseudo_attr_macro(&orig_attr),
+            sema.speculative_expand_derive_as_pseudo_attr_macro(
+                &orig_attr,
+                &spec_attr,
+                fake_ident_token.clone(),
+            ),
+        ) {
+            if let Some((fake_mapped_token, _)) =
+                fake_mapped_tokens.into_iter().min_by_key(|(_, rank)| *rank)
+            {
+                return Some(ExpansionResult {
+                    original_file,
+                    speculative_file,
+                    original_offset,
+                    speculative_offset: fake_ident_token.text_range().start(),
+                    fake_ident_token,
+                    derive_ctx: Some((
+                        actual_expansion,
+                        fake_expansion,
+                        fake_mapped_token.text_range().start(),
+                        orig_attr,
+                    )),
+                });
+            }
+        }
+
+        if let Some(spec_adt) =
+            spec_attr.syntax().ancestors().find_map(ast::Item::cast).and_then(|it| match it {
+                ast::Item::Struct(it) => Some(ast::Adt::Struct(it)),
+                ast::Item::Enum(it) => Some(ast::Adt::Enum(it)),
+                ast::Item::Union(it) => Some(ast::Adt::Union(it)),
+                _ => None,
+            })
+        {
+            // might be the path of derive helper or a token tree inside of one
+            if let Some(helpers) = sema.derive_helper(&orig_attr) {
+                for (_mac, file) in helpers {
+                    if let Some((fake_expansion, fake_mapped_tokens)) = sema.speculative_expand_raw(
+                        file,
+                        spec_adt.syntax(),
+                        fake_ident_token.clone(),
+                    ) {
+                        // we are inside a derive helper token tree, treat this as being inside
+                        // the derive expansion
+                        let actual_expansion = sema.parse_or_expand(file.into());
+                        let mut accumulated_offset_from_fake_tokens = 0;
+                        let actual_range = actual_expansion.text_range().end();
+                        let result = fake_mapped_tokens
+                            .into_iter()
+                            .filter_map(|(fake_mapped_token, rank)| {
+                                let accumulated_offset = accumulated_offset_from_fake_tokens;
+                                if !fake_mapped_token.text().contains(COMPLETION_MARKER) {
+                                    // Proc macros can make the same span with different text, we don't
+                                    // want them to participate in completion because the macro author probably
+                                    // didn't intend them to.
+                                    return None;
+                                }
+                                accumulated_offset_from_fake_tokens += COMPLETION_MARKER.len();
+
+                                let new_offset = fake_mapped_token.text_range().start()
+                                    - TextSize::new(accumulated_offset as u32);
+                                if new_offset + relative_offset > actual_range {
+                                    // offset outside of bounds from the original expansion,
+                                    // stop here to prevent problems from happening
+                                    return None;
+                                }
+                                let result = expand(
+                                    sema,
+                                    actual_expansion.clone(),
+                                    fake_expansion.clone(),
+                                    new_offset,
+                                    fake_mapped_token,
+                                    relative_offset,
+                                )?;
+                                Some((result, rank))
+                            })
+                            .min_by_key(|(_, rank)| *rank)
+                            .map(|(result, _)| result);
+                        if result.is_some() {
+                            return result;
+                        }
+                    }
+                }
+            }
+        }
+
+        // at this point we won't have any more successful expansions, so stop
+        return None;
+    }
+
+    // Expand fn-like macro calls
+    let (orig_tt, spec_tt) = tts?;
+    let (actual_macro_call, macro_call_with_fake_ident) = (
+        orig_tt.syntax().parent().and_then(ast::MacroCall::cast)?,
+        spec_tt.syntax().parent().and_then(ast::MacroCall::cast)?,
+    );
+    let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text());
+    let mac_call_path1 = macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text());
+
+    // inconsistent state, stop expanding
+    if mac_call_path0 != mac_call_path1 {
+        return None;
+    }
+    let speculative_args = macro_call_with_fake_ident.token_tree()?;
+
+    match (
+        sema.expand_macro_call(&actual_macro_call),
+        sema.speculative_expand_macro_call(
+            &actual_macro_call,
+            &speculative_args,
+            fake_ident_token.clone(),
+        ),
+    ) {
+        // successful expansions
+        (Some(actual_expansion), Some((fake_expansion, fake_mapped_tokens))) => {
+            let mut accumulated_offset_from_fake_tokens = 0;
+            let actual_range = actual_expansion.text_range().end();
+            fake_mapped_tokens
+                .into_iter()
+                .filter_map(|(fake_mapped_token, rank)| {
+                    let accumulated_offset = accumulated_offset_from_fake_tokens;
+                    if !fake_mapped_token.text().contains(COMPLETION_MARKER) {
+                        // Proc macros can make the same span with different text, we don't
+                        // want them to participate in completion because the macro author probably
+                        // didn't intend them to.
+                        return None;
+                    }
+                    accumulated_offset_from_fake_tokens += COMPLETION_MARKER.len();
+
+                    let new_offset = fake_mapped_token.text_range().start()
+                        - TextSize::new(accumulated_offset as u32);
+                    if new_offset + relative_offset > actual_range {
+                        // offset outside of bounds from the original expansion,
+                        // stop here to prevent problems from happening
+                        return None;
+                    }
+                    let result = expand(
+                        sema,
+                        actual_expansion.clone(),
+                        fake_expansion.clone(),
+                        new_offset,
+                        fake_mapped_token,
+                        relative_offset,
+                    )?;
+                    Some((result, rank))
+                })
+                .min_by_key(|(_, rank)| *rank)
+                .map(|(result, _)| result)
+        }
+        // at least one expansion failed, we won't have anything to expand from this point
+        // onwards so break out
+        _ => None,
+    }
 }
 
 /// Fill the completion context, this is what does semantic reasoning about the surrounding context
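The `accumulated_offset_from_fake_tokens` bookkeeping in the new `expand` implements the educated guess described in its doc comment: each successive candidate token that still contains the marker is assumed to sit one extra marker insertion further right in the fake expansion than in the real one. A simplified, self-contained sketch of that correction (not rust-analyzer code; the marker text is an assumption):

    const COMPLETION_MARKER: &str = "intellijRulezz"; // assumed marker text

    /// For the k-th candidate (0-based), subtract k marker lengths to map its
    /// offset in the fake expansion back to the real expansion.
    fn corrected_offsets(offsets_in_fake_expansion: &[usize]) -> Vec<usize> {
        let mut accumulated = 0;
        offsets_in_fake_expansion
            .iter()
            .map(|&offset| {
                let corrected = offset - accumulated;
                accumulated += COMPLETION_MARKER.len();
                corrected
            })
            .collect()
    }

    fn main() {
        // Two candidates: the second sits 20 bytes after the first in the real
        // expansion, plus one extra marker copy in the fake one.
        let fake = [10, 10 + 20 + COMPLETION_MARKER.len()];
        assert_eq!(corrected_offsets(&fake), vec![10, 30]);
    }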
@@ -285,8 +408,14 @@ fn analyze(
     self_token: &SyntaxToken,
 ) -> Option<(CompletionAnalysis, (Option<Type>, Option<ast::NameOrNameRef>), QualifierCtx)> {
     let _p = tracing::info_span!("CompletionContext::analyze").entered();
-    let ExpansionResult { original_file, speculative_file, offset, fake_ident_token, derive_ctx } =
-        expansion_result;
+    let ExpansionResult {
+        original_file,
+        speculative_file,
+        original_offset: _,
+        speculative_offset,
+        fake_ident_token,
+        derive_ctx,
+    } = expansion_result;
 
     // Overwrite the path kind for derives
     if let Some((original_file, file_with_fake_ident, offset, origin_attr)) = derive_ctx {
@@ -294,7 +423,8 @@ fn analyze(
         find_node_at_offset(&file_with_fake_ident, offset)
     {
         let parent = name_ref.syntax().parent()?;
-        let (mut nameref_ctx, _) = classify_name_ref(sema, &original_file, name_ref, parent)?;
+        let (mut nameref_ctx, _) =
+            classify_name_ref(sema, &original_file, name_ref, offset, parent)?;
         if let NameRefKind::Path(path_ctx) = &mut nameref_ctx.kind {
             path_ctx.kind = PathKind::Derive {
                 existing_derives: sema
@@ -314,7 +444,7 @@ fn analyze(
         return None;
     }
 
-    let Some(name_like) = find_node_at_offset(&speculative_file, offset) else {
+    let Some(name_like) = find_node_at_offset(&speculative_file, speculative_offset) else {
         let analysis = if let Some(original) = ast::String::cast(original_token.clone()) {
             CompletionAnalysis::String { original, expanded: ast::String::cast(self_token.clone()) }
         } else {
@@ -350,8 +480,13 @@ fn analyze(
         }
         ast::NameLike::NameRef(name_ref) => {
             let parent = name_ref.syntax().parent()?;
-            let (nameref_ctx, qualifier_ctx) =
-                classify_name_ref(sema, &original_file, name_ref, parent)?;
+            let (nameref_ctx, qualifier_ctx) = classify_name_ref(
+                sema,
+                &original_file,
+                name_ref,
+                expansion_result.original_offset,
+                parent,
+            )?;
 
             if let NameRefContext {
                 kind:
@@ -636,9 +771,10 @@ fn classify_name_ref(
     sema: &Semantics<'_, RootDatabase>,
     original_file: &SyntaxNode,
     name_ref: ast::NameRef,
+    original_offset: TextSize,
     parent: SyntaxNode,
 ) -> Option<(NameRefContext, QualifierCtx)> {
-    let nameref = find_node_at_offset(original_file, name_ref.syntax().text_range().start());
+    let nameref = find_node_at_offset(original_file, original_offset);
 
     let make_res = |kind| (NameRefContext { nameref: nameref.clone(), kind }, Default::default());
 
@@ -760,7 +896,7 @@ fn classify_name_ref(
     // We do not want to generate path completions when we are sandwiched between an item decl signature and its body.
     // ex. trait Foo $0 {}
     // in these cases parser recovery usually kicks in for our inserted identifier, causing it
-    // to either be parsed as an ExprStmt or a MacroCall, depending on whether it is in a block
+    // to either be parsed as an ExprStmt or a ItemRecovery, depending on whether it is in a block
    // expression or an item list.
     // The following code checks if the body is missing, if it is we either cut off the body
     // from the item or it was missing in the first place
@@ -1088,15 +1224,10 @@ fn classify_name_ref(
         PathKind::Type { location: location.unwrap_or(TypeLocation::Other) }
     };
 
-    let mut kind_macro_call = |it: ast::MacroCall| {
-        path_ctx.has_macro_bang = it.excl_token().is_some();
-        let parent = it.syntax().parent()?;
-        // Any path in an item list will be treated as a macro call by the parser
+    let kind_item = |it: &SyntaxNode| {
+        let parent = it.parent()?;
         let kind = match_ast! {
             match parent {
-                ast::MacroExpr(expr) => make_path_kind_expr(expr.into()),
-                ast::MacroPat(it) => PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())},
-                ast::MacroType(ty) => make_path_kind_type(ty.into()),
                 ast::ItemList(_) => PathKind::Item { kind: ItemListKind::Module },
                 ast::AssocItemList(_) => PathKind::Item { kind: match parent.parent() {
                     Some(it) => match_ast! {
@@ -1126,6 +1257,23 @@ fn classify_name_ref(
         };
         Some(kind)
     };
+
+    let mut kind_macro_call = |it: ast::MacroCall| {
+        path_ctx.has_macro_bang = it.excl_token().is_some();
+        let parent = it.syntax().parent()?;
+        if let Some(kind) = kind_item(it.syntax()) {
+            return Some(kind);
+        }
+        let kind = match_ast! {
+            match parent {
+                ast::MacroExpr(expr) => make_path_kind_expr(expr.into()),
+                ast::MacroPat(it) => PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())},
+                ast::MacroType(ty) => make_path_kind_type(ty.into()),
+                _ => return None,
+            }
+        };
+        Some(kind)
+    };
     let make_path_kind_attr = |meta: ast::Meta| {
         let attr = meta.parent_attr()?;
         let kind = attr.kind();
@@ -1153,94 +1301,98 @@ fn classify_name_ref(
 
     // Infer the path kind
     let parent = path.syntax().parent()?;
-    let kind = match_ast! {
-        match parent {
-            ast::PathType(it) => make_path_kind_type(it.into()),
-            ast::PathExpr(it) => {
-                if let Some(p) = it.syntax().parent() {
-                    let p_kind = p.kind();
-                    // The syntax node of interest, for which we want to check whether
-                    // it is sandwiched between an item decl signature and its body.
-                    let probe = if ast::ExprStmt::can_cast(p_kind) {
-                        Some(p)
-                    } else if ast::StmtList::can_cast(p_kind) {
-                        Some(it.syntax().clone())
-                    } else {
-                        None
-                    };
-                    if let Some(kind) = probe.and_then(inbetween_body_and_decl_check) {
-                        return Some(make_res(NameRefKind::Keyword(kind)));
-                    }
-                }
-
-                path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));
-
-                make_path_kind_expr(it.into())
-            },
-            ast::TupleStructPat(it) => {
-                path_ctx.has_call_parens = true;
-                PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
-            },
-            ast::RecordPat(it) => {
-                path_ctx.has_call_parens = true;
-                PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
-            },
-            ast::PathPat(it) => {
-                PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
-            },
-            ast::MacroCall(it) => {
-                // A macro call in this position is usually a result of parsing recovery, so check that
-                if let Some(kind) = inbetween_body_and_decl_check(it.syntax().clone()) {
-                    return Some(make_res(NameRefKind::Keyword(kind)));
-                }
-
-                kind_macro_call(it)?
-            },
-            ast::Meta(meta) => make_path_kind_attr(meta)?,
-            ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
-            ast::UseTree(_) => PathKind::Use,
-            // completing inside a qualifier
-            ast::Path(parent) => {
-                path_ctx.parent = Some(parent.clone());
-                let parent = iter::successors(Some(parent), |it| it.parent_path()).last()?.syntax().parent()?;
-                match_ast! {
-                    match parent {
-                        ast::PathType(it) => make_path_kind_type(it.into()),
-                        ast::PathExpr(it) => {
-                            path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));
-
-                            make_path_kind_expr(it.into())
-                        },
-                        ast::TupleStructPat(it) => {
-                            path_ctx.has_call_parens = true;
-                            PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
-                        },
-                        ast::RecordPat(it) => {
-                            path_ctx.has_call_parens = true;
-                            PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
-                        },
-                        ast::PathPat(it) => {
-                            PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
-                        },
-                        ast::MacroCall(it) => {
-                            kind_macro_call(it)?
-                        },
-                        ast::Meta(meta) => make_path_kind_attr(meta)?,
-                        ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
-                        ast::UseTree(_) => PathKind::Use,
-                        ast::RecordExpr(it) => make_path_kind_expr(it.into()),
-                        _ => return None,
-                    }
-                }
-            },
-            ast::RecordExpr(it) => {
-                // A record expression in this position is usually a result of parsing recovery, so check that
-                if let Some(kind) = inbetween_body_and_decl_check(it.syntax().clone()) {
-                    return Some(make_res(NameRefKind::Keyword(kind)));
-                }
-                make_path_kind_expr(it.into())
-            },
-            _ => return None,
-        }
-    };
+    let kind = 'find_kind: {
+        if parent.kind() == SyntaxKind::ERROR {
+            if let Some(kind) = inbetween_body_and_decl_check(parent.clone()) {
+                return Some(make_res(NameRefKind::Keyword(kind)));
+            }
+
+            break 'find_kind kind_item(&parent)?;
+        }
+        match_ast! {
+            match parent {
+                ast::PathType(it) => make_path_kind_type(it.into()),
+                ast::PathExpr(it) => {
+                    if let Some(p) = it.syntax().parent() {
+                        let p_kind = p.kind();
+                        // The syntax node of interest, for which we want to check whether
+                        // it is sandwiched between an item decl signature and its body.
+                        let probe = if ast::ExprStmt::can_cast(p_kind) {
+                            Some(p)
+                        } else if ast::StmtList::can_cast(p_kind) {
+                            Some(it.syntax().clone())
+                        } else {
+                            None
+                        };
+                        if let Some(kind) = probe.and_then(inbetween_body_and_decl_check) {
+                            return Some(make_res(NameRefKind::Keyword(kind)));
+                        }
+                    }
+
+                    path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));
+
+                    make_path_kind_expr(it.into())
+                },
+                ast::TupleStructPat(it) => {
+                    path_ctx.has_call_parens = true;
+                    PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
+                },
+                ast::RecordPat(it) => {
+                    path_ctx.has_call_parens = true;
+                    PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
+                },
+                ast::PathPat(it) => {
+                    PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
+                },
+                ast::MacroCall(it) => {
+                    kind_macro_call(it)?
+                },
+                ast::Meta(meta) => make_path_kind_attr(meta)?,
+                ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
+                ast::UseTree(_) => PathKind::Use,
+                // completing inside a qualifier
+                ast::Path(parent) => {
+                    path_ctx.parent = Some(parent.clone());
+                    let parent = iter::successors(Some(parent), |it| it.parent_path()).last()?.syntax().parent()?;
+                    match_ast! {
+                        match parent {
+                            ast::PathType(it) => make_path_kind_type(it.into()),
+                            ast::PathExpr(it) => {
+                                path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));
+
+                                make_path_kind_expr(it.into())
+                            },
+                            ast::TupleStructPat(it) => {
+                                path_ctx.has_call_parens = true;
+                                PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
+                            },
+                            ast::RecordPat(it) => {
+                                path_ctx.has_call_parens = true;
+                                PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
+                            },
+                            ast::PathPat(it) => {
+                                PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
+                            },
+                            ast::MacroCall(it) => {
+                                kind_macro_call(it)?
+                            },
+                            ast::Meta(meta) => make_path_kind_attr(meta)?,
+                            ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
+                            ast::UseTree(_) => PathKind::Use,
+                            ast::RecordExpr(it) => make_path_kind_expr(it.into()),
+                            _ => return None,
+                        }
+                    }
+                },
+                ast::RecordExpr(it) => {
+                    // A record expression in this position is usually a result of parsing recovery, so check that
+                    if let Some(kind) = inbetween_body_and_decl_check(it.syntax().clone()) {
+                        return Some(make_res(NameRefKind::Keyword(kind)));
+                    }
+                    make_path_kind_expr(it.into())
+                },
+                _ => return None,
+            }
+        }
+    };
@@ -1320,9 +1472,7 @@ fn classify_name_ref(
                 }
             })
         }
-        PathKind::Item { .. } => {
-            parent.ancestors().find(|it| ast::MacroCall::can_cast(it.kind()))
-        }
+        PathKind::Item { .. } => parent.ancestors().find(|it| it.kind() == SyntaxKind::ERROR),
         _ => None,
     };
     if let Some(top) = top_node {
@@ -1320,3 +1320,73 @@ fn main() {
         "#]],
     );
 }
+
+#[test]
+fn macro_that_ignores_completion_marker() {
+    check(
+        r#"
+macro_rules! helper {
+    ($v:ident) => {};
+}
+
+macro_rules! m {
+    ($v:ident) => {{
+        helper!($v);
+        $v
+    }};
+}
+
+fn main() {
+    let variable = "test";
+    m!(v$0);
+}
+"#,
+        expect![[r#"
+            ct CONST               Unit
+            en Enum                Enum
+            fn function()          fn()
+            fn main()              fn()
+            lc variable            &str
+            ma helper!(…)          macro_rules! helper
+            ma m!(…)               macro_rules! m
+            ma makro!(…)           macro_rules! makro
+            md module
+            sc STATIC              Unit
+            st Record              Record
+            st Tuple               Tuple
+            st Unit                Unit
+            un Union               Union
+            ev TupleV(…)           TupleV(u32)
+            bt u32                 u32
+            kw async
+            kw const
+            kw crate::
+            kw enum
+            kw extern
+            kw false
+            kw fn
+            kw for
+            kw if
+            kw if let
+            kw impl
+            kw let
+            kw loop
+            kw match
+            kw mod
+            kw self::
+            kw static
+            kw struct
+            kw trait
+            kw true
+            kw type
+            kw union
+            kw unsafe
+            kw use
+            kw while
+            kw while let
+            sn macro_rules
+            sn pd
+            sn ppd
+        "#]],
+    );
+}
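The new test pins down the scenario from the doc comment on `expand`: `m!(v$0)` expands to a block where the completion token appears twice, and descending into the first occurrence dead-ends because `helper!` expands to nothing. A standalone reproduction of the macro shape, runnable outside the test harness:

    macro_rules! helper {
        ($v:ident) => {}; // swallows its input, and the completion marker with it
    }

    macro_rules! m {
        ($v:ident) => {{
            helper!($v); // first occurrence of the token: leads nowhere
            $v           // second occurrence: the one completion must pick
        }};
    }

    fn main() {
        let v = 1;
        let _ = m!(v); // completing inside the argument used to fail here
    }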
@@ -72,8 +72,19 @@ pub(super) fn item_or_macro(p: &mut Parser<'_>, stop_on_r_curly: bool, is_in_ext
     // macro_rules! ()
     // macro_rules! []
     if paths::is_use_path_start(p) {
-        macro_call(p, m);
-        return;
+        paths::use_path(p);
+        // Do not create a MACRO_CALL node here if this isn't a macro call, this causes problems with completion.
+
+        // test_err path_item_without_excl
+        // foo
+        if p.at(T![!]) {
+            macro_call(p, m);
+            return;
+        } else {
+            m.complete(p, ERROR);
+            p.error("expected an item");
+            return;
+        }
     }
 
     m.abandon(p);
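The parser now commits to a macro call only once it has actually seen the `!`; a bare path at item position completes as an ERROR node with a single "expected an item" diagnostic instead of a MACRO_CALL missing its bang, which is what the updated `kind_item` logic in `classify_name_ref` keys on. A toy sketch of the new decision (hypothetical simplification, not the parser's real API):

    /// After the path has been eaten at item position, only a following `!`
    /// makes the node a MACRO_CALL; anything else becomes an ERROR node.
    fn node_kind_after_path(next_is_bang: bool) -> &'static str {
        if next_is_bang { "MACRO_CALL" } else { "ERROR" }
    }

    fn main() {
        assert_eq!(node_kind_after_path(true), "MACRO_CALL"); // e.g. `foo!()`
        assert_eq!(node_kind_after_path(false), "ERROR");     // e.g. bare `foo`
    }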
@@ -410,8 +421,7 @@ fn fn_(p: &mut Parser<'_>, m: Marker) {
 }
 
 fn macro_call(p: &mut Parser<'_>, m: Marker) {
-    assert!(paths::is_use_path_start(p));
-    paths::use_path(p);
+    assert!(p.at(T![!]));
     match macro_call_after_excl(p) {
         BlockLike::Block => (),
         BlockLike::NotBlock => {
@@ -30,22 +30,20 @@ fn source_file() {
         TopEntryPoint::SourceFile,
         "@error@",
         expect![[r#"
            SOURCE_FILE
              ERROR
                AT "@"
-             MACRO_CALL
+             ERROR
                PATH
                  PATH_SEGMENT
                    NAME_REF
                      IDENT "error"
              ERROR
                AT "@"
            error 0: expected an item
-           error 6: expected BANG
-           error 6: expected `{`, `[`, `(`
-           error 6: expected SEMICOLON
+           error 6: expected an item
            error 6: expected an item
        "#]],
     );
 }
@@ -775,6 +775,10 @@ mod err {
         run_and_expect_errors("test_data/parser/inline/err/missing_fn_param_type.rs");
     }
     #[test]
+    fn path_item_without_excl() {
+        run_and_expect_errors("test_data/parser/inline/err/path_item_without_excl.rs");
+    }
+    #[test]
     fn pointer_type_no_mutability() {
         run_and_expect_errors("test_data/parser/inline/err/pointer_type_no_mutability.rs");
     }
@@ -10,20 +10,20 @@ SOURCE_FILE
     USE_KW "use"
   ERROR
     SLASH "/"
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
         NAME_REF
          IDENT "bin"
   ERROR
     SLASH "/"
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
        NAME_REF
          IDENT "env"
   WHITESPACE " "
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
        NAME_REF
@@ -33,13 +33,7 @@ error 23: expected `[`
 error 23: expected an item
 error 27: expected one of `*`, `::`, `{`, `self`, `super` or an identifier
 error 28: expected SEMICOLON
-error 31: expected BANG
-error 31: expected `{`, `[`, `(`
-error 31: expected SEMICOLON
 error 31: expected an item
-error 35: expected BANG
-error 35: expected `{`, `[`, `(`
-error 35: expected SEMICOLON
-error 41: expected BANG
-error 41: expected `{`, `[`, `(`
-error 41: expected SEMICOLON
+error 31: expected an item
+error 35: expected an item
+error 41: expected an item
@@ -14,14 +14,15 @@ SOURCE_FILE
       WHITESPACE "\n"
       R_CURLY "}"
   WHITESPACE "\n\n"
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
         NAME_REF
          IDENT "bar"
-    TOKEN_TREE
-      L_PAREN "("
-      R_PAREN ")"
+  ERROR
+    L_PAREN "("
+  ERROR
+    R_PAREN ")"
   WHITESPACE " "
   ERROR
     L_CURLY "{"
@@ -75,6 +76,7 @@ SOURCE_FILE
       WHITESPACE "\n"
       R_CURLY "}"
   WHITESPACE "\n"
-error 17: expected BANG
-error 19: expected SEMICOLON
+error 17: expected an item
+error 17: expected an item
+error 18: expected an item
 error 20: expected an item
@@ -46,7 +46,7 @@ SOURCE_FILE
   ERROR
     AT "@"
   WHITESPACE " "
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
         NAME_REF
@@ -72,9 +72,7 @@ error 67: expected R_ANGLE
 error 67: expected R_PAREN
 error 67: expected SEMICOLON
 error 67: expected an item
-error 72: expected BANG
-error 72: expected `{`, `[`, `(`
-error 72: expected SEMICOLON
+error 72: expected an item
 error 72: expected an item
 error 73: expected an item
 error 79: expected an item
@@ -26,14 +26,15 @@ SOURCE_FILE
   ERROR
     FN_KW "fn"
   WHITESPACE " "
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
        NAME_REF
          IDENT "bar"
-    TOKEN_TREE
-      L_PAREN "("
-      R_PAREN ")"
+  ERROR
+    L_PAREN "("
+  ERROR
+    R_PAREN ")"
   WHITESPACE " "
   ERROR
     L_CURLY "{"
@@ -43,6 +44,7 @@ error 6: expected fn, trait or impl
 error 38: expected a name
 error 40: missing type for `const` or `static`
 error 40: expected SEMICOLON
-error 44: expected BANG
-error 46: expected SEMICOLON
+error 44: expected an item
+error 44: expected an item
+error 45: expected an item
 error 47: expected an item
@@ -12,15 +12,16 @@ SOURCE_FILE
   ERROR
     USE_KW "use"
   WHITESPACE " "
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
        NAME_REF
          IDENT "std"
+  ERROR
     SEMICOLON ";"
   WHITESPACE "\n"
 error 8: expected R_ANGLE
 error 8: expected type
 error 11: expected `{`
-error 15: expected BANG
-error 15: expected `{`, `[`, `(`
+error 15: expected an item
+error 15: expected an item
@@ -1,5 +1,5 @@
 SOURCE_FILE
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
        NAME_REF
@@ -22,7 +22,7 @@ SOURCE_FILE
   ERROR
     ASYNC_KW "async"
   WHITESPACE " "
-  MACRO_CALL
+  ERROR
     PATH
       PATH_SEGMENT
        NAME_REF
@@ -42,10 +42,6 @@ SOURCE_FILE
       L_CURLY "{"
       R_CURLY "}"
   WHITESPACE "\n"
-error 3: expected BANG
-error 3: expected `{`, `[`, `(`
-error 3: expected SEMICOLON
+error 3: expected an item
 error 24: expected fn, trait or impl
-error 28: expected BANG
-error 28: expected `{`, `[`, `(`
-error 28: expected SEMICOLON
+error 28: expected an item
@@ -0,0 +1,8 @@
+SOURCE_FILE
+  ERROR
+    PATH
+      PATH_SEGMENT
+        NAME_REF
+          IDENT "foo"
+  WHITESPACE "\n"
+error 3: expected an item
@@ -0,0 +1 @@
+foo