Mirror of https://github.com/rust-lang/rust-analyzer, synced 2024-12-26 21:13:37 +00:00
Merge pull request #18723 from ChayimFriedman2/tracing-complete
fix: Fix a case where completion was unable to expand a macro
This commit is contained in commit 15d2d509d0
16 changed files with 604 additions and 368 deletions
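What changed, in brief: `expand_speculative` used to map the completion token back out of the expansion and return only the single best `SyntaxToken`. It now returns every candidate token together with a `u8` ranking (lower is better: an opaque hygiene context adds 1, a kind mismatch adds 2, a text mismatch adds 4), and the caller recursively tries candidates best-first. A minimal, self-contained sketch of that ranking idea; `Tok` and the helpers below are illustrative stand-ins, not rust-analyzer's actual API:

#[derive(Clone)]
struct Tok {
    kind: u16,    // token kind, e.g. an identifier
    text: String, // token text
    opaque: bool, // whether the token's hygiene context is opaque
}

/// Mirrors the score in `expand_speculative`: lower is better, and because
/// `min_by_key` keeps the first minimum, ties prefer the earliest token.
fn rank(candidate: &Tok, target: &Tok) -> u8 {
    candidate.opaque as u8
        + 2 * (candidate.kind != target.kind) as u8
        + 4 * (candidate.text != target.text) as u8
}

fn best<'a>(candidates: &'a [Tok], target: &Tok) -> Option<&'a Tok> {
    candidates.iter().min_by_key(|t| rank(t, target))
}

fn main() {
    let target = Tok { kind: 1, text: "v".into(), opaque: false };
    let candidates = vec![
        Tok { kind: 1, text: "v".into(), opaque: true },  // rank 1
        Tok { kind: 1, text: "v".into(), opaque: false }, // rank 0, wins
        Tok { kind: 2, text: "w".into(), opaque: false }, // rank 6
    ];
    let picked = best(&candidates, &target).unwrap();
    assert_eq!(rank(picked, &target), 0);
}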
@@ -153,13 +153,13 @@ fn syntax_context(db: &dyn ExpandDatabase, file: HirFileId) -> SyntaxContextId {
/// This expands the given macro call, but with different arguments. This is
/// used for completion, where we want to see what 'would happen' if we insert a
/// token. The `token_to_map` mapped down into the expansion, with the mapped
/// token returned.
/// token(s) returned with their priority.
pub fn expand_speculative(
db: &dyn ExpandDatabase,
actual_macro_call: MacroCallId,
speculative_args: &SyntaxNode,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
let loc = db.lookup_intern_macro_call(actual_macro_call);
let (_, _, span) = db.macro_arg_considering_derives(actual_macro_call, &loc.kind);

@@ -303,17 +303,19 @@ pub fn expand_speculative(
token_tree_to_syntax_node(&speculative_expansion.value, expand_to, loc.def.edition);

let syntax_node = node.syntax_node();
let (token, _) = rev_tmap
let token = rev_tmap
.ranges_with_span(span_map.span_for_range(token_to_map.text_range()))
.filter_map(|(range, ctx)| syntax_node.covering_element(range).into_token().zip(Some(ctx)))
.min_by_key(|(t, ctx)| {
.map(|(t, ctx)| {
// prefer tokens of the same kind and text, as well as non opaque marked ones
// Note the inversion of the score here, as we want to prefer the first token in case
// of all tokens having the same score
ctx.is_opaque(db) as u8
let ranking = ctx.is_opaque(db) as u8
+ 2 * (t.kind() != token_to_map.kind()) as u8
+ 4 * ((t.text() != token_to_map.text()) as u8)
})?;
+ 4 * ((t.text() != token_to_map.text()) as u8);
(t, ranking)
})
.collect();
Some((node.syntax_node(), token))
}
@@ -571,7 +571,7 @@ impl<'db> SemanticsImpl<'db> {
actual_macro_call: &ast::MacroCall,
speculative_args: &ast::TokenTree,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
let SourceAnalyzer { file_id, resolver, .. } =
self.analyze_no_infer(actual_macro_call.syntax())?;
let macro_call = InFile::new(file_id, actual_macro_call);

@@ -592,7 +592,7 @@ impl<'db> SemanticsImpl<'db> {
macro_file: MacroFileId,
speculative_args: &SyntaxNode,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
hir_expand::db::expand_speculative(
self.db.upcast(),
macro_file.macro_call_id,

@@ -608,7 +608,7 @@ impl<'db> SemanticsImpl<'db> {
actual_macro_call: &ast::Item,
speculative_args: &ast::Item,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
let macro_call = self.wrap_node_infile(actual_macro_call.clone());
let macro_call_id = self.with_ctx(|ctx| ctx.item_to_macro_call(macro_call.as_ref()))?;
hir_expand::db::expand_speculative(

@@ -624,7 +624,7 @@ impl<'db> SemanticsImpl<'db> {
actual_macro_call: &ast::Attr,
speculative_args: &ast::Attr,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
) -> Option<(SyntaxNode, Vec<(SyntaxToken, u8)>)> {
let attr = self.wrap_node_infile(actual_macro_call.clone());
let adt = actual_macro_call.syntax().parent().and_then(ast::Adt::cast)?;
let macro_call_id = self.with_ctx(|ctx| {
@@ -718,7 +718,7 @@ impl<'a> CompletionContext<'a> {
expected: (expected_type, expected_name),
qualifier_ctx,
token,
offset,
original_offset,
} = expand_and_analyze(
&sema,
original_file.syntax().clone(),

@@ -728,7 +728,7 @@ impl<'a> CompletionContext<'a> {
)?;

// adjust for macro input, this still fails if there is no token written yet
let scope = sema.scope_at_offset(&token.parent()?, offset)?;
let scope = sema.scope_at_offset(&token.parent()?, original_offset)?;

let krate = scope.krate();
let module = scope.module();
@@ -22,10 +22,14 @@ use crate::context::{
COMPLETION_MARKER,
};

#[derive(Debug)]
struct ExpansionResult {
original_file: SyntaxNode,
speculative_file: SyntaxNode,
offset: TextSize,
/// The offset in the original file.
original_offset: TextSize,
/// The offset in the speculatively expanded file.
speculative_offset: TextSize,
fake_ident_token: SyntaxToken,
derive_ctx: Option<(SyntaxNode, SyntaxNode, TextSize, ast::Attr)>,
}

@@ -36,7 +40,8 @@ pub(super) struct AnalysisResult {
pub(super) qualifier_ctx: QualifierCtx,
/// the original token of the expanded file
pub(super) token: SyntaxToken,
pub(super) offset: TextSize,
/// The offset in the original file.
pub(super) original_offset: TextSize,
}

pub(super) fn expand_and_analyze(
@@ -54,226 +59,344 @@ pub(super) fn expand_and_analyze(
// make the offset point to the start of the original token, as that is what the
// intermediate offsets calculated in expansion always points to
let offset = offset - relative_offset;
let expansion =
expand(sema, original_file, speculative_file, offset, fake_ident_token, relative_offset);
let expansion = expand(
sema,
original_file.clone(),
speculative_file.clone(),
offset,
fake_ident_token.clone(),
relative_offset,
)
.unwrap_or(ExpansionResult {
original_file,
speculative_file,
original_offset: offset,
speculative_offset: fake_ident_token.text_range().start(),
fake_ident_token,
derive_ctx: None,
});

// add the relative offset back, so that left_biased finds the proper token
let offset = expansion.offset + relative_offset;
let token = expansion.original_file.token_at_offset(offset).left_biased()?;
let original_offset = expansion.original_offset + relative_offset;
let token = expansion.original_file.token_at_offset(original_offset).left_biased()?;

analyze(sema, expansion, original_token, &token).map(|(analysis, expected, qualifier_ctx)| {
AnalysisResult { analysis, expected, qualifier_ctx, token, offset }
AnalysisResult { analysis, expected, qualifier_ctx, token, original_offset }
})
}

/// Expand attributes and macro calls at the current cursor position for both the original file
/// and fake file repeatedly. As soon as one of the two expansions fail we stop so the original
/// and speculative states stay in sync.
///
/// We do this by recursively expanding all macros and picking the best possible match. We cannot just
/// choose the first expansion each time because macros can expand to something that does not include
/// our completion marker, e.g.:
/// ```
/// macro_rules! helper { ($v:ident) => {} }
/// macro_rules! my_macro {
/// ($v:ident) => {
/// helper!($v);
/// $v
/// };
/// }
///
/// my_macro!(complete_me_here)
/// ```
/// If we would expand the first thing we encounter only (which in fact this method used to do), we would
/// be unable to complete here, because we would be walking directly into the void. So we instead try
/// *every* possible path.
///
/// This can also creates discrepancies between the speculative and real expansions: because we insert
/// tokens, we insert characters, which means if we try the second occurrence it may not be at the same
/// position in the original and speculative file. We take an educated guess here, and for each token
/// that we check, we subtract `COMPLETION_MARKER.len()`. This may not be accurate because proc macros
/// can insert the text of the completion marker in other places while removing the span, but this is
/// the best we can do.
fn expand(
sema: &Semantics<'_, RootDatabase>,
mut original_file: SyntaxNode,
mut speculative_file: SyntaxNode,
mut offset: TextSize,
mut fake_ident_token: SyntaxToken,
original_file: SyntaxNode,
speculative_file: SyntaxNode,
original_offset: TextSize,
fake_ident_token: SyntaxToken,
relative_offset: TextSize,
) -> ExpansionResult {
) -> Option<ExpansionResult> {
let _p = tracing::info_span!("CompletionContext::expand").entered();
let mut derive_ctx = None;

'expansion: loop {
let parent_item =
|item: &ast::Item| item.syntax().ancestors().skip(1).find_map(ast::Item::cast);
let ancestor_items = iter::successors(
Option::zip(
find_node_at_offset::<ast::Item>(&original_file, offset),
find_node_at_offset::<ast::Item>(&speculative_file, offset),
if !sema.might_be_inside_macro_call(&fake_ident_token)
&& original_file
.token_at_offset(original_offset + relative_offset)
.right_biased()
.is_some_and(|original_token| !sema.might_be_inside_macro_call(&original_token))
{
// Recursion base case.
return Some(ExpansionResult {
original_file,
speculative_file,
original_offset,
speculative_offset: fake_ident_token.text_range().start(),
fake_ident_token,
derive_ctx: None,
});
}
let parent_item =
|item: &ast::Item| item.syntax().ancestors().skip(1).find_map(ast::Item::cast);
let ancestor_items = iter::successors(
Option::zip(
find_node_at_offset::<ast::Item>(&original_file, original_offset),
find_node_at_offset::<ast::Item>(
&speculative_file,
fake_ident_token.text_range().start(),
),
|(a, b)| parent_item(a).zip(parent_item(b)),
);
),
|(a, b)| parent_item(a).zip(parent_item(b)),
);

// first try to expand attributes as these are always the outermost macro calls
'ancestors: for (actual_item, item_with_fake_ident) in ancestor_items {
match (
sema.expand_attr_macro(&actual_item),
sema.speculative_expand_attr_macro(
&actual_item,
&item_with_fake_ident,
fake_ident_token.clone(),
),
) {
// maybe parent items have attributes, so continue walking the ancestors
(None, None) => continue 'ancestors,
// successful expansions
(
Some(ExpandResult { value: actual_expansion, err: _ }),
Some((fake_expansion, fake_mapped_token)),
) => {
let new_offset = fake_mapped_token.text_range().start();
if new_offset + relative_offset > actual_expansion.text_range().end() {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
break 'expansion;
}
original_file = actual_expansion;
speculative_file = fake_expansion;
fake_ident_token = fake_mapped_token;
offset = new_offset;
continue 'expansion;
// first try to expand attributes as these are always the outermost macro calls
'ancestors: for (actual_item, item_with_fake_ident) in ancestor_items {
match (
sema.expand_attr_macro(&actual_item),
sema.speculative_expand_attr_macro(
&actual_item,
&item_with_fake_ident,
fake_ident_token.clone(),
),
) {
// maybe parent items have attributes, so continue walking the ancestors
(None, None) => continue 'ancestors,
// successful expansions
(
Some(ExpandResult { value: actual_expansion, err: _ }),
Some((fake_expansion, fake_mapped_tokens)),
) => {
let mut accumulated_offset_from_fake_tokens = 0;
let actual_range = actual_expansion.text_range().end();
let result = fake_mapped_tokens
.into_iter()
.filter_map(|(fake_mapped_token, rank)| {
let accumulated_offset = accumulated_offset_from_fake_tokens;
if !fake_mapped_token.text().contains(COMPLETION_MARKER) {
// Proc macros can make the same span with different text, we don't
// want them to participate in completion because the macro author probably
// didn't intend them to.
return None;
}
accumulated_offset_from_fake_tokens += COMPLETION_MARKER.len();

let new_offset = fake_mapped_token.text_range().start()
- TextSize::new(accumulated_offset as u32);
if new_offset + relative_offset > actual_range {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
return None;
}
let result = expand(
sema,
actual_expansion.clone(),
fake_expansion.clone(),
new_offset,
fake_mapped_token,
relative_offset,
)?;
Some((result, rank))
})
.min_by_key(|(_, rank)| *rank)
.map(|(result, _)| result);
if result.is_some() {
return result;
}
// exactly one expansion failed, inconsistent state so stop expanding completely
_ => break 'expansion,
}
// exactly one expansion failed, inconsistent state so stop expanding completely
_ => break 'ancestors,
}
}

// No attributes have been expanded, so look for macro_call! token trees or derive token trees
let orig_tt = ancestors_at_offset(&original_file, original_offset)
.map_while(Either::<ast::TokenTree, ast::Meta>::cast)
.last()?;
let spec_tt = ancestors_at_offset(&speculative_file, fake_ident_token.text_range().start())
.map_while(Either::<ast::TokenTree, ast::Meta>::cast)
.last()?;

let (tts, attrs) = match (orig_tt, spec_tt) {
(Either::Left(orig_tt), Either::Left(spec_tt)) => {
let attrs = orig_tt
.syntax()
.parent()
.and_then(ast::Meta::cast)
.and_then(|it| it.parent_attr())
.zip(
spec_tt
.syntax()
.parent()
.and_then(ast::Meta::cast)
.and_then(|it| it.parent_attr()),
);
(Some((orig_tt, spec_tt)), attrs)
}
(Either::Right(orig_path), Either::Right(spec_path)) => {
(None, orig_path.parent_attr().zip(spec_path.parent_attr()))
}
_ => return None,
};

// Expand pseudo-derive expansion aka `derive(Debug$0)`
if let Some((orig_attr, spec_attr)) = attrs {
if let (Some(actual_expansion), Some((fake_expansion, fake_mapped_tokens))) = (
sema.expand_derive_as_pseudo_attr_macro(&orig_attr),
sema.speculative_expand_derive_as_pseudo_attr_macro(
&orig_attr,
&spec_attr,
fake_ident_token.clone(),
),
) {
if let Some((fake_mapped_token, _)) =
fake_mapped_tokens.into_iter().min_by_key(|(_, rank)| *rank)
{
return Some(ExpansionResult {
original_file,
speculative_file,
original_offset,
speculative_offset: fake_ident_token.text_range().start(),
fake_ident_token,
derive_ctx: Some((
actual_expansion,
fake_expansion,
fake_mapped_token.text_range().start(),
orig_attr,
)),
});
}
}
// No attributes have been expanded, so look for macro_call! token trees or derive token trees
let orig_tt = match ancestors_at_offset(&original_file, offset)
.map_while(Either::<ast::TokenTree, ast::Meta>::cast)
.last()
if let Some(spec_adt) =
spec_attr.syntax().ancestors().find_map(ast::Item::cast).and_then(|it| match it {
ast::Item::Struct(it) => Some(ast::Adt::Struct(it)),
ast::Item::Enum(it) => Some(ast::Adt::Enum(it)),
ast::Item::Union(it) => Some(ast::Adt::Union(it)),
_ => None,
})
{
Some(it) => it,
None => break 'expansion,
};
let spec_tt = match ancestors_at_offset(&speculative_file, offset)
.map_while(Either::<ast::TokenTree, ast::Meta>::cast)
.last()
{
Some(it) => it,
None => break 'expansion,
};
// might be the path of derive helper or a token tree inside of one
if let Some(helpers) = sema.derive_helper(&orig_attr) {
for (_mac, file) in helpers {
if let Some((fake_expansion, fake_mapped_tokens)) = sema.speculative_expand_raw(
file,
spec_adt.syntax(),
fake_ident_token.clone(),
) {
// we are inside a derive helper token tree, treat this as being inside
// the derive expansion
let actual_expansion = sema.parse_or_expand(file.into());
let mut accumulated_offset_from_fake_tokens = 0;
let actual_range = actual_expansion.text_range().end();
let result = fake_mapped_tokens
.into_iter()
.filter_map(|(fake_mapped_token, rank)| {
let accumulated_offset = accumulated_offset_from_fake_tokens;
if !fake_mapped_token.text().contains(COMPLETION_MARKER) {
// Proc macros can make the same span with different text, we don't
// want them to participate in completion because the macro author probably
// didn't intend them to.
return None;
}
accumulated_offset_from_fake_tokens += COMPLETION_MARKER.len();

let (tts, attrs) = match (orig_tt, spec_tt) {
(Either::Left(orig_tt), Either::Left(spec_tt)) => {
let attrs = orig_tt
.syntax()
.parent()
.and_then(ast::Meta::cast)
.and_then(|it| it.parent_attr())
.zip(
spec_tt
.syntax()
.parent()
.and_then(ast::Meta::cast)
.and_then(|it| it.parent_attr()),
);
(Some((orig_tt, spec_tt)), attrs)
}
(Either::Right(orig_path), Either::Right(spec_path)) => {
(None, orig_path.parent_attr().zip(spec_path.parent_attr()))
}
_ => break 'expansion,
};

// Expand pseudo-derive expansion aka `derive(Debug$0)`
if let Some((orig_attr, spec_attr)) = attrs {
if let (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) = (
sema.expand_derive_as_pseudo_attr_macro(&orig_attr),
sema.speculative_expand_derive_as_pseudo_attr_macro(
&orig_attr,
&spec_attr,
fake_ident_token.clone(),
),
) {
derive_ctx = Some((
actual_expansion,
fake_expansion,
fake_mapped_token.text_range().start(),
orig_attr,
));
break 'expansion;
}

if let Some(spec_adt) =
spec_attr.syntax().ancestors().find_map(ast::Item::cast).and_then(|it| match it {
ast::Item::Struct(it) => Some(ast::Adt::Struct(it)),
ast::Item::Enum(it) => Some(ast::Adt::Enum(it)),
ast::Item::Union(it) => Some(ast::Adt::Union(it)),
_ => None,
})
{
// might be the path of derive helper or a token tree inside of one
if let Some(helpers) = sema.derive_helper(&orig_attr) {
for (_mac, file) in helpers {
if let Some((fake_expansion, fake_mapped_token)) = sema
.speculative_expand_raw(
file,
spec_adt.syntax(),
fake_ident_token.clone(),
)
{
// we are inside a derive helper token tree, treat this as being inside
// the derive expansion
let actual_expansion = sema.parse_or_expand(file.into());
let new_offset = fake_mapped_token.text_range().start();
if new_offset + relative_offset > actual_expansion.text_range().end() {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
break 'expansion;
}
original_file = actual_expansion;
speculative_file = fake_expansion;
fake_ident_token = fake_mapped_token;
offset = new_offset;
continue 'expansion;
let new_offset = fake_mapped_token.text_range().start()
- TextSize::new(accumulated_offset as u32);
if new_offset + relative_offset > actual_range {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
return None;
}
let result = expand(
sema,
actual_expansion.clone(),
fake_expansion.clone(),
new_offset,
fake_mapped_token,
relative_offset,
)?;
Some((result, rank))
})
.min_by_key(|(_, rank)| *rank)
.map(|(result, _)| result);
if result.is_some() {
return result;
}
}
}
}
// at this point we won't have any more successful expansions, so stop
break 'expansion;
}
// Expand fn-like macro calls
let Some((orig_tt, spec_tt)) = tts else { break 'expansion };
if let (Some(actual_macro_call), Some(macro_call_with_fake_ident)) = (
orig_tt.syntax().parent().and_then(ast::MacroCall::cast),
spec_tt.syntax().parent().and_then(ast::MacroCall::cast),
) {
let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text());
let mac_call_path1 =
macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text());

// inconsistent state, stop expanding
if mac_call_path0 != mac_call_path1 {
break 'expansion;
}
let speculative_args = match macro_call_with_fake_ident.token_tree() {
Some(tt) => tt,
None => break 'expansion,
};

match (
sema.expand_macro_call(&actual_macro_call),
sema.speculative_expand_macro_call(
&actual_macro_call,
&speculative_args,
fake_ident_token.clone(),
),
) {
// successful expansions
(Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
let new_offset = fake_mapped_token.text_range().start();
if new_offset + relative_offset > actual_expansion.text_range().end() {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
break 'expansion;
}
original_file = actual_expansion;
speculative_file = fake_expansion;
fake_ident_token = fake_mapped_token;
offset = new_offset;
continue 'expansion;
}
// at least on expansion failed, we won't have anything to expand from this point
// onwards so break out
_ => break 'expansion,
}
}

// none of our states have changed so stop the loop
break 'expansion;
// at this point we won't have any more successful expansions, so stop
return None;
}

ExpansionResult { original_file, speculative_file, offset, fake_ident_token, derive_ctx }
// Expand fn-like macro calls
let (orig_tt, spec_tt) = tts?;
let (actual_macro_call, macro_call_with_fake_ident) = (
orig_tt.syntax().parent().and_then(ast::MacroCall::cast)?,
spec_tt.syntax().parent().and_then(ast::MacroCall::cast)?,
);
let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text());
let mac_call_path1 = macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text());

// inconsistent state, stop expanding
if mac_call_path0 != mac_call_path1 {
return None;
}
let speculative_args = macro_call_with_fake_ident.token_tree()?;

match (
sema.expand_macro_call(&actual_macro_call),
sema.speculative_expand_macro_call(
&actual_macro_call,
&speculative_args,
fake_ident_token.clone(),
),
) {
// successful expansions
(Some(actual_expansion), Some((fake_expansion, fake_mapped_tokens))) => {
let mut accumulated_offset_from_fake_tokens = 0;
let actual_range = actual_expansion.text_range().end();
fake_mapped_tokens
.into_iter()
.filter_map(|(fake_mapped_token, rank)| {
let accumulated_offset = accumulated_offset_from_fake_tokens;
if !fake_mapped_token.text().contains(COMPLETION_MARKER) {
// Proc macros can make the same span with different text, we don't
// want them to participate in completion because the macro author probably
// didn't intend them to.
return None;
}
accumulated_offset_from_fake_tokens += COMPLETION_MARKER.len();

let new_offset = fake_mapped_token.text_range().start()
- TextSize::new(accumulated_offset as u32);
if new_offset + relative_offset > actual_range {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
return None;
}
let result = expand(
sema,
actual_expansion.clone(),
fake_expansion.clone(),
new_offset,
fake_mapped_token,
relative_offset,
)?;
Some((result, rank))
})
.min_by_key(|(_, rank)| *rank)
.map(|(result, _)| result)
}
// at least one expansion failed, we won't have anything to expand from this point
// onwards so break out
_ => None,
}
}

/// Fill the completion context, this is what does semantic reasoning about the surrounding context
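The selection step above is easiest to see in isolation: every speculative expansion now yields a list of `(token, rank)` candidates, candidates whose text no longer contains `COMPLETION_MARKER` are skipped (with the accumulated marker length subtracted from later offsets), and the function recurses into each remaining candidate, keeping the sub-result whose candidate ranked best. A simplified, self-contained skeleton of that step (names and types are illustrative, not the crate's API; `String` stands in for `ExpansionResult`):

/// Recurse into every ranked candidate and keep the best successful result.
fn best_expansion<T>(
    candidates: Vec<(T, u8)>,
    mut recurse: impl FnMut(T) -> Option<String>,
) -> Option<String> {
    candidates
        .into_iter()
        // only candidates that expand further survive
        .filter_map(|(token, rank)| recurse(token).map(|res| (res, rank)))
        // lowest rank wins; ties keep the first candidate
        .min_by_key(|(_, rank)| *rank)
        .map(|(res, _)| res)
}

fn main() {
    // Candidate 0 ranks better but fails to expand; candidate 1 succeeds.
    let out = best_expansion(vec![(0usize, 0), (1usize, 4)], |tok| {
        (tok == 1).then(|| format!("expanded via token {tok}"))
    });
    assert_eq!(out.as_deref(), Some("expanded via token 1"));
}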
@@ -285,8 +408,14 @@ fn analyze(
self_token: &SyntaxToken,
) -> Option<(CompletionAnalysis, (Option<Type>, Option<ast::NameOrNameRef>), QualifierCtx)> {
let _p = tracing::info_span!("CompletionContext::analyze").entered();
let ExpansionResult { original_file, speculative_file, offset, fake_ident_token, derive_ctx } =
expansion_result;
let ExpansionResult {
original_file,
speculative_file,
original_offset: _,
speculative_offset,
fake_ident_token,
derive_ctx,
} = expansion_result;

// Overwrite the path kind for derives
if let Some((original_file, file_with_fake_ident, offset, origin_attr)) = derive_ctx {
@@ -294,7 +423,8 @@ fn analyze(
find_node_at_offset(&file_with_fake_ident, offset)
{
let parent = name_ref.syntax().parent()?;
let (mut nameref_ctx, _) = classify_name_ref(sema, &original_file, name_ref, parent)?;
let (mut nameref_ctx, _) =
classify_name_ref(sema, &original_file, name_ref, offset, parent)?;
if let NameRefKind::Path(path_ctx) = &mut nameref_ctx.kind {
path_ctx.kind = PathKind::Derive {
existing_derives: sema

@@ -314,7 +444,7 @@ fn analyze(
return None;
}

let Some(name_like) = find_node_at_offset(&speculative_file, offset) else {
let Some(name_like) = find_node_at_offset(&speculative_file, speculative_offset) else {
let analysis = if let Some(original) = ast::String::cast(original_token.clone()) {
CompletionAnalysis::String { original, expanded: ast::String::cast(self_token.clone()) }
} else {

@@ -350,8 +480,13 @@ fn analyze(
}
ast::NameLike::NameRef(name_ref) => {
let parent = name_ref.syntax().parent()?;
let (nameref_ctx, qualifier_ctx) =
classify_name_ref(sema, &original_file, name_ref, parent)?;
let (nameref_ctx, qualifier_ctx) = classify_name_ref(
sema,
&original_file,
name_ref,
expansion_result.original_offset,
parent,
)?;

if let NameRefContext {
kind:
@@ -636,9 +771,10 @@ fn classify_name_ref(
sema: &Semantics<'_, RootDatabase>,
original_file: &SyntaxNode,
name_ref: ast::NameRef,
original_offset: TextSize,
parent: SyntaxNode,
) -> Option<(NameRefContext, QualifierCtx)> {
let nameref = find_node_at_offset(original_file, name_ref.syntax().text_range().start());
let nameref = find_node_at_offset(original_file, original_offset);

let make_res = |kind| (NameRefContext { nameref: nameref.clone(), kind }, Default::default());

@@ -760,7 +896,7 @@ fn classify_name_ref(
// We do not want to generate path completions when we are sandwiched between an item decl signature and its body.
// ex. trait Foo $0 {}
// in these cases parser recovery usually kicks in for our inserted identifier, causing it
// to either be parsed as an ExprStmt or a MacroCall, depending on whether it is in a block
// to either be parsed as an ExprStmt or a ItemRecovery, depending on whether it is in a block
// expression or an item list.
// The following code checks if the body is missing, if it is we either cut off the body
// from the item or it was missing in the first place

@@ -1088,15 +1224,10 @@ fn classify_name_ref(
PathKind::Type { location: location.unwrap_or(TypeLocation::Other) }
};

let mut kind_macro_call = |it: ast::MacroCall| {
path_ctx.has_macro_bang = it.excl_token().is_some();
let parent = it.syntax().parent()?;
// Any path in an item list will be treated as a macro call by the parser
let kind_item = |it: &SyntaxNode| {
let parent = it.parent()?;
let kind = match_ast! {
match parent {
ast::MacroExpr(expr) => make_path_kind_expr(expr.into()),
ast::MacroPat(it) => PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())},
ast::MacroType(ty) => make_path_kind_type(ty.into()),
ast::ItemList(_) => PathKind::Item { kind: ItemListKind::Module },
ast::AssocItemList(_) => PathKind::Item { kind: match parent.parent() {
Some(it) => match_ast! {
@@ -1126,6 +1257,23 @@ fn classify_name_ref(
};
Some(kind)
};

let mut kind_macro_call = |it: ast::MacroCall| {
path_ctx.has_macro_bang = it.excl_token().is_some();
let parent = it.syntax().parent()?;
if let Some(kind) = kind_item(it.syntax()) {
return Some(kind);
}
let kind = match_ast! {
match parent {
ast::MacroExpr(expr) => make_path_kind_expr(expr.into()),
ast::MacroPat(it) => PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())},
ast::MacroType(ty) => make_path_kind_type(ty.into()),
_ => return None,
}
};
Some(kind)
};
let make_path_kind_attr = |meta: ast::Meta| {
let attr = meta.parent_attr()?;
let kind = attr.kind();
@@ -1153,94 +1301,98 @@ fn classify_name_ref(

// Infer the path kind
let parent = path.syntax().parent()?;
let kind = match_ast! {
match parent {
ast::PathType(it) => make_path_kind_type(it.into()),
ast::PathExpr(it) => {
if let Some(p) = it.syntax().parent() {
let p_kind = p.kind();
// The syntax node of interest, for which we want to check whether
// it is sandwiched between an item decl signature and its body.
let probe = if ast::ExprStmt::can_cast(p_kind) {
Some(p)
} else if ast::StmtList::can_cast(p_kind) {
Some(it.syntax().clone())
} else {
None
};
if let Some(kind) = probe.and_then(inbetween_body_and_decl_check) {
let kind = 'find_kind: {
if parent.kind() == SyntaxKind::ERROR {
if let Some(kind) = inbetween_body_and_decl_check(parent.clone()) {
return Some(make_res(NameRefKind::Keyword(kind)));
}

break 'find_kind kind_item(&parent)?;
}
match_ast! {
match parent {
ast::PathType(it) => make_path_kind_type(it.into()),
ast::PathExpr(it) => {
if let Some(p) = it.syntax().parent() {
let p_kind = p.kind();
// The syntax node of interest, for which we want to check whether
// it is sandwiched between an item decl signature and its body.
let probe = if ast::ExprStmt::can_cast(p_kind) {
Some(p)
} else if ast::StmtList::can_cast(p_kind) {
Some(it.syntax().clone())
} else {
None
};
if let Some(kind) = probe.and_then(inbetween_body_and_decl_check) {
return Some(make_res(NameRefKind::Keyword(kind)));
}
}

path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));

make_path_kind_expr(it.into())
},
ast::TupleStructPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::RecordPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::PathPat(it) => {
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
},
ast::MacroCall(it) => {
kind_macro_call(it)?
},
ast::Meta(meta) => make_path_kind_attr(meta)?,
ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
ast::UseTree(_) => PathKind::Use,
// completing inside a qualifier
ast::Path(parent) => {
path_ctx.parent = Some(parent.clone());
let parent = iter::successors(Some(parent), |it| it.parent_path()).last()?.syntax().parent()?;
match_ast! {
match parent {
ast::PathType(it) => make_path_kind_type(it.into()),
ast::PathExpr(it) => {
path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));

make_path_kind_expr(it.into())
},
ast::TupleStructPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::RecordPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::PathPat(it) => {
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
},
ast::MacroCall(it) => {
kind_macro_call(it)?
},
ast::Meta(meta) => make_path_kind_attr(meta)?,
ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
ast::UseTree(_) => PathKind::Use,
ast::RecordExpr(it) => make_path_kind_expr(it.into()),
_ => return None,
}
}
},
ast::RecordExpr(it) => {
// A record expression in this position is usually a result of parsing recovery, so check that
if let Some(kind) = inbetween_body_and_decl_check(it.syntax().clone()) {
return Some(make_res(NameRefKind::Keyword(kind)));
}
}
path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));

make_path_kind_expr(it.into())
},
ast::TupleStructPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::RecordPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::PathPat(it) => {
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
},
ast::MacroCall(it) => {
// A macro call in this position is usually a result of parsing recovery, so check that
if let Some(kind) = inbetween_body_and_decl_check(it.syntax().clone()) {
return Some(make_res(NameRefKind::Keyword(kind)));
}

kind_macro_call(it)?
},
ast::Meta(meta) => make_path_kind_attr(meta)?,
ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
ast::UseTree(_) => PathKind::Use,
// completing inside a qualifier
ast::Path(parent) => {
path_ctx.parent = Some(parent.clone());
let parent = iter::successors(Some(parent), |it| it.parent_path()).last()?.syntax().parent()?;
match_ast! {
match parent {
ast::PathType(it) => make_path_kind_type(it.into()),
ast::PathExpr(it) => {
path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));

make_path_kind_expr(it.into())
},
ast::TupleStructPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::RecordPat(it) => {
path_ctx.has_call_parens = true;
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into()) }
},
ast::PathPat(it) => {
PathKind::Pat { pat_ctx: pattern_context_for(sema, original_file, it.into())}
},
ast::MacroCall(it) => {
kind_macro_call(it)?
},
ast::Meta(meta) => make_path_kind_attr(meta)?,
ast::Visibility(it) => PathKind::Vis { has_in_token: it.in_token().is_some() },
ast::UseTree(_) => PathKind::Use,
ast::RecordExpr(it) => make_path_kind_expr(it.into()),
_ => return None,
}
}
},
ast::RecordExpr(it) => {
// A record expression in this position is usually a result of parsing recovery, so check that
if let Some(kind) = inbetween_body_and_decl_check(it.syntax().clone()) {
return Some(make_res(NameRefKind::Keyword(kind)));
}
make_path_kind_expr(it.into())
},
_ => return None,
make_path_kind_expr(it.into())
},
_ => return None,
}
}
};
@@ -1320,9 +1472,7 @@ fn classify_name_ref(
}
})
}
PathKind::Item { .. } => {
parent.ancestors().find(|it| ast::MacroCall::can_cast(it.kind()))
}
PathKind::Item { .. } => parent.ancestors().find(|it| it.kind() == SyntaxKind::ERROR),
_ => None,
};
if let Some(top) = top_node {
@@ -1320,3 +1320,73 @@ fn main() {
"#]],
);
}

#[test]
fn macro_that_ignores_completion_marker() {
check(
r#"
macro_rules! helper {
($v:ident) => {};
}

macro_rules! m {
($v:ident) => {{
helper!($v);
$v
}};
}

fn main() {
let variable = "test";
m!(v$0);
}
"#,
expect![[r#"
ct CONST Unit
en Enum Enum
fn function() fn()
fn main() fn()
lc variable &str
ma helper!(…) macro_rules! helper
ma m!(…) macro_rules! m
ma makro!(…) macro_rules! makro
md module
sc STATIC Unit
st Record Record
st Tuple Tuple
st Unit Unit
un Union Union
ev TupleV(…) TupleV(u32)
bt u32 u32
kw async
kw const
kw crate::
kw enum
kw extern
kw false
kw fn
kw for
kw if
kw if let
kw impl
kw let
kw loop
kw match
kw mod
kw self::
kw static
kw struct
kw trait
kw true
kw type
kw union
kw unsafe
kw use
kw while
kw while let
sn macro_rules
sn pd
sn ppd
"#]],
);
}
@@ -72,8 +72,19 @@ pub(super) fn item_or_macro(p: &mut Parser<'_>, stop_on_r_curly: bool, is_in_ext
// macro_rules! ()
// macro_rules! []
if paths::is_use_path_start(p) {
macro_call(p, m);
return;
paths::use_path(p);
// Do not create a MACRO_CALL node here if this isn't a macro call, this causes problems with completion.

// test_err path_item_without_excl
// foo
if p.at(T![!]) {
macro_call(p, m);
return;
} else {
m.complete(p, ERROR);
p.error("expected an item");
return;
}
}

m.abandon(p);

@@ -410,8 +421,7 @@ fn fn_(p: &mut Parser<'_>, m: Marker) {
}

fn macro_call(p: &mut Parser<'_>, m: Marker) {
assert!(paths::is_use_path_start(p));
paths::use_path(p);
assert!(p.at(T![!]));
match macro_call_after_excl(p) {
BlockLike::Block => (),
BlockLike::NotBlock => {
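The parser change above is the enabling half of the fix: `item_or_macro` previously committed to a MACRO_CALL node as soon as it saw a use-path, so a bare recovered path like `foo` at item level became a macro call and produced a cascade of `expected BANG` / `expected SEMICOLON` errors. Now the path is parsed first and is only promoted to a macro call when a `!` follows; otherwise it is completed as an ERROR node with a single `expected an item` error, as the updated `.rast` expectations below show. A toy, self-contained model of the new rule (illustrative only, not the parser crate's API):

#[derive(Debug, PartialEq)]
enum Node {
    MacroCall(String), // `foo!(...)` really is a macro call
    Error(String),     // a bare `foo` is recovered as an ERROR item
}

fn item_or_macro(tokens: &[&str]) -> Node {
    let path = tokens[0].to_string(); // assume a leading path token
    if tokens.get(1) == Some(&"!") {
        Node::MacroCall(path)
    } else {
        // report "expected an item" and wrap the path in an ERROR node
        Node::Error(path)
    }
}

fn main() {
    assert_eq!(item_or_macro(&["foo", "!"]), Node::MacroCall("foo".into()));
    assert_eq!(item_or_macro(&["foo"]), Node::Error("foo".into()));
}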
@@ -30,22 +30,20 @@ fn source_file() {
TopEntryPoint::SourceFile,
"@error@",
expect![[r#"
SOURCE_FILE
ERROR
AT "@"
MACRO_CALL
PATH
PATH_SEGMENT
NAME_REF
IDENT "error"
ERROR
AT "@"
error 0: expected an item
error 6: expected BANG
error 6: expected `{`, `[`, `(`
error 6: expected SEMICOLON
error 6: expected an item
"#]],
SOURCE_FILE
ERROR
AT "@"
ERROR
PATH
PATH_SEGMENT
NAME_REF
IDENT "error"
ERROR
AT "@"
error 0: expected an item
error 6: expected an item
error 6: expected an item
"#]],
);
}
@@ -775,6 +775,10 @@ mod err {
run_and_expect_errors("test_data/parser/inline/err/missing_fn_param_type.rs");
}
#[test]
fn path_item_without_excl() {
run_and_expect_errors("test_data/parser/inline/err/path_item_without_excl.rs");
}
#[test]
fn pointer_type_no_mutability() {
run_and_expect_errors("test_data/parser/inline/err/pointer_type_no_mutability.rs");
}
@@ -10,20 +10,20 @@ SOURCE_FILE
USE_KW "use"
ERROR
SLASH "/"
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF
IDENT "bin"
ERROR
SLASH "/"
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF
IDENT "env"
WHITESPACE " "
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF

@@ -33,13 +33,7 @@ error 23: expected `[`
error 23: expected an item
error 27: expected one of `*`, `::`, `{`, `self`, `super` or an identifier
error 28: expected SEMICOLON
error 31: expected BANG
error 31: expected `{`, `[`, `(`
error 31: expected SEMICOLON
error 31: expected an item
error 35: expected BANG
error 35: expected `{`, `[`, `(`
error 35: expected SEMICOLON
error 41: expected BANG
error 41: expected `{`, `[`, `(`
error 41: expected SEMICOLON
error 31: expected an item
error 35: expected an item
error 41: expected an item
@@ -14,14 +14,15 @@ SOURCE_FILE
WHITESPACE "\n"
R_CURLY "}"
WHITESPACE "\n\n"
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF
IDENT "bar"
TOKEN_TREE
L_PAREN "("
R_PAREN ")"
ERROR
L_PAREN "("
ERROR
R_PAREN ")"
WHITESPACE " "
ERROR
L_CURLY "{"

@@ -75,6 +76,7 @@ SOURCE_FILE
WHITESPACE "\n"
R_CURLY "}"
WHITESPACE "\n"
error 17: expected BANG
error 19: expected SEMICOLON
error 17: expected an item
error 17: expected an item
error 18: expected an item
error 20: expected an item
@@ -46,7 +46,7 @@ SOURCE_FILE
ERROR
AT "@"
WHITESPACE " "
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF

@@ -72,9 +72,7 @@ error 67: expected R_ANGLE
error 67: expected R_PAREN
error 67: expected SEMICOLON
error 67: expected an item
error 72: expected BANG
error 72: expected `{`, `[`, `(`
error 72: expected SEMICOLON
error 72: expected an item
error 72: expected an item
error 73: expected an item
error 79: expected an item
@@ -26,14 +26,15 @@ SOURCE_FILE
ERROR
FN_KW "fn"
WHITESPACE " "
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF
IDENT "bar"
TOKEN_TREE
L_PAREN "("
R_PAREN ")"
ERROR
L_PAREN "("
ERROR
R_PAREN ")"
WHITESPACE " "
ERROR
L_CURLY "{"

@@ -43,6 +44,7 @@ error 6: expected fn, trait or impl
error 38: expected a name
error 40: missing type for `const` or `static`
error 40: expected SEMICOLON
error 44: expected BANG
error 46: expected SEMICOLON
error 44: expected an item
error 44: expected an item
error 45: expected an item
error 47: expected an item
@@ -12,15 +12,16 @@ SOURCE_FILE
ERROR
USE_KW "use"
WHITESPACE " "
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF
IDENT "std"
ERROR
SEMICOLON ";"
WHITESPACE "\n"
error 8: expected R_ANGLE
error 8: expected type
error 11: expected `{`
error 15: expected BANG
error 15: expected `{`, `[`, `(`
error 15: expected an item
error 15: expected an item
@@ -1,5 +1,5 @@
SOURCE_FILE
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF

@@ -22,7 +22,7 @@ SOURCE_FILE
ERROR
ASYNC_KW "async"
WHITESPACE " "
MACRO_CALL
ERROR
PATH
PATH_SEGMENT
NAME_REF

@@ -42,10 +42,6 @@ SOURCE_FILE
L_CURLY "{"
R_CURLY "}"
WHITESPACE "\n"
error 3: expected BANG
error 3: expected `{`, `[`, `(`
error 3: expected SEMICOLON
error 3: expected an item
error 24: expected fn, trait or impl
error 28: expected BANG
error 28: expected `{`, `[`, `(`
error 28: expected SEMICOLON
error 28: expected an item
@@ -0,0 +1,8 @@
SOURCE_FILE
ERROR
PATH
PATH_SEGMENT
NAME_REF
IDENT "foo"
WHITESPACE "\n"
error 3: expected an item

@@ -0,0 +1 @@
foo