mirror of https://github.com/rust-lang/rust-analyzer
synced 2024-12-26 21:13:37 +00:00

internal: Properly check the edition for edition dependent syntax kinds

This commit is contained in:
parent 2b86639018
commit f90bdfc13d

23 changed files with 348 additions and 130 deletions
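The diff below threads an `Edition` parameter through every keyword check. The motivation: identifiers such as `try`, `dyn`, `async`, `await` (keywords since 2018) and `gen` (since 2024) are only reserved from a certain edition onwards, so an edition-less `is_keyword()` misclassifies them in older code. A minimal, self-contained sketch of the idea; the helper names here are illustrative, not rust-analyzer's:

```rust
// Standalone sketch (not rust-analyzer's implementation) of the idea this
// commit encodes: whether an identifier is a keyword depends on the edition.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Edition {
    Edition2015,
    Edition2018,
    Edition2021,
    Edition2024,
}

/// Returns the edition in which `ident` became a strict keyword, if any.
fn keyword_since(ident: &str) -> Option<Edition> {
    match ident {
        // Always keywords, in every edition (abbreviated list).
        "fn" | "struct" | "match" | "if" => Some(Edition::Edition2015),
        // Edition-dependent: plain identifiers before these editions.
        "try" | "dyn" | "async" | "await" => Some(Edition::Edition2018),
        "gen" => Some(Edition::Edition2024),
        _ => None,
    }
}

fn is_keyword(ident: &str, edition: Edition) -> bool {
    keyword_since(ident).is_some_and(|since| since <= edition)
}

fn main() {
    assert!(!is_keyword("try", Edition::Edition2015)); // valid name in 2015
    assert!(is_keyword("try", Edition::Edition2018)); // keyword from 2018 on
    assert!(!is_keyword("gen", Edition::Edition2021));
    assert!(is_keyword("gen", Edition::Edition2024));
}
```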
@@ -25,7 +25,7 @@ use hir_expand::{
     InFile, MacroFileId, MacroFileIdExt,
 };
 use intern::Symbol;
-use span::Span;
+use span::{Edition, Span};
 use stdx::{format_to, format_to_acc};
 use syntax::{
     ast::{self, edit::IndentLevel},
@@ -257,21 +257,25 @@ fn pretty_print_macro_expansion(
             (T![;] | T!['{'] | T!['}'], _) => "\n",
             (_, T!['}']) => "\n",
             (IDENT | LIFETIME_IDENT, IDENT | LIFETIME_IDENT) => " ",
-            _ if prev_kind.is_keyword() && curr_kind.is_keyword() => " ",
-            (IDENT, _) if curr_kind.is_keyword() => " ",
-            (_, IDENT) if prev_kind.is_keyword() => " ",
+            _ if prev_kind.is_keyword(Edition::CURRENT)
+                && curr_kind.is_keyword(Edition::CURRENT) =>
+            {
+                " "
+            }
+            (IDENT, _) if curr_kind.is_keyword(Edition::CURRENT) => " ",
+            (_, IDENT) if prev_kind.is_keyword(Edition::CURRENT) => " ",
             (T![>], IDENT) => " ",
-            (T![>], _) if curr_kind.is_keyword() => " ",
+            (T![>], _) if curr_kind.is_keyword(Edition::CURRENT) => " ",
             (T![->], _) | (_, T![->]) => " ",
             (T![&&], _) | (_, T![&&]) => " ",
             (T![,], _) => " ",
             (T![:], IDENT | T!['(']) => " ",
-            (T![:], _) if curr_kind.is_keyword() => " ",
+            (T![:], _) if curr_kind.is_keyword(Edition::CURRENT) => " ",
             (T![fn], T!['(']) => "",
-            (T![']'], _) if curr_kind.is_keyword() => " ",
+            (T![']'], _) if curr_kind.is_keyword(Edition::CURRENT) => " ",
             (T![']'], T![#]) => "\n",
             (T![Self], T![::]) => "",
-            _ if prev_kind.is_keyword() => " ",
+            _ if prev_kind.is_keyword(Edition::CURRENT) => " ",
             _ => "",
         };
@@ -104,14 +104,17 @@ impl Name {

     /// Resolve a name from the text of token.
     fn resolve(raw_text: &str) -> Name {
+        // FIXME: Edition
         match raw_text.strip_prefix("r#") {
             // When `raw_text` starts with "r#" but the name does not coincide with any
             // keyword, we never need the prefix so we strip it.
-            Some(text) if !is_raw_identifier(text) => Name::new_ref(text),
+            Some(text) if !is_raw_identifier(text, span::Edition::CURRENT) => Name::new_ref(text),
             // Keywords (in the current edition) *can* be used as a name in earlier editions of
             // Rust, e.g. "try" in Rust 2015. Even in such cases, we keep track of them in their
             // escaped form.
-            None if is_raw_identifier(raw_text) => Name::new_text(&format!("r#{}", raw_text)),
+            None if is_raw_identifier(raw_text, span::Edition::CURRENT) => {
+                Name::new_text(&format!("r#{}", raw_text))
+            }
             _ => Name::new_text(raw_text),
         }
     }
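With the edition threaded through, whether a name needs `r#` escaping is also an edition question. Note that `Name::resolve` above still passes `Edition::CURRENT` behind a `// FIXME: Edition` marker, so the real edition gets plumbed in later. A hedged sketch of the intended behavior, reusing the `Edition` and `is_keyword` helpers from the first sketch and mirroring the `is_raw_identifier` signature this diff introduces:

```rust
// Builds on the first sketch's `Edition` and `is_keyword` helpers.
fn is_raw_identifier(name: &str, edition: Edition) -> bool {
    // Path keywords can never be raw identifiers, in any edition.
    is_keyword(name, edition) && !matches!(name, "self" | "crate" | "super" | "Self")
}

fn main() {
    assert!(!is_raw_identifier("try", Edition::Edition2015)); // plain name in 2015
    assert!(is_raw_identifier("try", Edition::Edition2018)); // needs r# from 2018 on
}
```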
@@ -15,7 +15,7 @@ use ide_db::{
 };
 use syntax::{
     ast::{self, AttrKind, NameOrNameRef},
-    AstNode, SmolStr,
+    AstNode, Edition, SmolStr,
     SyntaxKind::{self, *},
     SyntaxToken, TextRange, TextSize, T,
 };
@@ -468,7 +468,8 @@ impl CompletionContext<'_> {
                 TextRange::at(self.original_token.text_range().start(), TextSize::from(1))
             }
             IDENT | LIFETIME_IDENT | UNDERSCORE | INT_NUMBER => self.original_token.text_range(),
-            _ if kind.is_keyword() => self.original_token.text_range(),
+            // FIXME: Edition
+            _ if kind.is_keyword(Edition::CURRENT) => self.original_token.text_range(),
             _ => TextRange::empty(self.position.offset),
         }
     }
@@ -1,4 +1,5 @@
 //! Utilities for formatting macro expanded nodes until we get a proper formatter.
+use span::Edition;
 use syntax::{
     ast::make,
     ted::{self, Position},
@@ -131,5 +132,6 @@ pub fn insert_ws_into(syn: SyntaxNode) -> SyntaxNode {
 }

 fn is_text(k: SyntaxKind) -> bool {
-    k.is_keyword() || k.is_literal() || k == IDENT || k == UNDERSCORE
+    // FIXME: Edition
+    k.is_keyword(Edition::CURRENT) || k.is_literal() || k == IDENT || k == UNDERSCORE
 }
@@ -1,6 +1,7 @@
 //! Various helper functions to work with SyntaxNodes.
 use itertools::Itertools;
 use parser::T;
+use span::Edition;
 use syntax::{
     ast::{self, HasLoopBody, MacroCall, PathSegmentKind, VisibilityKind},
     AstNode, AstToken, Preorder, RustLanguage, WalkEvent,
@@ -461,7 +462,8 @@ pub fn parse_tt_as_comma_sep_paths(input: ast::TokenTree) -> Option<Vec<ast::Pat
     let tokens =
         input.syntax().children_with_tokens().skip(1).map_while(|it| match it.into_token() {
             // seeing a keyword means the attribute is unclosed so stop parsing here
-            Some(tok) if tok.kind().is_keyword() => None,
+            // FIXME: Edition
+            Some(tok) if tok.kind().is_keyword(Edition::CURRENT) => None,
             // don't include the right token tree parenthesis if it exists
             tok @ Some(_) if tok == r_paren => None,
             // only nodes that we can find are other TokenTrees, those are unexpected in this parse though
@@ -17,7 +17,7 @@ use ide_db::{
 };
 use itertools::Itertools;

-use span::FileId;
+use span::{Edition, FileId};
 use syntax::{
     ast::{self, HasLoopBody},
     match_ast, AstNode, AstToken,
@@ -55,7 +55,7 @@ pub(crate) fn goto_definition(
         | COMMENT => 4,
         // index and prefix ops
         T!['['] | T![']'] | T![?] | T![*] | T![-] | T![!] => 3,
-        kind if kind.is_keyword() => 2,
+        kind if kind.is_keyword(Edition::CURRENT) => 2,
         T!['('] | T![')'] => 2,
         kind if kind.is_trivia() => 0,
         _ => 1,
@@ -11,7 +11,7 @@ use ide_db::{
     },
     FxHashMap, FxHashSet, RootDatabase,
 };
-use span::EditionedFileId;
+use span::{Edition, EditionedFileId};
 use syntax::{
     ast::{self, HasLoopBody},
     match_ast, AstNode,
@@ -65,7 +65,7 @@ pub(crate) fn highlight_related(
     let token = pick_best_token(syntax.token_at_offset(offset), |kind| match kind {
         T![?] => 4, // prefer `?` when the cursor is sandwiched like in `await$0?`
         T![->] => 4,
-        kind if kind.is_keyword() => 3,
+        kind if kind.is_keyword(Edition::CURRENT) => 3,
         IDENT | INT_NUMBER => 2,
         T![|] => 1,
         _ => 0,
@@ -14,6 +14,7 @@ use ide_db::{
     FileRange, FxIndexSet, RootDatabase,
 };
 use itertools::{multizip, Itertools};
+use span::Edition;
 use syntax::{ast, AstNode, SyntaxKind::*, SyntaxNode, T};

 use crate::{
@@ -140,7 +141,7 @@ fn hover_simple(
         | T![_] => 4,
         // index and prefix ops and closure pipe
         T!['['] | T![']'] | T![?] | T![*] | T![-] | T![!] | T![|] => 3,
-        kind if kind.is_keyword() => 2,
+        kind if kind.is_keyword(Edition::CURRENT) => 2,
         T!['('] | T![')'] => 2,
         kind if kind.is_trivia() => 0,
         _ => 1,
@@ -20,6 +20,7 @@ use rustc_apfloat::{
     ieee::{Half as f16, Quad as f128},
     Float,
 };
+use span::Edition;
 use stdx::format_to;
 use syntax::{algo, ast, match_ast, AstNode, AstToken, Direction, SyntaxToken, T};

@@ -251,7 +252,7 @@ pub(super) fn keyword(
     config: &HoverConfig,
     token: &SyntaxToken,
 ) -> Option<HoverResult> {
-    if !token.kind().is_keyword() || !config.documentation || !config.keywords {
+    if !token.kind().is_keyword(Edition::CURRENT) || !config.documentation || !config.keywords {
         return None;
     }
     let parent = token.parent()?;
@@ -17,6 +17,7 @@ use ide_db::{
 };
 use itertools::Itertools;
 use nohash_hasher::IntMap;
+use span::Edition;
 use syntax::{
     ast::{self, HasName},
     match_ast, AstNode,
@@ -305,7 +306,8 @@ fn handle_control_flow_keywords(
     FilePosition { file_id, offset }: FilePosition,
 ) -> Option<ReferenceSearchResult> {
     let file = sema.parse_guess_edition(file_id);
-    let token = file.syntax().token_at_offset(offset).find(|t| t.kind().is_keyword())?;
+    let token =
+        file.syntax().token_at_offset(offset).find(|t| t.kind().is_keyword(Edition::CURRENT))?;

     let references = match token.kind() {
         T![fn] | T![return] | T![try] => highlight_related::highlight_exit_points(sema, token),
@@ -6,6 +6,7 @@

 use hir::{AsAssocItem, HirFileIdExt, InFile, Semantics};
 use ide_db::{
+    base_db::SourceDatabase,
     defs::{Definition, NameClass, NameRefClass},
     rename::{bail, format_err, source_edit_from_references, IdentifierKind},
     source_change::SourceChangeBuilder,
@@ -162,7 +163,8 @@ pub(crate) fn will_rename_file(
     let sema = Semantics::new(db);
     let module = sema.file_to_module_def(file_id)?;
     let def = Definition::Module(module);
-    let mut change = if is_raw_identifier(new_name_stem) {
+    let mut change =
+        if is_raw_identifier(new_name_stem, db.crate_graph()[module.krate().into()].edition) {
            def.rename(&sema, &SmolStr::from_iter(["r#", new_name_stem])).ok()?
        } else {
            def.rename(&sema, new_name_stem).ok()?
@@ -6,6 +6,7 @@ use ide_db::{
     defs::{Definition, IdentClass, NameClass, NameRefClass},
     FxHashMap, RootDatabase, SymbolKind,
 };
+use span::Edition;
 use stdx::hash_once;
 use syntax::{
     ast, match_ast, AstNode, AstToken, NodeOrToken,
@@ -41,7 +42,7 @@ pub(super) fn token(sema: &Semantics<'_, RootDatabase>, token: SyntaxToken) -> O
             HlTag::None.into()
         }
         p if p.is_punct() => punctuation(sema, token, p),
-        k if k.is_keyword() => keyword(sema, token, k)?,
+        k if k.is_keyword(Edition::CURRENT) => keyword(sema, token, k)?,
         _ => return None,
     };
     Some(highlight)
@@ -1,4 +1,5 @@
 //! Syntax highlighting for macro_rules!.
+use span::Edition;
 use syntax::{SyntaxKind, SyntaxToken, TextRange, T};

 use crate::{HlRange, HlTag};
@@ -117,7 +118,7 @@ fn update_macro_state(state: &mut MacroMatcherParseState, tok: &SyntaxToken) {

 fn is_metavariable(token: &SyntaxToken) -> Option<TextRange> {
     match token.kind() {
-        kind if kind == SyntaxKind::IDENT || kind.is_keyword() => {
+        kind if kind == SyntaxKind::IDENT || kind.is_keyword(Edition::CURRENT) => {
             if let Some(_dollar) = token.prev_token().filter(|t| t.kind() == T![$]) {
                 return Some(token.text_range());
             }
@@ -178,19 +178,8 @@ impl<'a> Converter<'a> {
             rustc_lexer::TokenKind::Whitespace => WHITESPACE,

             rustc_lexer::TokenKind::Ident if token_text == "_" => UNDERSCORE,
-            rustc_lexer::TokenKind::Ident
-                if ["async", "await", "dyn", "try"].contains(&token_text)
-                    && !self.edition.at_least_2018() =>
-            {
-                IDENT
-            }
-            rustc_lexer::TokenKind::Ident
-                if token_text == "gen" && !self.edition.at_least_2024() =>
-            {
-                IDENT
-            }
             rustc_lexer::TokenKind::Ident => {
-                SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
+                SyntaxKind::from_keyword(token_text, self.edition).unwrap_or(IDENT)
             }
             rustc_lexer::TokenKind::InvalidPrefix | rustc_lexer::TokenKind::InvalidIdent => {
                 err = "Ident contains invalid characters";
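Before this hunk the lexer hard-coded the 2018 and 2024 special cases; now a single edition-aware lookup does the same work, driven by the keyword tables generated later in this diff. A sketch of the resulting classification, reusing the first sketch's illustrative helpers:

```rust
// Reuses `Edition` and `keyword_since` from the first sketch.
// After this change the lexer needs no per-keyword special cases:
fn ident_token_kind(token_text: &str, edition: Edition) -> &'static str {
    match keyword_since(token_text) {
        // Keyword only if the source edition has caught up with it.
        Some(since) if since <= edition => "keyword",
        // e.g. `dyn` in 2015 or `gen` in 2021 stays a plain IDENT.
        _ => "IDENT",
    }
}

fn main() {
    assert_eq!(ident_token_kind("dyn", Edition::Edition2015), "IDENT");
    assert_eq!(ident_token_kind("dyn", Edition::Edition2018), "keyword");
}
```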
@@ -35,12 +35,10 @@ impl LexedStr<'_> {
                 was_joint = false
             } else if kind == SyntaxKind::IDENT {
                 let token_text = self.text(i);
-                let contextual_kw = if !edition.at_least_2018() && token_text == "dyn" {
-                    SyntaxKind::DYN_KW
-                } else {
-                    SyntaxKind::from_contextual_keyword(token_text).unwrap_or(SyntaxKind::IDENT)
-                };
-                res.push_ident(contextual_kw);
+                res.push_ident(
+                    SyntaxKind::from_contextual_keyword(token_text, edition)
+                        .unwrap_or(SyntaxKind::IDENT),
+                )
             } else {
                 if was_joint {
                     res.was_joint();
File diff suppressed because one or more lines are too long
@@ -307,7 +307,8 @@ where
                 tt::Ident::new(&text, conv.span_for(abs_range)).into()
             }
             UNDERSCORE => make_ident!(),
-            k if k.is_keyword() => make_ident!(),
+            // FIXME: Edition
+            k if k.is_keyword(Edition::CURRENT) => make_ident!(),
             k if k.is_literal() => {
                 let text = token.to_text(conv);
                 let span = conv.span_for(abs_range);
@@ -64,13 +64,11 @@ pub fn to_parser_input<S: Copy + fmt::Debug>(
                 "_" => res.push(T![_]),
                 i if i.starts_with('\'') => res.push(LIFETIME_IDENT),
                 _ if ident.is_raw.yes() => res.push(IDENT),
-                "gen" if !edition.at_least_2024() => res.push(IDENT),
-                "dyn" if !edition.at_least_2018() => res.push_ident(DYN_KW),
-                "async" | "await" | "try" if !edition.at_least_2018() => res.push(IDENT),
-                text => match SyntaxKind::from_keyword(text) {
+                text => match SyntaxKind::from_keyword(text, edition) {
                     Some(kind) => res.push(kind),
                     None => {
-                        let contextual_keyword = SyntaxKind::from_contextual_keyword(text)
+                        let contextual_keyword =
+                            SyntaxKind::from_contextual_keyword(text, edition)
                                 .unwrap_or(SyntaxKind::IDENT);
                         res.push_ident(contextual_keyword);
                     }
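`to_parser_input` similarly drops its hand-written `gen`/`dyn`/`async` cases: `from_keyword(text, edition)` rejects pre-edition keywords, and `from_contextual_keyword(text, edition)` picks them up as weak keywords instead. A hedged sketch of that split, reusing the first sketch's `Edition`; the keyword sets come from the tables later in this diff:

```rust
// The `dyn` arm mirrors the generated `#kw if edition < #ed` guard later
// in this diff: an edition-dependent keyword is only *contextual* in
// editions before it became strict.
fn is_contextual_keyword(ident: &str, edition: Edition) -> bool {
    match ident {
        "macro_rules" | "union" | "default" | "raw" | "auto" | "yeet" => true,
        "dyn" => edition < Edition::Edition2018,
        _ => false,
    }
}

fn main() {
    assert!(is_contextual_keyword("dyn", Edition::Edition2015)); // weak before 2018
    assert!(!is_contextual_keyword("dyn", Edition::Edition2021)); // strict from 2018
    assert!(is_contextual_keyword("union", Edition::Edition2021)); // always weak
}
```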
@@ -9,8 +9,6 @@
 // //          -- comment
 // Name =      -- non-terminal definition
 // 'ident'     -- keyword or punct token (terminal)
-// '?ident'    -- contextual keyword (terminal)
-//                too)
 // '#ident'    -- generic token (terminal)
 // '@ident'    -- literal token (terminal)
 // A B         -- sequence
@@ -152,7 +150,7 @@ Item =

 MacroRules =
   Attr* Visibility?
-  '?macro_rules' '!' Name
+  'macro_rules' '!' Name
   TokenTree

 MacroDef =
@@ -188,7 +186,7 @@ UseTreeList =

 Fn =
   Attr* Visibility?
-  '?default'? 'const'? 'async'? 'unsafe'? Abi?
+  'default'? 'const'? 'async'? 'unsafe'? Abi?
   'fn' Name GenericParamList? ParamList RetType? WhereClause?
   (body:BlockExpr | ';')

@@ -220,7 +218,7 @@ RetType =

 TypeAlias =
   Attr* Visibility?
-  '?default'?
+  'default'?
   'type' Name GenericParamList? (':' TypeBoundList?)? WhereClause?
   ('=' Type)? ';'

@@ -263,7 +261,7 @@ Variant =

 Union =
   Attr* Visibility?
-  '?union' Name GenericParamList? WhereClause?
+  'union' Name GenericParamList? WhereClause?
   RecordFieldList

 // A Data Type.
@@ -276,7 +274,7 @@ Adt =

 Const =
   Attr* Visibility?
-  '?default'?
+  'default'?
   'const' (Name | '_') ':' Type
   ('=' body:Expr)? ';'

@@ -287,7 +285,7 @@ Static =

 Trait =
   Attr* Visibility?
-  'unsafe'? '?auto'?
+  'unsafe'? 'auto'?
   'trait' Name GenericParamList?
   (':' TypeBoundList?)? WhereClause? AssocItemList

@@ -306,7 +304,7 @@ AssocItem =

 Impl =
   Attr* Visibility?
-  '?default'? 'unsafe'?
+  'default'? 'unsafe'?
   'impl' GenericParamList? ('const'? '!'? trait:Type 'for')? self_ty:Type WhereClause?
   AssocItemList

@@ -387,13 +385,13 @@ Expr =
 | UnderscoreExpr

 OffsetOfExpr =
-  Attr* '?builtin' '#' '?offset_of' '(' Type ',' fields:(NameRef ('.' NameRef)* ) ')'
+  Attr* 'builtin' '#' 'offset_of' '(' Type ',' fields:(NameRef ('.' NameRef)* ) ')'

 AsmExpr =
-  Attr* '?builtin' '#' '?asm' '(' Expr ')'
+  Attr* 'builtin' '#' 'asm' '(' Expr ')'

 FormatArgsExpr =
-  Attr* '?builtin' '#' '?format_args' '('
+  Attr* 'builtin' '#' 'format_args' '('
   template:Expr
   (',' args:(FormatArgsArg (',' FormatArgsArg)* ','?)? )?
   ')'
@@ -425,7 +423,7 @@ StmtList =
   '}'

 RefExpr =
-  Attr* '&' (('?raw' 'const'?)| ('?raw'? 'mut') ) Expr
+  Attr* '&' (('raw' 'const'?)| ('raw'? 'mut') ) Expr

 TryExpr =
   Attr* Expr '?'
@@ -550,7 +548,7 @@ YieldExpr =
   Attr* 'yield' Expr?

 YeetExpr =
-  Attr* 'do' '?yeet' Expr?
+  Attr* 'do' 'yeet' Expr?

 LetExpr =
   Attr* 'let' Pat '=' Expr
@@ -117,7 +117,7 @@ pub fn name_ref(name_ref: &str) -> ast::NameRef {
     ast_from_text(&format!("fn f() {{ {raw_escape}{name_ref}; }}"))
 }
 fn raw_ident_esc(ident: &str) -> &'static str {
-    if is_raw_identifier(ident) {
+    if is_raw_identifier(ident, Edition::CURRENT) {
         "r#"
     } else {
         ""
@@ -2,7 +2,7 @@

 use crate::SyntaxKind;

-pub fn is_raw_identifier(name: &str) -> bool {
-    let is_keyword = SyntaxKind::from_keyword(name).is_some();
+pub fn is_raw_identifier(name: &str, edition: parser::Edition) -> bool {
+    let is_keyword = SyntaxKind::from_keyword(name, edition).is_some();
     is_keyword && !matches!(name, "self" | "crate" | "super" | "Self")
 }
@@ -396,24 +396,66 @@ fn generate_syntax_kinds(grammar: KindsSrc) -> String {
     let punctuation =
         grammar.punct.iter().map(|(_token, name)| format_ident!("{}", name)).collect::<Vec<_>>();

-    let x = |&name| match name {
+    let fmt_kw_as_variant = |&name| match name {
         "Self" => format_ident!("SELF_TYPE_KW"),
         name => format_ident!("{}_KW", to_upper_snake_case(name)),
     };
-    let full_keywords_values = grammar.keywords;
-    let full_keywords = full_keywords_values.iter().map(x);
+    let strict_keywords = grammar.keywords;
+    let strict_keywords_variants =
+        strict_keywords.iter().map(fmt_kw_as_variant).collect::<Vec<_>>();
+    let strict_keywords_tokens = strict_keywords.iter().map(|it| format_ident!("{it}"));

-    let contextual_keywords_values = &grammar.contextual_keywords;
-    let contextual_keywords = contextual_keywords_values.iter().map(x);
+    let edition_dependent_keywords_variants_match_arm = grammar
+        .edition_dependent_keywords
+        .iter()
+        .map(|(kw, ed)| {
+            let kw = fmt_kw_as_variant(kw);
+            quote! { #kw if #ed <= edition }
+        })
+        .collect::<Vec<_>>();
+    let edition_dependent_keywords_str_match_arm = grammar
+        .edition_dependent_keywords
+        .iter()
+        .map(|(kw, ed)| {
+            quote! { #kw if #ed <= edition }
+        })
+        .collect::<Vec<_>>();
+    let edition_dependent_keywords_variants = grammar
+        .edition_dependent_keywords
+        .iter()
+        .map(|(kw, _)| fmt_kw_as_variant(kw))
+        .collect::<Vec<_>>();
+    let edition_dependent_keywords_tokens =
+        grammar.edition_dependent_keywords.iter().map(|(it, _)| format_ident!("{it}"));

-    let all_keywords_values = grammar
-        .keywords
-        .iter()
-        .chain(grammar.contextual_keywords.iter())
-        .copied()
-        .collect::<Vec<_>>();
-    let all_keywords_idents = all_keywords_values.iter().map(|kw| format_ident!("{}", kw));
-    let all_keywords = all_keywords_values.iter().map(x).collect::<Vec<_>>();
+    let contextual_keywords = grammar.contextual_keywords;
+    let contextual_keywords_variants =
+        contextual_keywords.iter().map(fmt_kw_as_variant).collect::<Vec<_>>();
+    let contextual_keywords_tokens = contextual_keywords.iter().map(|it| format_ident!("{it}"));
+    let contextual_keywords_str_match_arm = grammar.contextual_keywords.iter().map(|kw| {
+        match grammar.edition_dependent_keywords.iter().find(|(ed_kw, _)| ed_kw == kw) {
+            Some((_, ed)) => quote! { #kw if edition < #ed },
+            None => quote! { #kw },
+        }
+    });
+    let contextual_keywords_variants_match_arm = grammar
+        .contextual_keywords
+        .iter()
+        .map(|kw_s| {
+            let kw = fmt_kw_as_variant(kw_s);
+            match grammar.edition_dependent_keywords.iter().find(|(ed_kw, _)| ed_kw == kw_s) {
+                Some((_, ed)) => quote! { #kw if edition < #ed },
+                None => quote! { #kw },
+            }
+        })
+        .collect::<Vec<_>>();
+
+    let non_strict_keyword_variants = contextual_keywords_variants
+        .iter()
+        .chain(edition_dependent_keywords_variants.iter())
+        .sorted()
+        .dedup()
+        .collect::<Vec<_>>();

     let literals =
         grammar.literals.iter().map(|name| format_ident!("{}", name)).collect::<Vec<_>>();
@@ -424,6 +466,8 @@ fn generate_syntax_kinds(grammar: KindsSrc) -> String {

     let ast = quote! {
         #![allow(bad_style, missing_docs, unreachable_pub)]
+        use crate::Edition;
+
         /// The kind of syntax node, e.g. `IDENT`, `USE_KW`, or `STRUCT`.
         #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
         #[repr(u16)]
@@ -435,7 +479,8 @@ fn generate_syntax_kinds(grammar: KindsSrc) -> String {
         #[doc(hidden)]
         EOF,
         #(#punctuation,)*
-        #(#all_keywords,)*
+        #(#strict_keywords_variants,)*
+        #(#non_strict_keyword_variants,)*
         #(#literals,)*
         #(#tokens,)*
         #(#nodes,)*
@@ -447,31 +492,55 @@ fn generate_syntax_kinds(grammar: KindsSrc) -> String {
         use self::SyntaxKind::*;

         impl SyntaxKind {
-            pub fn is_keyword(self) -> bool {
-                matches!(self, #(#all_keywords)|*)
+            /// Checks whether this syntax kind is a strict keyword for the given edition.
+            /// Strict keywords are identifiers that are always considered keywords.
+            pub fn is_strict_keyword(self, edition: Edition) -> bool {
+                matches!(self, #(#strict_keywords_variants)|*)
+                    || match self {
+                        #(#edition_dependent_keywords_variants_match_arm => true,)*
+                        _ => false,
+                    }
+            }
+
+            /// Checks whether this syntax kind is a weak keyword for the given edition.
+            /// Weak keywords are identifiers that are considered keywords only in certain contexts.
+            pub fn is_contextual_keyword(self, edition: Edition) -> bool {
+                match self {
+                    #(#contextual_keywords_variants_match_arm => true,)*
+                    _ => false,
+                }
+            }
+
+            /// Checks whether this syntax kind is a strict or weak keyword for the given edition.
+            pub fn is_keyword(self, edition: Edition) -> bool {
+                matches!(self, #(#strict_keywords_variants)|*)
+                    || match self {
+                        #(#edition_dependent_keywords_variants_match_arm => true,)*
+                        #(#contextual_keywords_variants_match_arm => true,)*
+                        _ => false,
+                    }
             }

             pub fn is_punct(self) -> bool {

                 matches!(self, #(#punctuation)|*)

             }

             pub fn is_literal(self) -> bool {
                 matches!(self, #(#literals)|*)
             }

-            pub fn from_keyword(ident: &str) -> Option<SyntaxKind> {
+            pub fn from_keyword(ident: &str, edition: Edition) -> Option<SyntaxKind> {
                 let kw = match ident {
-                    #(#full_keywords_values => #full_keywords,)*
+                    #(#strict_keywords => #strict_keywords_variants,)*
+                    #(#edition_dependent_keywords_str_match_arm => #edition_dependent_keywords_variants,)*
                     _ => return None,
                 };
                 Some(kw)
             }

-            pub fn from_contextual_keyword(ident: &str) -> Option<SyntaxKind> {
+            pub fn from_contextual_keyword(ident: &str, edition: Edition) -> Option<SyntaxKind> {
                 let kw = match ident {
-                    #(#contextual_keywords_values => #contextual_keywords,)*
+                    #(#contextual_keywords_str_match_arm => #contextual_keywords_variants,)*
                     _ => return None,
                 };
                 Some(kw)
@@ -489,7 +558,9 @@ fn generate_syntax_kinds(grammar: KindsSrc) -> String {
     #[macro_export]
     macro_rules! T {
         #([#punctuation_values] => { $crate::SyntaxKind::#punctuation };)*
-        #([#all_keywords_idents] => { $crate::SyntaxKind::#all_keywords };)*
+        #([#strict_keywords_tokens] => { $crate::SyntaxKind::#strict_keywords_variants };)*
+        #([#contextual_keywords_tokens] => { $crate::SyntaxKind::#contextual_keywords_variants };)*
+        #([#edition_dependent_keywords_tokens] => { $crate::SyntaxKind::#edition_dependent_keywords_variants };)*
         [lifetime_ident] => { $crate::SyntaxKind::LIFETIME_IDENT };
         [int_number] => { $crate::SyntaxKind::INT_NUMBER };
         [ident] => { $crate::SyntaxKind::IDENT };
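The regenerated `SyntaxKind` API distinguishes strict keywords (always reserved), weak/contextual keywords, and edition-dependent ones. A hedged usage sketch, assuming the generated code still lives in the `parser` crate and that it re-exports `Edition` as the diff's `use crate::Edition;` suggests; the expected results follow the keyword tables in this diff:

```rust
// Hedged usage sketch of the regenerated API (signatures come from this
// diff; the assertions reflect the EDITION_DEPENDENT_KEYWORDS table).
use parser::{Edition, SyntaxKind};

fn demo() {
    // `try` became a strict keyword in the 2018 edition.
    assert_eq!(SyntaxKind::from_keyword("try", Edition::Edition2015), None);
    assert!(SyntaxKind::from_keyword("try", Edition::Edition2018).is_some());

    // `union` is only ever a weak (contextual) keyword.
    assert_eq!(SyntaxKind::from_keyword("union", Edition::Edition2021), None);
    assert!(SyntaxKind::from_contextual_keyword("union", Edition::Edition2021).is_some());
}
```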
@@ -1,5 +1,7 @@
 //! Defines input for code generation process.

+use quote::ToTokens;
+
 use crate::codegen::grammar::to_upper_snake_case;

 #[derive(Copy, Clone, Debug)]
@@ -10,6 +12,35 @@ pub(crate) struct KindsSrc {
     pub(crate) literals: &'static [&'static str],
     pub(crate) tokens: &'static [&'static str],
     pub(crate) nodes: &'static [&'static str],
+    pub(crate) edition_dependent_keywords: &'static [(&'static str, Edition)],
+}
+
+#[allow(dead_code)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub(super) enum Edition {
+    Edition2015,
+    Edition2018,
+    Edition2021,
+    Edition2024,
+}
+
+impl ToTokens for Edition {
+    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
+        match self {
+            Edition::Edition2015 => {
+                tokens.extend(quote::quote! { Edition::Edition2015 });
+            }
+            Edition::Edition2018 => {
+                tokens.extend(quote::quote! { Edition::Edition2018 });
+            }
+            Edition::Edition2021 => {
+                tokens.extend(quote::quote! { Edition::Edition2021 });
+            }
+            Edition::Edition2024 => {
+                tokens.extend(quote::quote! { Edition::Edition2024 });
+            }
+        }
+    }
 }

 /// The punctuations of the language.
@@ -75,17 +106,32 @@ const EOF: &str = "EOF";

 const RESERVED: &[&str] = &[
     "abstract", "become", "box", "do", "final", "macro", "override", "priv", "typeof", "unsized",
-    "virtual", "yield", "try",
+    "virtual", "yield",
 ];
-const CONTEXTUAL_RESERVED: &[&str] = &[];
+// keywords that are keywords only in specific parse contexts
+#[doc(alias = "WEAK_KEYWORDS")]
+const CONTEXTUAL_KEYWORDS: &[&str] =
+    &["macro_rules", "union", "default", "raw", "dyn", "auto", "yeet"];
+// keywords we use for special macro expansions
+const CONTEXTUAL_BUILTIN_KEYWORDS: &[&str] = &["builtin", "offset_of", "format_args", "asm"];
+// keywords that are keywords depending on the edition
+const EDITION_DEPENDENT_KEYWORDS: &[(&str, Edition)] = &[
+    ("try", Edition::Edition2018),
+    ("dyn", Edition::Edition2018),
+    ("async", Edition::Edition2018),
+    ("await", Edition::Edition2018),
+    ("gen", Edition::Edition2024),
+];

 pub(crate) fn generate_kind_src(
     nodes: &[AstNodeSrc],
     enums: &[AstEnumSrc],
     grammar: &ungrammar::Grammar,
 ) -> KindsSrc {
+    let mut contextual_keywords: Vec<&_> =
+        CONTEXTUAL_KEYWORDS.iter().chain(CONTEXTUAL_BUILTIN_KEYWORDS).copied().collect();
+
     let mut keywords: Vec<&_> = Vec::new();
-    let mut contextual_keywords: Vec<&_> = Vec::new();
     let mut tokens: Vec<&_> = TOKENS.to_vec();
     let mut literals: Vec<&_> = Vec::new();
     let mut used_puncts = vec![false; PUNCT.len()];
@@ -103,9 +149,7 @@ pub(crate) fn generate_kind_src(
             ("#", token) if !token.is_empty() => {
                 tokens.push(String::leak(to_upper_snake_case(token)));
             }
-            ("?", kw) if !kw.is_empty() => {
-                contextual_keywords.push(String::leak(kw.to_owned()));
-            }
+            _ if contextual_keywords.contains(&name) => {}
             _ if name.chars().all(char::is_alphabetic) => {
                 keywords.push(String::leak(name.to_owned()));
             }
@@ -124,9 +168,14 @@ pub(crate) fn generate_kind_src(
     keywords.extend(RESERVED.iter().copied());
     keywords.sort();
     keywords.dedup();
-    contextual_keywords.extend(CONTEXTUAL_RESERVED.iter().copied());
     contextual_keywords.sort();
     contextual_keywords.dedup();
+    let mut edition_dependent_keywords: Vec<(&_, _)> = EDITION_DEPENDENT_KEYWORDS.to_vec();
+    edition_dependent_keywords.sort();
+    edition_dependent_keywords.dedup();
+
+    keywords.retain(|&it| !contextual_keywords.contains(&it));
+    keywords.retain(|&it| !edition_dependent_keywords.iter().any(|&(kw, _)| kw == it));

     // we leak things here for simplicity, that way we don't have to deal with lifetimes
     // The execution is a one shot job so thats fine
@@ -142,12 +191,21 @@ pub(crate) fn generate_kind_src(
     nodes.sort();
     let keywords = Vec::leak(keywords);
     let contextual_keywords = Vec::leak(contextual_keywords);
+    let edition_dependent_keywords = Vec::leak(edition_dependent_keywords);
     let literals = Vec::leak(literals);
     literals.sort();
     let tokens = Vec::leak(tokens);
     tokens.sort();

-    KindsSrc { punct: PUNCT, nodes, keywords, contextual_keywords, literals, tokens }
+    KindsSrc {
+        punct: PUNCT,
+        nodes,
+        keywords,
+        contextual_keywords,
+        edition_dependent_keywords,
+        literals,
+        tokens,
+    }
 }

 #[derive(Default, Debug)]
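On the codegen side, the new `Edition` mirror enum derives `Ord` precisely so the generated guards can compare editions chronologically, while `ToTokens` splices the matching `Edition::...` path into the `quote!` output. A minimal sketch of the ordering assumption:

```rust
// Derived Ord follows declaration order, so a later edition compares
// greater; this is what makes the generated `#ed <= edition` guards work.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Edition {
    Edition2015,
    Edition2018,
    Edition2021,
    Edition2024,
}

fn main() {
    assert!(Edition::Edition2018 <= Edition::Edition2021); // `dyn` is strict in 2021
    assert!(!(Edition::Edition2024 <= Edition::Edition2021)); // `gen` is not
}
```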