mirror of https://github.com/rust-lang/rust-analyzer
synced 2024-12-26 13:03:31 +00:00

Merge pull request #18417 from ChayimFriedman2/hash-string

fix: Correctly handle `#""` in edition <2024

Commit: 8d10e248c8
4 changed files with 30 additions and 7 deletions
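Context for the fix: since edition 2024, `#"` is reserved syntax (for guarded string literals), so rustc's lexer emits `#"` or `##` as a single `GuardedStrPrefix` token. In earlier editions the same characters must remain two tokens, `#` followed by the rest, which can legitimately occur in macro input. A small illustration; the macro itself is hypothetical, and only the `#"foo"` token sequence comes from the PR's new test case:

// Hypothetical macro whose matcher takes a `#` token followed by a string
// literal. Before edition 2024, `#"foo"` in the invocation lexes as two
// tokens (POUND, then STRING); in edition 2024 it is a lex error
// ("Invalid string literal (reserved syntax)", as in the diff below).
macro_rules! pound_string {
    (# $s:literal) => {
        $s
    };
}

fn main() {
    let s = pound_string!(#"foo"); // compiles in edition <2024 only
    println!("{s}");
}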
@@ -39,7 +39,9 @@ impl<'a> LexedStr<'a> {
             conv.offset = shebang_len;
         };
 
-        for token in rustc_lexer::tokenize(&text[conv.offset..]) {
+        // Re-create the tokenizer from scratch every token because `GuardedStrPrefix` is one token in the lexer
+        // but we want to split it to two in edition <2024.
+        while let Some(token) = rustc_lexer::tokenize(&text[conv.offset..]).next() {
             let token_text = &text[conv.offset..][..token.len as usize];
 
             conv.extend_token(&token.kind, token_text);
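Why a `while let` with a fresh `tokenize` call instead of iterating one tokenizer: after the converter consumes only the leading `#` of a `GuardedStrPrefix` token, the remaining input has to be re-lexed from the new offset. A minimal standalone sketch of the pattern, assuming the same single-argument `rustc_lexer::tokenize` API the diff calls; the driver function is illustrative, not the converter itself:

fn lex_pre_2024(text: &str) {
    let mut offset = 0;
    // Restart tokenization at `offset` on every iteration: after consuming
    // only the `#` of a `#"` token, the `"` must be re-lexed as the start of
    // a string literal.
    while let Some(token) = rustc_lexer::tokenize(&text[offset..]).next() {
        let len = match token.kind {
            // `#"` and `##` arrive as one `GuardedStrPrefix` token; pre-2024
            // we take just the leading `#` and let the loop re-lex the rest.
            rustc_lexer::TokenKind::GuardedStrPrefix => 1,
            _ => token.len as usize,
        };
        println!("{:?} {:?}", token.kind, &text[offset..offset + len]);
        offset += len;
    }
}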
@@ -158,7 +160,7 @@ impl<'a> Converter<'a> {
         }
     }
 
-    fn extend_token(&mut self, kind: &rustc_lexer::TokenKind, token_text: &str) {
+    fn extend_token(&mut self, kind: &rustc_lexer::TokenKind, mut token_text: &str) {
         // A note on an intended tradeoff:
         // We drop some useful information here (see patterns with double dots `..`)
         // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
@@ -189,10 +191,15 @@ impl<'a> Converter<'a> {
             rustc_lexer::TokenKind::RawIdent => IDENT,
 
             rustc_lexer::TokenKind::GuardedStrPrefix if self.edition.at_least_2024() => {
                 // FIXME: rustc does something better for recovery.
                 err = "Invalid string literal (reserved syntax)";
                 ERROR
             }
-            rustc_lexer::TokenKind::GuardedStrPrefix => POUND,
+            rustc_lexer::TokenKind::GuardedStrPrefix => {
+                // The token is `#"` or `##`, split it into two.
+                token_text = &token_text[1..];
+                POUND
+            }
 
             rustc_lexer::TokenKind::Literal { kind, .. } => {
                 self.extend_literal(token_text.len(), kind);
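Note on the new arm: if I read the surrounding converter correctly, only the length of `token_text` feeds into the pushed token, so shrinking it to one byte emits a one-byte POUND and leaves the `"` (or the second `#` of `##`) at the front of the next re-tokenization pass in the `while let` loop above.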
@@ -15,11 +15,20 @@ use crate::{Edition, LexedStr, TopEntryPoint};
 #[path = "../test_data/generated/runner.rs"]
 mod runner;
 
+fn infer_edition(file_path: &Path) -> Edition {
+    let file_content = std::fs::read_to_string(file_path).unwrap();
+    if let Some(edition) = file_content.strip_prefix("//@ edition: ") {
+        edition[..4].parse().expect("invalid edition directive")
+    } else {
+        Edition::CURRENT
+    }
+}
+
 #[test]
 fn lex_ok() {
     for case in TestCase::list("lexer/ok") {
         let _guard = stdx::panic_context::enter(format!("{:?}", case.rs));
-        let actual = lex(&case.text);
+        let actual = lex(&case.text, infer_edition(&case.rs));
         expect_file![case.rast].assert_eq(&actual)
     }
 }
@@ -28,13 +37,13 @@ fn lex_ok() {
 fn lex_err() {
     for case in TestCase::list("lexer/err") {
         let _guard = stdx::panic_context::enter(format!("{:?}", case.rs));
-        let actual = lex(&case.text);
+        let actual = lex(&case.text, infer_edition(&case.rs));
         expect_file![case.rast].assert_eq(&actual)
     }
 }
 
-fn lex(text: &str) -> String {
-    let lexed = LexedStr::new(Edition::CURRENT, text);
+fn lex(text: &str, edition: Edition) -> String {
+    let lexed = LexedStr::new(edition, text);
 
     let mut res = String::new();
     for i in 0..lexed.len() {
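The directive handling above is deliberately strict: `strip_prefix` runs on the whole file content, so `//@ edition:` only takes effect when it starts at the very first byte of the file, and exactly four characters after the prefix are parsed as the edition number. A small self-contained check of that parsing rule; the helper and its inputs are illustrative:

fn parse_edition_directive(file_content: &str) -> Option<&str> {
    // Mirrors `infer_edition` above: the directive must open the file, and
    // only the first four characters after the prefix are read.
    file_content.strip_prefix("//@ edition: ").map(|rest| &rest[..4])
}

fn main() {
    assert_eq!(parse_edition_directive("//@ edition: 2021\n\n#\"foo\"\n"), Some("2021"));
    assert_eq!(parse_edition_directive("fn main() {}\n"), None);
}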
@@ -0,0 +1,4 @@
+COMMENT "//@ edition: 2021"
+WHITESPACE "\n\n"
+POUND "#"
+STRING "\"foo\""
@@ -0,0 +1,3 @@
+//@ edition: 2021
+
+#"foo"