mirror of https://github.com/rust-lang/rust-analyzer
synced 2024-12-25 12:33:33 +00:00

Simple identifier lexer

parent 15af7ad36c
commit 171baf4c48

8 changed files with 146 additions and 19 deletions
Cargo.toml
@@ -4,6 +4,8 @@ version = "0.1.0"
 authors = ["Aleksey Kladov <aleksey.kladov@gmail.com>"]
 
 [dependencies]
+unicode-xid = "0.1.0"
+
 serde = "1.0.26"
 serde_derive = "1.0.26"
 file = "1.1.1"
src/lexer.rs (deleted)
@@ -1,10 +0,0 @@
-use {Token, TextUnit};
-use syntax_kinds::*;
-
-pub fn next_token(text: &str) -> Token {
-    let c = text.chars().next().unwrap();
-    Token {
-        kind: IDENT,
-        len: TextUnit::len_of_char(c),
-    }
-}
src/lexer/mod.rs (new file)
@@ -0,0 +1,64 @@
+use unicode_xid::UnicodeXID;
+
+use {Token, SyntaxKind};
+use syntax_kinds::*;
+
+mod ptr;
+use self::ptr::Ptr;
+
+pub fn next_token(text: &str) -> Token {
+    assert!(!text.is_empty());
+    let mut ptr = Ptr::new(text);
+    let c = ptr.bump().unwrap();
+    let kind = next_token_inner(c, &mut ptr);
+    let len = ptr.into_len();
+    Token { kind, len }
+}
+
+fn next_token_inner(c: char, ptr: &mut Ptr) -> SyntaxKind {
+    // Note: r as in r" or r#" is part of a raw string literal,
+    // b as in b' is part of a byte literal.
+    // They are not identifiers, and are handled further down.
+    let ident_start = ident_start(c) && !string_literal_start(c, ptr.next(), ptr.nnext());
+    if ident_start {
+        loop {
+            match ptr.next() {
+                Some(c) if ident_continue(c) => {
+                    ptr.bump();
+                },
+                _ => break,
+            }
+        }
+        IDENT
+    } else {
+        WHITESPACE
+    }
+}
+
+fn ident_start(c: char) -> bool {
+    (c >= 'a' && c <= 'z')
+        || (c >= 'A' && c <= 'Z')
+        || c == '_'
+        || (c > '\x7f' && UnicodeXID::is_xid_start(c))
+}
+
+fn ident_continue(c: char) -> bool {
+    (c >= 'a' && c <= 'z')
+        || (c >= 'A' && c <= 'Z')
+        || (c >= '0' && c <= '9')
+        || c == '_'
+        || (c > '\x7f' && UnicodeXID::is_xid_continue(c))
+}
+
+
+fn string_literal_start(c: char, c1: Option<char>, c2: Option<char>) -> bool {
+    match (c, c1, c2) {
+        ('r', Some('"'), _) |
+        ('r', Some('#'), _) |
+        ('b', Some('"'), _) |
+        ('b', Some('\''), _) |
+        ('b', Some('r'), Some('"')) |
+        ('b', Some('r'), Some('#')) => true,
+        _ => false
+    }
+}
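For orientation (not part of the diff): a sketch of what the new entry point accepts at this stage, assuming SyntaxKind derives PartialEq and Debug so the assertions compile, and using the From<TextUnit> for u32 conversion from src/text.rs.

#[test]
fn next_token_smoke() {
    // Identifiers are consumed greedily; the space stops the loop.
    let t = next_token("hello world");
    assert_eq!(t.kind, IDENT);
    assert_eq!(u32::from(t.len), 5);

    // `r"` trips string_literal_start, so `r` is not lexed as an
    // identifier; the WHITESPACE catch-all eats a single char for now.
    let t = next_token("r\"raw\"");
    assert_eq!(t.kind, WHITESPACE);
    assert_eq!(u32::from(t.len), 1);
}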
src/lexer/ptr.rs (new file)
@@ -0,0 +1,38 @@
+use {TextUnit};
+
+use std::str::Chars;
+
+pub(crate) struct Ptr<'s> {
+    text: &'s str,
+    len: TextUnit,
+}
+
+impl<'s> Ptr<'s> {
+    pub fn new(text: &'s str) -> Ptr<'s> {
+        Ptr { text, len: TextUnit::new(0) }
+    }
+
+    pub fn into_len(self) -> TextUnit {
+        self.len
+    }
+
+    pub fn next(&self) -> Option<char> {
+        self.chars().next()
+    }
+
+    pub fn nnext(&self) -> Option<char> {
+        let mut chars = self.chars();
+        chars.next()?;
+        chars.next()
+    }
+
+    pub fn bump(&mut self) -> Option<char> {
+        let ch = self.chars().next()?;
+        self.len += TextUnit::len_of_char(ch);
+        Some(ch)
+    }
+
+    fn chars(&self) -> Chars {
+        self.text[self.len.0 as usize ..].chars()
+    }
+}
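A sketch of the Ptr contract, under the same assumptions: next and nnext peek without advancing, while bump consumes a char and adds its UTF-8 length to the accumulated offset. This two-char lookahead is exactly what string_literal_start needs to tell br"..." from an identifier starting with b.

#[test]
fn ptr_lookahead() {
    let mut p = Ptr::new("br#");
    assert_eq!(p.next(), Some('b'));   // one-char lookahead, no advance
    assert_eq!(p.nnext(), Some('r'));  // two-char lookahead, no advance
    p.bump();                          // consume 'b': len grows by 1
    assert_eq!(p.next(), Some('r'));   // lookahead moved with len
    assert_eq!(u32::from(p.into_len()), 1);
}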
src/lib.rs
@@ -1,3 +1,5 @@
+extern crate unicode_xid;
+
 mod text;
 mod tree;
+mod lexer;
-
src/text.rs
@@ -1,7 +1,10 @@
 use std::fmt;
+use std::ops;
 
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct TextUnit(u32);
+pub struct TextUnit(
+    pub(crate) u32
+);
 
 impl TextUnit {
     pub fn len_of_char(c: char) -> TextUnit {
@@ -30,3 +33,29 @@ impl From<TextUnit> for u32 {
         tu.0
     }
 }
+
+impl ops::Add<TextUnit> for TextUnit {
+    type Output = TextUnit;
+    fn add(self, rhs: TextUnit) -> TextUnit {
+        TextUnit(self.0 + rhs.0)
+    }
+}
+
+impl ops::AddAssign<TextUnit> for TextUnit {
+    fn add_assign(&mut self, rhs: TextUnit) {
+        self.0 += rhs.0
+    }
+}
+
+impl ops::Sub<TextUnit> for TextUnit {
+    type Output = TextUnit;
+    fn sub(self, rhs: TextUnit) -> TextUnit {
+        TextUnit(self.0 - rhs.0)
+    }
+}
+
+impl ops::SubAssign<TextUnit> for TextUnit {
+    fn sub_assign(&mut self, rhs: TextUnit) {
+        self.0 -= rhs.0
+    }
+}
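The operator impls make offset bookkeeping read naturally. A sketch (not part of the commit), assuming TextUnit::new is reachable from the caller:

#[test]
fn text_unit_arithmetic() {
    let mut offset = TextUnit::new(0);
    for c in "héllo".chars() {
        // len_of_char measures UTF-8 length: 'é' contributes 2 bytes.
        offset += TextUnit::len_of_char(c);
    }
    assert_eq!(u32::from(offset), 6);
    assert_eq!(u32::from(offset - TextUnit::new(4)), 2);
}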
tests/lexer.rs
@@ -41,13 +41,15 @@ fn lexer_test_case(path: &Path) {
         dump_tokens(&tokens)
     };
     let expected = file::get_text(&path.with_extension("txt")).unwrap();
-
-    assert_diff!(
-        expected.as_str(),
-        actual.as_str(),
-        "\n",
-        0
-    )
+    let expected = expected.as_str();
+    let actual = actual.as_str();
+    if expected == actual {
+        return
+    }
+    if expected.trim() == actual.trim() {
+        panic!("Whitespace difference!")
+    }
+    assert_diff!(expected, actual, "\n", 0)
 }
 
 fn tokenize(text: &str) -> Vec<Token> {
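The diff cuts off at tokenize. A plausible body, strictly a sketch and not the commit's actual code, would loop next_token and slice off each consumed prefix:

fn tokenize(text: &str) -> Vec<Token> {
    let mut result = Vec::new();
    let mut rest = text;
    while !rest.is_empty() {
        let token = next_token(rest);
        // token.len counts bytes, so slicing by it stays char-aligned.
        let len = u32::from(token.len) as usize;
        result.push(token);
        rest = &rest[len..];
    }
    result
}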