2911: Implement collecting errors while tokenizing r=matklad a=Veetaha

Now we are collecting errors from `rustc_lexer` and returning them in the `ParsedToken { token, error }` and `ParsedTokens { tokens, errors }` structures **([UPD]: this has since been simplified, see the update below)**.

The main changes are in `ra_syntax/parsing/lexer.rs`, which now exposes the following functions and types:

```rust
pub fn tokenize(text: &str) -> ParsedTokens;
pub fn tokenize_append(text: &str, parsed_tokens_to_append_to: &mut ParsedTokens);
pub fn first_token(text: &str) -> Option<ParsedToken>; // allows any number of tokens in text
pub fn single_token(text: &str) -> Option<ParsedToken>; // allows only a single token in text

pub struct ParsedToken  { pub token: Token,       pub error: Option<SyntaxError> }
pub struct ParsedTokens { pub tokens: Vec<Token>, pub errors: Vec<SyntaxError>   }

pub enum TokenizeError { /* Simple enum which reflects rustc_lexer tokenization errors */ }
```
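For illustration, here is a minimal sketch of how a caller might consume this API (the `dump` helper below is mine, not part of the PR, and assumes `tokenize` and `ParsedTokens` are in scope):

```rust
// Hedged sketch: lex a snippet and report lexer errors alongside the tokens.
// Assumes the pre-review `tokenize`/`ParsedTokens` described above.
fn dump(text: &str) {
    let ParsedTokens { tokens, errors } = tokenize(text);
    for token in &tokens {
        println!("{:?} {}", token.kind, token.len);
    }
    for error in &errors {
        eprintln!("error: {}", error);
    }
}
```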
In the first commit I implemented this with iterators, but since this crate exists solely for `rust-analyzer` and all of its call sites are known, I decided it would be simpler to return plain vectors.

This is currently WIP, because I want to add tests for the error messages generated by the lexer.
I'd like to hear your thoughts on how to define these tests in the `ra_syntax/test-data` dir.

Related issues: #223 

**[UPD]**

After the PR review, the API was simplified:
```rust
pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>);
// Neither of the two lex_single functions checks for unescape errors
pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)>;
pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind>;

// This will be removed in the next PR in favour of simplifying `SyntaxError` to `(String, TextRange)`
pub enum TokenizeError { /* Simple enum which reflects rustc_lexer tokenization errors */ }

// This is private, but may be made public if demand arises in the future (principle of least privilege)
fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)>;
```
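As a rough usage sketch of the simplified API (the `is_valid_rename_target` helper is hypothetical; it mirrors how the `rename` and `parse_text` changes in the diff below use these functions):

```rust
use ra_syntax::{lex_single_valid_syntax_kind, tokenize, SyntaxKind};

// Hypothetical helper mirroring the `rename` change: accept a new name only
// if it lexes as exactly one IDENT or `_` token with no lexer errors.
fn is_valid_rename_target(new_name: &str) -> bool {
    match lex_single_valid_syntax_kind(new_name) {
        Some(SyntaxKind::IDENT) | Some(SyntaxKind::UNDERSCORE) => true,
        _ => false,
    }
}

fn main() {
    assert!(is_valid_rename_target("foo_bar"));
    assert!(!is_valid_rename_target("foo bar")); // more than one token

    // `tokenize` now returns lexer errors alongside the tokens, so callers
    // such as `parse_text` can merge them with the parser's errors.
    let (tokens, lexer_errors) = tokenize("\"unterminated");
    assert!(!tokens.is_empty());
    assert!(!lexer_errors.is_empty()); // the unterminated string is reported
}
```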

Co-authored-by: Veetaha <gerzoh1@gmail.com>
Merged as commit 918547dbe9 by bors[bot] on 2020-02-03 22:51:17 +00:00 (committed by GitHub).
160 changed files with 799 additions and 173 deletions


@ -2,7 +2,9 @@
use hir::ModuleSource;
use ra_db::{RelativePath, RelativePathBuf, SourceDatabase, SourceDatabaseExt};
use ra_syntax::{algo::find_node_at_offset, ast, tokenize, AstNode, SyntaxKind, SyntaxNode};
use ra_syntax::{
algo::find_node_at_offset, ast, lex_single_valid_syntax_kind, AstNode, SyntaxKind, SyntaxNode,
};
use ra_text_edit::TextEdit;
use crate::{
@ -17,11 +19,9 @@ pub(crate) fn rename(
position: FilePosition,
new_name: &str,
) -> Option<RangeInfo<SourceChange>> {
let tokens = tokenize(new_name);
if tokens.len() != 1
|| (tokens[0].kind != SyntaxKind::IDENT && tokens[0].kind != SyntaxKind::UNDERSCORE)
{
return None;
match lex_single_valid_syntax_kind(new_name)? {
SyntaxKind::IDENT | SyntaxKind::UNDERSCORE => (),
_ => return None,
}
let parse = db.parse(position.file_id);


@ -1,7 +1,7 @@
//! FIXME: write short doc here
use ra_parser::{Token, TokenSource};
use ra_syntax::{classify_literal, SmolStr, SyntaxKind, SyntaxKind::*, T};
use ra_syntax::{lex_single_valid_syntax_kind, SmolStr, SyntaxKind, SyntaxKind::*, T};
use std::cell::{Cell, Ref, RefCell};
use tt::buffer::{Cursor, TokenBuffer};
@ -129,8 +129,9 @@ fn convert_delim(d: Option<tt::DelimiterKind>, closing: bool) -> TtToken {
}
fn convert_literal(l: &tt::Literal) -> TtToken {
let kind =
classify_literal(&l.text).map(|tkn| tkn.kind).unwrap_or_else(|| match l.text.as_ref() {
let kind = lex_single_valid_syntax_kind(&l.text)
.filter(|kind| kind.is_literal())
.unwrap_or_else(|| match l.text.as_ref() {
"true" => T![true],
"false" => T![false],
_ => panic!("Fail to convert given literal {:#?}", &l),


@ -81,7 +81,7 @@ impl TreeDiff {
/// Specifically, returns a map whose keys are descendants of `from` and values
/// are descendants of `to`, such that `replace_descendants(from, map) == to`.
///
/// A trivial solution is a singletom map `{ from: to }`, but this function
/// A trivial solution is a singleton map `{ from: to }`, but this function
/// tries to find a more fine-grained diff.
pub fn diff(from: &SyntaxNode, to: &SyntaxNode) -> TreeDiff {
let mut buf = FxHashMap::default();


@ -41,7 +41,9 @@ use crate::syntax_node::GreenNode;
pub use crate::{
algo::InsertPosition,
ast::{AstNode, AstToken},
parsing::{classify_literal, tokenize, Token},
parsing::{
lex_single_syntax_kind, lex_single_valid_syntax_kind, tokenize, Token, TokenizeError,
},
ptr::{AstPtr, SyntaxNodePtr},
syntax_error::{Location, SyntaxError, SyntaxErrorKind},
syntax_node::{


@ -7,15 +7,23 @@ mod text_tree_sink;
mod reparsing;
use crate::{syntax_node::GreenNode, SyntaxError};
use text_token_source::TextTokenSource;
use text_tree_sink::TextTreeSink;
pub use self::lexer::{classify_literal, tokenize, Token};
pub use lexer::*;
pub(crate) use self::reparsing::incremental_reparse;
pub(crate) fn parse_text(text: &str) -> (GreenNode, Vec<SyntaxError>) {
let tokens = tokenize(&text);
let mut token_source = text_token_source::TextTokenSource::new(text, &tokens);
let mut tree_sink = text_tree_sink::TextTreeSink::new(text, &tokens);
let (tokens, lexer_errors) = tokenize(&text);
let mut token_source = TextTokenSource::new(text, &tokens);
let mut tree_sink = TextTreeSink::new(text, &tokens);
ra_parser::parse(&mut token_source, &mut tree_sink);
tree_sink.finish()
let (tree, mut parser_errors) = tree_sink.finish();
parser_errors.extend(lexer_errors);
(tree, parser_errors)
}


@ -1,8 +1,10 @@
//! FIXME: write short doc here
//! Lexer analyzes raw input string and produces lexemes (tokens).
//! It is just a bridge to `rustc_lexer`.
use crate::{
SyntaxError, SyntaxErrorKind,
SyntaxKind::{self, *},
TextUnit,
TextRange, TextUnit,
};
/// A token of Rust source.
@ -14,91 +16,261 @@ pub struct Token {
pub len: TextUnit,
}
fn match_literal_kind(kind: rustc_lexer::LiteralKind) -> SyntaxKind {
match kind {
rustc_lexer::LiteralKind::Int { .. } => INT_NUMBER,
rustc_lexer::LiteralKind::Float { .. } => FLOAT_NUMBER,
rustc_lexer::LiteralKind::Char { .. } => CHAR,
rustc_lexer::LiteralKind::Byte { .. } => BYTE,
rustc_lexer::LiteralKind::Str { .. } => STRING,
rustc_lexer::LiteralKind::ByteStr { .. } => BYTE_STRING,
rustc_lexer::LiteralKind::RawStr { .. } => RAW_STRING,
rustc_lexer::LiteralKind::RawByteStr { .. } => RAW_BYTE_STRING,
/// Break a string up into its component tokens.
/// Beware that it checks for shebang first and its length contributes to the
/// resulting tokens' offsets.
pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
// non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
if text.is_empty() {
return Default::default();
}
let mut tokens = Vec::new();
let mut errors = Vec::new();
let mut offset: usize = rustc_lexer::strip_shebang(text)
.map(|shebang_len| {
tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
shebang_len
})
.unwrap_or(0);
let text_without_shebang = &text[offset..];
for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
let token_len = TextUnit::from_usize(rustc_token.len);
let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
let (syntax_kind, error) =
rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
tokens.push(Token { kind: syntax_kind, len: token_len });
if let Some(error) = error {
errors.push(SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range));
}
offset += rustc_token.len;
}
(tokens, errors)
}
/// Break a string up into its component tokens
pub fn tokenize(text: &str) -> Vec<Token> {
/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
/// encountered at the beginning of the string.
///
/// Returns `None` if the string contains zero *or two or more* tokens.
/// The token is malformed if the returned error is not `None`.
///
/// Beware that unescape errors are not checked at tokenization time.
pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
lex_first_token(text)
.filter(|(token, _)| token.len.to_usize() == text.len())
.map(|(token, error)| (token.kind, error))
}
/// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
/// returns `None` if any tokenization error occurred.
///
/// Beware that unescape errors are not checked at tokenization time.
pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
lex_first_token(text)
.filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
.map(|(token, _error)| token.kind)
}
/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
/// encountered at the beginning of the string.
///
/// Returns `None` if the string contains zero tokens or if the token was parsed
/// with an error.
/// The token is malformed if the returned error is not `None`.
///
/// Beware that unescape errors are not checked at tokenization time.
fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
// non-empty string is a precondition of `rustc_lexer::first_token()`.
if text.is_empty() {
return vec![];
return None;
}
let mut text = text;
let mut acc = Vec::new();
if let Some(len) = rustc_lexer::strip_shebang(text) {
acc.push(Token { kind: SHEBANG, len: TextUnit::from_usize(len) });
text = &text[len..];
}
while !text.is_empty() {
let rustc_token = rustc_lexer::first_token(text);
let kind = match rustc_token.kind {
rustc_lexer::TokenKind::LineComment => COMMENT,
rustc_lexer::TokenKind::BlockComment { .. } => COMMENT,
rustc_lexer::TokenKind::Whitespace => WHITESPACE,
rustc_lexer::TokenKind::Ident => {
let token_text = &text[..rustc_token.len];
let rustc_token = rustc_lexer::first_token(text);
let (syntax_kind, error) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);
let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
let error = error.map(|error| {
SyntaxError::new(
SyntaxErrorKind::TokenizeError(error),
TextRange::from_to(TextUnit::from(0), TextUnit::of_str(text)),
)
});
Some((token, error))
}
// FIXME: simplify TokenizeError to `SyntaxError(String, TextRange)` as per @matklad advice:
// https://github.com/rust-analyzer/rust-analyzer/pull/2911/files#r371175067
/// Describes the values of `SyntaxErrorKind::TokenizeError` enum variant.
/// It describes all the types of errors that may happen during the tokenization
/// of Rust source.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TokenizeError {
/// Base prefix was provided, but there were no digits
/// after it, e.g. `0x`, `0b`.
EmptyInt,
/// Float exponent lacks digits e.g. `12.34e+`, `12.3E+`, `12e-`, `1_E-`,
EmptyExponent,
/// Block comment lacks trailing delimiter `*/`
UnterminatedBlockComment,
/// Character literal lacks trailing delimiter `'`
UnterminatedChar,
/// Characterish byte literal lacks trailing delimiter `'`
UnterminatedByte,
/// String literal lacks trailing delimiter `"`
UnterminatedString,
/// Byte string literal lacks trailing delimiter `"`
UnterminatedByteString,
/// Raw literal lacks trailing delimiter e.g. `"##`
UnterminatedRawString,
/// Raw byte string literal lacks trailing delimiter e.g. `"##`
UnterminatedRawByteString,
/// Raw string lacks a quote after the pound characters e.g. `r###`
UnstartedRawString,
/// Raw byte string lacks a quote after the pound characters e.g. `br###`
UnstartedRawByteString,
/// Lifetime starts with a number e.g. `'4ever`
LifetimeStartsWithNumber,
}
fn rustc_token_kind_to_syntax_kind(
rustc_token_kind: &rustc_lexer::TokenKind,
token_text: &str,
) -> (SyntaxKind, Option<TokenizeError>) {
// A note on an intended tradeoff:
// We drop some useful information here (see patterns with double dots `..`)
// Storing that info in `SyntaxKind` is not possible due to its layout requirement of
// being a `u16`, which comes from `rowan::SyntaxKind`.
let syntax_kind = {
use rustc_lexer::TokenKind as TK;
use TokenizeError as TE;
match rustc_token_kind {
TK::LineComment => COMMENT,
TK::BlockComment { terminated: true } => COMMENT,
TK::BlockComment { terminated: false } => {
return (COMMENT, Some(TE::UnterminatedBlockComment));
}
TK::Whitespace => WHITESPACE,
TK::Ident => {
if token_text == "_" {
UNDERSCORE
} else {
SyntaxKind::from_keyword(&text[..rustc_token.len]).unwrap_or(IDENT)
SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
}
}
rustc_lexer::TokenKind::RawIdent => IDENT,
rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
rustc_lexer::TokenKind::Lifetime { .. } => LIFETIME,
rustc_lexer::TokenKind::Semi => SEMI,
rustc_lexer::TokenKind::Comma => COMMA,
rustc_lexer::TokenKind::Dot => DOT,
rustc_lexer::TokenKind::OpenParen => L_PAREN,
rustc_lexer::TokenKind::CloseParen => R_PAREN,
rustc_lexer::TokenKind::OpenBrace => L_CURLY,
rustc_lexer::TokenKind::CloseBrace => R_CURLY,
rustc_lexer::TokenKind::OpenBracket => L_BRACK,
rustc_lexer::TokenKind::CloseBracket => R_BRACK,
rustc_lexer::TokenKind::At => AT,
rustc_lexer::TokenKind::Pound => POUND,
rustc_lexer::TokenKind::Tilde => TILDE,
rustc_lexer::TokenKind::Question => QUESTION,
rustc_lexer::TokenKind::Colon => COLON,
rustc_lexer::TokenKind::Dollar => DOLLAR,
rustc_lexer::TokenKind::Eq => EQ,
rustc_lexer::TokenKind::Not => EXCL,
rustc_lexer::TokenKind::Lt => L_ANGLE,
rustc_lexer::TokenKind::Gt => R_ANGLE,
rustc_lexer::TokenKind::Minus => MINUS,
rustc_lexer::TokenKind::And => AMP,
rustc_lexer::TokenKind::Or => PIPE,
rustc_lexer::TokenKind::Plus => PLUS,
rustc_lexer::TokenKind::Star => STAR,
rustc_lexer::TokenKind::Slash => SLASH,
rustc_lexer::TokenKind::Caret => CARET,
rustc_lexer::TokenKind::Percent => PERCENT,
rustc_lexer::TokenKind::Unknown => ERROR,
};
let token = Token { kind, len: TextUnit::from_usize(rustc_token.len) };
acc.push(token);
text = &text[rustc_token.len..];
}
acc
}
pub fn classify_literal(text: &str) -> Option<Token> {
let t = rustc_lexer::first_token(text);
if t.len != text.len() {
return None;
}
let kind = match t.kind {
rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
_ => return None,
TK::RawIdent => IDENT,
TK::Literal { kind, .. } => return match_literal_kind(&kind),
TK::Lifetime { starts_with_number: false } => LIFETIME,
TK::Lifetime { starts_with_number: true } => {
return (LIFETIME, Some(TE::LifetimeStartsWithNumber))
}
TK::Semi => SEMI,
TK::Comma => COMMA,
TK::Dot => DOT,
TK::OpenParen => L_PAREN,
TK::CloseParen => R_PAREN,
TK::OpenBrace => L_CURLY,
TK::CloseBrace => R_CURLY,
TK::OpenBracket => L_BRACK,
TK::CloseBracket => R_BRACK,
TK::At => AT,
TK::Pound => POUND,
TK::Tilde => TILDE,
TK::Question => QUESTION,
TK::Colon => COLON,
TK::Dollar => DOLLAR,
TK::Eq => EQ,
TK::Not => EXCL,
TK::Lt => L_ANGLE,
TK::Gt => R_ANGLE,
TK::Minus => MINUS,
TK::And => AMP,
TK::Or => PIPE,
TK::Plus => PLUS,
TK::Star => STAR,
TK::Slash => SLASH,
TK::Caret => CARET,
TK::Percent => PERCENT,
TK::Unknown => ERROR,
}
};
Some(Token { kind, len: TextUnit::from_usize(t.len) })
return (syntax_kind, None);
fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> (SyntaxKind, Option<TokenizeError>) {
use rustc_lexer::LiteralKind as LK;
use TokenizeError as TE;
#[rustfmt::skip]
let syntax_kind = match *kind {
LK::Int { empty_int: false, .. } => INT_NUMBER,
LK::Int { empty_int: true, .. } => {
return (INT_NUMBER, Some(TE::EmptyInt))
}
LK::Float { empty_exponent: false, .. } => FLOAT_NUMBER,
LK::Float { empty_exponent: true, .. } => {
return (FLOAT_NUMBER, Some(TE::EmptyExponent))
}
LK::Char { terminated: true } => CHAR,
LK::Char { terminated: false } => {
return (CHAR, Some(TE::UnterminatedChar))
}
LK::Byte { terminated: true } => BYTE,
LK::Byte { terminated: false } => {
return (BYTE, Some(TE::UnterminatedByte))
}
LK::Str { terminated: true } => STRING,
LK::Str { terminated: false } => {
return (STRING, Some(TE::UnterminatedString))
}
LK::ByteStr { terminated: true } => BYTE_STRING,
LK::ByteStr { terminated: false } => {
return (BYTE_STRING, Some(TE::UnterminatedByteString))
}
LK::RawStr { started: true, terminated: true, .. } => RAW_STRING,
LK::RawStr { started: true, terminated: false, .. } => {
return (RAW_STRING, Some(TE::UnterminatedRawString))
}
LK::RawStr { started: false, .. } => {
return (RAW_STRING, Some(TE::UnstartedRawString))
}
LK::RawByteStr { started: true, terminated: true, .. } => RAW_BYTE_STRING,
LK::RawByteStr { started: true, terminated: false, .. } => {
return (RAW_BYTE_STRING, Some(TE::UnterminatedRawByteString))
}
LK::RawByteStr { started: false, .. } => {
return (RAW_BYTE_STRING, Some(TE::UnstartedRawByteString))
}
};
(syntax_kind, None)
}
}


@ -12,7 +12,7 @@ use ra_text_edit::AtomTextEdit;
use crate::{
algo,
parsing::{
lexer::{tokenize, Token},
lexer::{lex_single_syntax_kind, tokenize, Token},
text_token_source::TextTokenSource,
text_tree_sink::TextTreeSink,
},
@ -41,37 +41,42 @@ fn reparse_token<'node>(
root: &'node SyntaxNode,
edit: &AtomTextEdit,
) -> Option<(GreenNode, TextRange)> {
let token = algo::find_covering_element(root, edit.delete).as_token()?.clone();
match token.kind() {
let prev_token = algo::find_covering_element(root, edit.delete).as_token()?.clone();
let prev_token_kind = prev_token.kind();
match prev_token_kind {
WHITESPACE | COMMENT | IDENT | STRING | RAW_STRING => {
if token.kind() == WHITESPACE || token.kind() == COMMENT {
if prev_token_kind == WHITESPACE || prev_token_kind == COMMENT {
// removing a newline may extend the previous token
if token.text().to_string()[edit.delete - token.text_range().start()].contains('\n')
{
let deleted_range = edit.delete - prev_token.text_range().start();
if prev_token.text()[deleted_range].contains('\n') {
return None;
}
}
let text = get_text_after_edit(token.clone().into(), &edit);
let lex_tokens = tokenize(&text);
let lex_token = match lex_tokens[..] {
[lex_token] if lex_token.kind == token.kind() => lex_token,
_ => return None,
};
let mut new_text = get_text_after_edit(prev_token.clone().into(), &edit);
let (new_token_kind, _error) = lex_single_syntax_kind(&new_text)?;
if lex_token.kind == IDENT && is_contextual_kw(&text) {
if new_token_kind != prev_token_kind
|| (new_token_kind == IDENT && is_contextual_kw(&new_text))
{
return None;
}
if let Some(next_char) = root.text().char_at(token.text_range().end()) {
let tokens_with_next_char = tokenize(&format!("{}{}", text, next_char));
if tokens_with_next_char.len() == 1 {
// Check that the edited token is not a part of a bigger token.
// E.g. if for the source code `bruh"str"` the user removed `ruh`, then
// `b` no longer remains an identifier, but becomes part of a byte string literal
if let Some(next_char) = root.text().char_at(prev_token.text_range().end()) {
new_text.push(next_char);
let token_with_next_char = lex_single_syntax_kind(&new_text);
if let Some((_kind, _error)) = token_with_next_char {
return None;
}
new_text.pop();
}
let new_token = GreenToken::new(rowan::SyntaxKind(token.kind().into()), text.into());
Some((token.replace_with(new_token), token.text_range()))
let new_token =
GreenToken::new(rowan::SyntaxKind(prev_token_kind.into()), new_text.into());
Some((prev_token.replace_with(new_token), prev_token.text_range()))
}
_ => None,
}
@ -83,20 +88,26 @@ fn reparse_block<'node>(
) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> {
let (node, reparser) = find_reparsable_node(root, edit.delete)?;
let text = get_text_after_edit(node.clone().into(), &edit);
let tokens = tokenize(&text);
let (tokens, new_lexer_errors) = tokenize(&text);
if !is_balanced(&tokens) {
return None;
}
let mut token_source = TextTokenSource::new(&text, &tokens);
let mut tree_sink = TextTreeSink::new(&text, &tokens);
reparser.parse(&mut token_source, &mut tree_sink);
let (green, new_errors) = tree_sink.finish();
Some((node.replace_with(green), new_errors, node.text_range()))
let (green, mut new_parser_errors) = tree_sink.finish();
new_parser_errors.extend(new_lexer_errors);
Some((node.replace_with(green), new_parser_errors, node.text_range()))
}
fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String {
let edit =
AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone());
let text = match element {
NodeOrToken::Token(token) => token.text().to_string(),
NodeOrToken::Node(node) => node.text().to_string(),
@ -113,6 +124,7 @@ fn is_contextual_kw(text: &str) -> bool {
fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> {
let node = algo::find_covering_element(node, range);
let mut ancestors = match node {
NodeOrToken::Token(it) => it.parent().ancestors(),
NodeOrToken::Node(it) => it.ancestors(),
@ -182,7 +194,6 @@ mod tests {
let fully_reparsed = SourceFile::parse(&after);
let incrementally_reparsed: Parse<SourceFile> = {
let f = SourceFile::parse(&before);
let edit = AtomTextEdit { delete: range, insert: replace_with.to_string() };
let (green, new_errors, range) =
incremental_reparse(f.tree().syntax(), &edit, f.errors.to_vec()).unwrap();
assert_eq!(range.len(), reparsed_len.into(), "reparsed fragment has wrong length");


@ -92,8 +92,8 @@ impl<'a> TreeSink for TextTreeSink<'a> {
}
impl<'a> TextTreeSink<'a> {
pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> TextTreeSink<'a> {
TextTreeSink {
pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> Self {
Self {
text,
tokens,
text_pos: 0.into(),


@ -4,7 +4,7 @@ use std::fmt;
use ra_parser::ParseError;
use crate::{validation::EscapeError, TextRange, TextUnit};
use crate::{validation::EscapeError, TextRange, TextUnit, TokenizeError};
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct SyntaxError {
@ -12,6 +12,10 @@ pub struct SyntaxError {
location: Location,
}
// FIXME: Location should be just `Location(TextRange)`
// the TextUnit enum member just unnecessarily complicates things;
// we shouldn't treat it specially, it is just a `TextRange { start: x, end: x + 1 }`,
// see `location_to_range()` in ra_ide/src/diagnostics
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum Location {
Offset(TextUnit),
@ -67,6 +71,10 @@ impl SyntaxError {
self
}
pub fn debug_dump(&self, acc: &mut impl fmt::Write) {
writeln!(acc, "error {:?}: {}", self.location(), self.kind()).unwrap();
}
}
impl fmt::Display for SyntaxError {
@ -79,6 +87,10 @@ impl fmt::Display for SyntaxError {
pub enum SyntaxErrorKind {
ParseError(ParseError),
EscapeError(EscapeError),
TokenizeError(TokenizeError),
// FIXME: the obvious pattern of this enum dictates that the following enum variants
// should be wrapped into something like `SemanticError(SemanticError)`
// or `ValidateError(ValidateError)` or `SemanticValidateError(...)`
InvalidBlockAttr,
InvalidMatchInnerAttr,
InvalidTupleIndexFormat,
@ -101,6 +113,7 @@ impl fmt::Display for SyntaxErrorKind {
}
ParseError(msg) => write!(f, "{}", msg.0),
EscapeError(err) => write!(f, "{}", err),
TokenizeError(err) => write!(f, "{}", err),
VisibilityNotAllowed => {
write!(f, "unnecessary visibility qualifier")
}
@ -111,6 +124,51 @@ impl fmt::Display for SyntaxErrorKind {
}
}
impl fmt::Display for TokenizeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[rustfmt::skip]
let msg = match self {
TokenizeError::EmptyInt => {
"Missing digits after the integer base prefix"
}
TokenizeError::EmptyExponent => {
"Missing digits after the exponent symbol"
}
TokenizeError::UnterminatedBlockComment => {
"Missing trailing `*/` symbols to terminate the block comment"
}
TokenizeError::UnterminatedChar => {
"Missing trailing `'` symbol to terminate the character literal"
}
TokenizeError::UnterminatedByte => {
"Missing trailing `'` symbol to terminate the byte literal"
}
TokenizeError::UnterminatedString => {
"Missing trailing `\"` symbol to terminate the string literal"
}
TokenizeError::UnterminatedByteString => {
"Missing trailing `\"` symbol to terminate the byte string literal"
}
TokenizeError::UnterminatedRawString => {
"Missing trailing `\"` with `#` symbols to terminate the raw string literal"
}
TokenizeError::UnterminatedRawByteString => {
"Missing trailing `\"` with `#` symbols to terminate the raw byte string literal"
}
TokenizeError::UnstartedRawString => {
"Missing `\"` symbol after `#` symbols to begin the raw string literal"
}
TokenizeError::UnstartedRawByteString => {
"Missing `\"` symbol after `#` symbols to begin the raw byte string literal"
}
TokenizeError::LifetimeStartsWithNumber => {
"Lifetime name cannot start with a number"
}
};
write!(f, "{}", msg)
}
}
impl fmt::Display for EscapeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let msg = match self {


@ -4,7 +4,7 @@
//! `SyntaxNode`, and a basic traversal API (parent, children, siblings).
//!
//! The *real* implementation is in the (language-agnostic) `rowan` crate, this
//! modules just wraps its API.
//! module just wraps its API.
use ra_parser::ParseError;
use rowan::{GreenNodeBuilder, Language};
@ -38,17 +38,12 @@ pub type SyntaxElementChildren = rowan::SyntaxElementChildren<RustLanguage>;
pub use rowan::{Direction, NodeOrToken};
#[derive(Default)]
pub struct SyntaxTreeBuilder {
errors: Vec<SyntaxError>,
inner: GreenNodeBuilder<'static>,
}
impl Default for SyntaxTreeBuilder {
fn default() -> SyntaxTreeBuilder {
SyntaxTreeBuilder { errors: Vec::new(), inner: GreenNodeBuilder::new() }
}
}
impl SyntaxTreeBuilder {
pub(crate) fn finish_raw(self) -> (GreenNode, Vec<SyntaxError>) {
let green = self.inner.finish();


@ -1,18 +1,28 @@
use std::{
fmt::Write,
path::{Component, PathBuf},
path::{Component, Path, PathBuf},
};
use test_utils::{collect_tests, dir_tests, project_dir, read_text};
use crate::{fuzz, SourceFile};
use crate::{fuzz, tokenize, Location, SourceFile, SyntaxError, TextRange, Token};
#[test]
fn lexer_tests() {
dir_tests(&test_data_dir(), &["lexer"], |text, _| {
let tokens = crate::tokenize(text);
dump_tokens(&tokens, text)
})
// FIXME:
// * Add tests for unicode escapes in byte-character and [raw]-byte-string literals
// * Add tests for unescape errors
dir_tests(&test_data_dir(), &["lexer/ok"], |text, path| {
let (tokens, errors) = tokenize(text);
assert_errors_are_absent(&errors, path);
dump_tokens_and_errors(&tokens, &errors, text)
});
dir_tests(&test_data_dir(), &["lexer/err"], |text, path| {
let (tokens, errors) = tokenize(text);
assert_errors_are_present(&errors, path);
dump_tokens_and_errors(&tokens, &errors, text)
});
}
#[test]
@ -32,18 +42,13 @@ fn parser_tests() {
dir_tests(&test_data_dir(), &["parser/inline/ok", "parser/ok"], |text, path| {
let parse = SourceFile::parse(text);
let errors = parse.errors();
assert_eq!(
errors,
&[] as &[crate::SyntaxError],
"There should be no errors in the file {:?}",
path.display(),
);
assert_errors_are_absent(&errors, path);
parse.debug_dump()
});
dir_tests(&test_data_dir(), &["parser/err", "parser/inline/err"], |text, path| {
let parse = SourceFile::parse(text);
let errors = parse.errors();
assert!(!errors.is_empty(), "There should be errors in the file {:?}", path.display());
assert_errors_are_present(&errors, path);
parse.debug_dump()
});
}
@ -75,7 +80,7 @@ fn self_hosting_parsing() {
.into_iter()
.filter_entry(|entry| {
!entry.path().components().any(|component| {
// Get all files which are not in the crates/ra_syntax/tests/data folder
// Get all files which are not in the crates/ra_syntax/test_data folder
component == Component::Normal(OsStr::new("test_data"))
})
})
@ -101,15 +106,47 @@ fn test_data_dir() -> PathBuf {
project_dir().join("crates/ra_syntax/test_data")
}
fn dump_tokens(tokens: &[crate::Token], text: &str) -> String {
fn assert_errors_are_present(errors: &[SyntaxError], path: &Path) {
assert!(!errors.is_empty(), "There should be errors in the file {:?}", path.display());
}
fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
assert_eq!(
errors,
&[] as &[SyntaxError],
"There should be no errors in the file {:?}",
path.display(),
);
}
fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
let mut acc = String::new();
let mut offset = 0;
for token in tokens {
let len: u32 = token.len.into();
let len = len as usize;
let token_text = &text[offset..offset + len];
offset += len;
write!(acc, "{:?} {} {:?}\n", token.kind, token.len, token_text).unwrap()
let token_len = token.len.to_usize();
let token_text = &text[offset..offset + token_len];
offset += token_len;
writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap();
}
for err in errors {
let err_range = location_to_range(err.location());
writeln!(
acc,
"> error{:?} token({:?}) msg({})",
err.location(),
&text[err_range],
err.kind()
)
.unwrap();
}
return acc;
// FIXME: copy-pasted this from `ra_ide/src/diagnostics.rs`
// `Location` will be refactored soon in new PR, see todos here:
// https://github.com/rust-analyzer/rust-analyzer/issues/223
fn location_to_range(location: Location) -> TextRange {
match location {
Location::Offset(offset) => TextRange::offset_len(offset, 1.into()),
Location::Range(range) => range,
}
}
acc
}


@ -94,6 +94,12 @@ impl From<rustc_lexer::unescape::EscapeError> for SyntaxErrorKind {
}
pub(crate) fn validate(root: &SyntaxNode) -> Vec<SyntaxError> {
// FIXME:
// * Add validation of character literal containing only a single char
// * Add validation of `crate` keyword not appearing in the middle of the symbol path
// * Add validation that doc comments are attached to nodes
// * Remove validation of unterminated literals (it is already implemented in `tokenize()`)
let mut errors = Vec::new();
for node in root.descendants() {
match_ast! {

View file

@ -1,3 +0,0 @@
#!/usr/bin/env bash
// hello
//! World


@ -1,6 +0,0 @@
SHEBANG 19 "#!/usr/bin/env bash"
WHITESPACE 1 "\n"
COMMENT 8 "// hello"
WHITESPACE 1 "\n"
COMMENT 9 "//! World"
WHITESPACE 1 "\n"


@ -1 +0,0 @@
LIFETIME 2 "\'1"


@ -1 +0,0 @@
STRING 7 "\"hello\n"


@ -0,0 +1,2 @@
CHAR 1 "\'"
> error[0; 1) token("\'") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
CHAR 5 "\'🦀"
> error[0; 5) token("\'🦀") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
CHAR 5 "\'\\x7f"
> error[0; 5) token("\'\\x7f") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
CHAR 9 "\'\\u{20AA}"
> error[0; 9) token("\'\\u{20AA}") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
CHAR 2 "\' "
> error[0; 2) token("\' ") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
CHAR 2 "\'\\"
> error[0; 2) token("\'\\") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
CHAR 3 "\'\\n"
> error[0; 3) token("\'\\n") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
CHAR 3 "\'\\\'"
> error[0; 3) token("\'\\\'") msg(Missing trailing `'` symbol to terminate the character literal)


@ -0,0 +1,2 @@
BYTE 2 "b\'"
> error[0; 2) token("b\'") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1 @@
b'🦀


@ -0,0 +1,2 @@
BYTE 6 "b\'🦀"
> error[0; 6) token("b\'🦀") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1,2 @@
BYTE 6 "b\'\\x7f"
> error[0; 6) token("b\'\\x7f") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1,2 @@
BYTE 10 "b\'\\u{20AA}"
> error[0; 10) token("b\'\\u{20AA}") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1,2 @@
BYTE 3 "b\' "
> error[0; 3) token("b\' ") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1,2 @@
BYTE 3 "b\'\\"
> error[0; 3) token("b\'\\") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1,2 @@
BYTE 4 "b\'\\n"
> error[0; 4) token("b\'\\n") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1,2 @@
BYTE 4 "b\'\\\'"
> error[0; 4) token("b\'\\\'") msg(Missing trailing `'` symbol to terminate the byte literal)


@ -0,0 +1,2 @@
STRING 1 "\""
> error[0; 1) token("\"") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
STRING 5 "\"🦀"
> error[0; 5) token("\"🦀") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
STRING 5 "\"\\x7f"
> error[0; 5) token("\"\\x7f") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
STRING 9 "\"\\u{20AA}"
> error[0; 9) token("\"\\u{20AA}") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
STRING 2 "\" "
> error[0; 2) token("\" ") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
STRING 2 "\"\\"
> error[0; 2) token("\"\\") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
STRING 3 "\"\\n"
> error[0; 3) token("\"\\n") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
STRING 3 "\"\\\""
> error[0; 3) token("\"\\\"") msg(Missing trailing `"` symbol to terminate the string literal)


@ -0,0 +1,2 @@
BYTE_STRING 2 "b\""
> error[0; 2) token("b\"") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
BYTE_STRING 6 "b\"🦀"
> error[0; 6) token("b\"🦀") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
BYTE_STRING 6 "b\"\\x7f"
> error[0; 6) token("b\"\\x7f") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
BYTE_STRING 10 "b\"\\u{20AA}"
> error[0; 10) token("b\"\\u{20AA}") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
BYTE_STRING 3 "b\" "
> error[0; 3) token("b\" ") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
BYTE_STRING 3 "b\"\\"
> error[0; 3) token("b\"\\") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
BYTE_STRING 4 "b\"\\n"
> error[0; 4) token("b\"\\n") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
BYTE_STRING 4 "b\"\\\""
> error[0; 4) token("b\"\\\"") msg(Missing trailing `"` symbol to terminate the byte string literal)


@ -0,0 +1,2 @@
RAW_STRING 4 "r##\""
> error[0; 4) token("r##\"") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)


@ -0,0 +1,2 @@
RAW_STRING 8 "r##\"🦀"
> error[0; 8) token("r##\"🦀") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)


@ -0,0 +1,2 @@
RAW_STRING 8 "r##\"\\x7f"
> error[0; 8) token("r##\"\\x7f") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)


@ -0,0 +1,2 @@
RAW_STRING 12 "r##\"\\u{20AA}"
> error[0; 12) token("r##\"\\u{20AA}") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)


@ -0,0 +1,2 @@
RAW_STRING 5 "r##\" "
> error[0; 5) token("r##\" ") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)


@ -0,0 +1,2 @@
RAW_STRING 5 "r##\"\\"
> error[0; 5) token("r##\"\\") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)


@ -0,0 +1,2 @@
RAW_STRING 6 "r##\"\\n"
> error[0; 6) token("r##\"\\n") msg(Missing trailing `"` with `#` symbols to terminate the raw string literal)


@ -0,0 +1,2 @@
RAW_BYTE_STRING 5 "br##\""
> error[0; 5) token("br##\"") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)


@ -0,0 +1,2 @@
RAW_BYTE_STRING 9 "br##\"🦀"
> error[0; 9) token("br##\"🦀") msg(Missing trailing `"` with `#` symbols to terminate the raw byte string literal)

Some files were not shown because too many files changed in this diff.