|
1 | 1 | //! Lexer analyzes raw input string and produces lexemes (tokens).
|
2 | 2 | //! It is just a bridge to `rustc_lexer`.
|
3 | 3 |
|
4 |
| -use rustc_lexer::{LiteralKind as LK, RawStrError}; |
5 |
| - |
6 | 4 | use std::convert::TryInto;
|
7 | 5 |
|
| 6 | +use rustc_lexer::{LiteralKind as LK, RawStrError}; |
| 7 | + |
8 | 8 | use crate::{
|
9 | 9 | SyntaxError,
|
10 | 10 | SyntaxKind::{self, *},
|
@@ -61,27 +61,30 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
|
61 | 61 | (tokens, errors)
|
62 | 62 | }
|
63 | 63 |
|
64 |
| -/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token |
65 |
| -/// encountered at the beginning of the string. |
| 64 | +/// Returns `SyntaxKind` and `Option<SyntaxError>` if `text` parses as a single token. |
66 | 65 | ///
|
67 | 66 | /// Returns `None` if the string contains zero *or two or more* tokens.
|
68 | 67 | /// The token is malformed if the returned error is not `None`.
|
69 | 68 | ///
|
70 | 69 | /// Beware that unescape errors are not checked at tokenization time.
|
71 | 70 | pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
|
72 |
| - lex_first_token(text) |
73 |
| - .filter(|(token, _)| token.len == TextSize::of(text)) |
74 |
| - .map(|(token, error)| (token.kind, error)) |
| 71 | + let (first_token, err) = lex_first_token(text)?; |
| 72 | + if first_token.len != TextSize::of(text) { |
| 73 | + return None; |
| 74 | + } |
| 75 | + Some((first_token.kind, err)) |
75 | 76 | }
|
76 | 77 |
|
77 | 78 | /// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
|
78 | 79 | /// returns `None` if any tokenization error occurred.
|
79 | 80 | ///
|
80 | 81 | /// Beware that unescape errors are not checked at tokenization time.
|
81 | 82 | pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
|
82 |
| - lex_first_token(text) |
83 |
| - .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text)) |
84 |
| - .map(|(token, _error)| token.kind) |
| 83 | + let (single_token, err) = lex_single_syntax_kind(text)?; |
| 84 | + if err.is_some() { |
| 85 | + return None; |
| 86 | + } |
| 87 | + Some(single_token) |
85 | 88 | }
|
86 | 89 |
|
87 | 90 | /// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
|
|
0 commit comments