Skip to content

Commit 384e1ce

Browse files
committed
Use prev_token and next_token
1 parent b7ab079 commit 384e1ce

File tree

1 file changed

+47
-44
lines changed

1 file changed

+47
-44
lines changed

crates/ra_ide/src/extend_selection.rs

Lines changed: 47 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ use ra_syntax::{
1111

1212
use crate::{db::RootDatabase, expand::descend_into_macros, FileId, FileRange};
1313
use hir::db::AstDatabase;
14-
use itertools::Itertools;
14+
use std::iter::successors;
1515

1616
pub(crate) fn extend_selection(db: &RootDatabase, frange: FileRange) -> TextRange {
1717
let src = db.parse(frange.file_id).tree();
@@ -110,46 +110,28 @@ fn extend_tokens_from_range(
110110
macro_call: ast::MacroCall,
111111
original_range: TextRange,
112112
) -> Option<TextRange> {
113-
// Find all non-whitespace tokens under MacroCall
114-
let all_tokens: Vec<_> = macro_call
115-
.syntax()
116-
.descendants_with_tokens()
117-
.filter_map(|n| {
118-
let token = n.as_token()?;
119-
if token.kind() == WHITESPACE {
120-
None
121-
} else {
122-
Some(token.clone())
123-
}
124-
})
125-
.sorted_by(|a, b| Ord::cmp(&a.text_range().start(), &b.text_range().start()))
126-
.collect();
127-
128-
// Get all indices which is in original range
129-
let indices: Vec<_> =
130-
all_tokens
131-
.iter()
132-
.enumerate()
133-
.filter_map(|(i, token)| {
134-
if token.text_range().is_subrange(&original_range) {
135-
Some(i)
136-
} else {
137-
None
138-
}
139-
})
140-
.collect();
113+
let src = find_covering_element(&macro_call.syntax(), original_range);
114+
let (first_token, last_token) = match src {
115+
NodeOrToken::Node(it) => (it.first_token()?, it.last_token()?),
116+
NodeOrToken::Token(it) => (it.clone(), it),
117+
};
118+
119+
let mut first_token = skip_whitespace(first_token, Direction::Next)?;
120+
let mut last_token = skip_whitespace(last_token, Direction::Prev)?;
141121

142-
// The first and last token index in original_range
143-
// Note that the indices is sorted
144-
let first_idx = *indices.first()?;
145-
let last_idx = *indices.last()?;
122+
while !first_token.text_range().is_subrange(&original_range) {
123+
first_token = skip_whitespace(first_token.next_token()?, Direction::Next)?;
124+
}
125+
while !last_token.text_range().is_subrange(&original_range) {
126+
last_token = skip_whitespace(last_token.prev_token()?, Direction::Prev)?;
127+
}
146128

147129
// compute original mapped token range
148130
let expanded = {
149-
let first_node = descend_into_macros(db, file_id, all_tokens[first_idx].clone());
131+
let first_node = descend_into_macros(db, file_id, first_token.clone());
150132
let first_node = first_node.map(|it| it.text_range());
151133

152-
let last_node = descend_into_macros(db, file_id, all_tokens[last_idx].clone());
134+
let last_node = descend_into_macros(db, file_id, last_token.clone());
153135
if last_node.file_id == file_id.into() || first_node.file_id != last_node.file_id {
154136
return None;
155137
}
@@ -160,27 +142,48 @@ fn extend_tokens_from_range(
160142
let src = db.parse_or_expand(expanded.file_id)?;
161143
let parent = shallowest_node(&find_covering_element(&src, expanded.value))?.parent()?;
162144

163-
let validate = |&idx: &usize| {
164-
let token: &SyntaxToken = &all_tokens[idx];
145+
let validate = |token: SyntaxToken| {
165146
let node = descend_into_macros(db, file_id, token.clone());
166-
167-
node.file_id == expanded.file_id
147+
if node.file_id == expanded.file_id
168148
&& node.value.text_range().is_subrange(&parent.text_range())
149+
{
150+
Some(token)
151+
} else {
152+
None
153+
}
169154
};
170155

171156
// Find the first and last text range under expanded parent
172-
let first = (0..=first_idx).rev().take_while(validate).last()?;
173-
let last = (last_idx..all_tokens.len()).take_while(validate).last()?;
174-
175-
let range = union_range(all_tokens[first].text_range(), all_tokens[last].text_range());
176-
157+
let first = successors(Some(first_token), |token| {
158+
validate(skip_whitespace(token.prev_token()?, Direction::Prev)?)
159+
})
160+
.last()?;
161+
let last = successors(Some(last_token), |token| {
162+
validate(skip_whitespace(token.next_token()?, Direction::Next)?)
163+
})
164+
.last()?;
165+
166+
let range = union_range(first.text_range(), last.text_range());
177167
if original_range.is_subrange(&range) && original_range != range {
178168
Some(range)
179169
} else {
180170
None
181171
}
182172
}
183173

174+
fn skip_whitespace(
175+
mut token: SyntaxToken,
176+
direction: Direction,
177+
) -> Option<SyntaxToken> {
178+
while token.kind() == WHITESPACE {
179+
token = match direction {
180+
Direction::Next => token.next_token()?,
181+
Direction::Prev => token.prev_token()?,
182+
}
183+
}
184+
Some(token)
185+
}
186+
184187
fn union_range(range: TextRange, r: TextRange) -> TextRange {
185188
let start = range.start().min(r.start());
186189
let end = range.end().max(r.end());

0 commit comments

Comments (0)