
Commit 3955ece

Merge pull request #5579 from Textualize/fuzzy-fix

Fix for fuzzy matcher

2 parents fd26e24 + a0597c9

File tree (4 files changed, +28 -14 lines)

- CHANGELOG.md
- pyproject.toml
- src/textual/command.py
- src/textual/fuzzy.py

CHANGELOG.md (7 additions, 0 deletions)

@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](http://keepachangelog.com/)
 and this project adheres to [Semantic Versioning](http://semver.org/).
 
+## [2.1.2] - 2025-02-26
+
+### Fixed
+
+- Fixed command palette fuzzy search bailing too early https://github.com/Textualize/textual/pull/5579
+
 ## [2.1.1] - 2025-02-22
 
 ### Fixed
@@ -2773,6 +2779,7 @@ https://textual.textualize.io/blog/2022/11/08/version-040/#version-040
 - New handler system for messages that doesn't require inheritance
 - Improved traceback handling
 
+[2.1.2]: https://github.com/Textualize/textual/compare/v2.1.1...v2.1.2
 [2.1.1]: https://github.com/Textualize/textual/compare/v2.1.0...v2.1.1
 [2.1.0]: https://github.com/Textualize/textual/compare/v2.0.4...v2.1.0
 [2.0.4]: https://github.com/Textualize/textual/compare/v2.0.3...v2.0.4

pyproject.toml (1 addition, 1 deletion)

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "textual"
-version = "2.1.1"
+version = "2.1.2"
 homepage = "https://github.com/Textualize/textual"
 repository = "https://github.com/Textualize/textual"
 documentation = "https://textual.textualize.io/"

src/textual/command.py (0 additions, 1 deletion)

@@ -949,7 +949,6 @@ async def _search_for(
             )
             for provider in self._providers
         ]
-
         # Set up a delay for showing that we're busy.
         self._start_busy_countdown()
 

src/textual/fuzzy.py (20 additions, 12 deletions)

@@ -13,6 +13,7 @@
 
 import rich.repr
 
+from textual.cache import LRUCache
 from textual.content import Content
 from textual.visual import Style
 
@@ -43,8 +44,8 @@ def branch(self, offset: int) -> tuple[_Search, _Search]:
     def groups(self) -> int:
         """Number of groups in offsets."""
         groups = 1
-        last_offset = self.offsets[0]
-        for offset in self.offsets[1:]:
+        last_offset, *offsets = self.offsets
+        for offset in offsets:
             if offset != last_offset + 1:
                 groups += 1
             last_offset = offset
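Note (not part of the diff): groups counts how many runs of consecutive offsets a match is split into, and the rewrite swaps indexing for tuple unpacking without changing the result. A minimal standalone sketch of the same logic, under an assumed name:

def count_groups(offsets: tuple[int, ...]) -> int:
    """Count runs of consecutive offsets; fewer runs means a less fragmented match."""
    groups = 1
    last_offset, *rest = offsets
    for offset in rest:
        if offset != last_offset + 1:
            groups += 1
        last_offset = offset
    return groups


assert count_groups((0, 1, 2)) == 1  # contiguous match: one group
assert count_groups((0, 4, 5)) == 2  # one gap: two groups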
@@ -57,13 +58,17 @@ class FuzzySearch:
     Unlike a regex solution, this will finds all possible matches.
     """
 
+    cache: LRUCache[tuple[str, str, bool], tuple[float, tuple[int, ...]]] = LRUCache(
+        1024 * 4
+    )
+
     def __init__(self, case_sensitive: bool = False) -> None:
         """Initialize fuzzy search.
 
         Args:
             case_sensitive: Is the match case sensitive?
         """
-        self.cache: dict[tuple[str, str, bool], tuple[float, tuple[int, ...]]] = {}
+
         self.case_sensitive = case_sensitive
 
     def match(self, query: str, candidate: str) -> tuple[float, tuple[int, ...]]:
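For context (a reading of the change, not part of the diff): the unbounded per-instance dict is replaced by one class-level LRUCache shared by every FuzzySearch instance and capped at 4096 entries, so repeated command palette searches can reuse earlier results without memory growing without bound. A minimal sketch of the lookup pattern this supports, assuming LRUCache offers a dict-like membership and index interface; _match_uncached is a hypothetical stand-in, and the real body of match() is not shown in this hunk:

from textual.cache import LRUCache


class FuzzySearchSketch:
    # One bounded cache shared by all instances, keyed on everything that
    # affects the result: query, candidate and case sensitivity.
    cache: LRUCache[tuple[str, str, bool], tuple[float, tuple[int, ...]]] = LRUCache(
        1024 * 4
    )

    def __init__(self, case_sensitive: bool = False) -> None:
        self.case_sensitive = case_sensitive

    def match(self, query: str, candidate: str) -> tuple[float, tuple[int, ...]]:
        key = (query, candidate, self.case_sensitive)
        if key in self.cache:  # assumes dict-like __contains__
            return self.cache[key]
        result = self._match_uncached(query, candidate)  # hypothetical helper
        self.cache[key] = result
        return result

    def _match_uncached(self, query: str, candidate: str) -> tuple[float, tuple[int, ...]]:
        raise NotImplementedError  # stand-in for the real fuzzy scoring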
@@ -76,7 +81,6 @@ def match(self, query: str, candidate: str) -> tuple[float, tuple[int, ...]]:
         Returns:
             A pair of (score, tuple of offsets). `(0, ())` for no result.
         """
-
         query_regex = ".*?".join(f"({escape(character)})" for character in query)
         if not search(
             query_regex, candidate, flags=0 if self.case_sensitive else IGNORECASE
@@ -124,13 +128,13 @@ def score(search: _Search) -> float:
             """
             # This is a heuristic, and can be tweaked for better results
             # Boost first letter matches
-            score: float = sum(
-                (2.0 if offset in first_letters else 1.0) for offset in search.offsets
+            offset_count = len(search.offsets)
+            score: float = offset_count + len(
+                first_letters.intersection(search.offsets)
             )
             # Boost to favor less groups
-            offset_count = len(search.offsets)
             normalized_groups = (offset_count - (search.groups - 1)) / offset_count
-            score *= 1 + (normalized_groups**2)
+            score *= 1 + (normalized_groups * normalized_groups)
             return score
 
         stack: list[_Search] = [_Search()]
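Note (not part of the diff): the heuristic looks numerically equivalent, one point per matched character plus one bonus point when a match lands on the first letter of a word, but it is now computed with a set intersection rather than a per-offset generator expression, and the square is written as a plain multiplication. A standalone sketch with assumed parameter names:

def score_sketch(offsets: tuple[int, ...], groups: int, first_letters: set[int]) -> float:
    """Score a match: reward first-letter hits and penalise fragmented matches."""
    offset_count = len(offsets)
    # One point per matched character, plus one bonus per first-letter match.
    score = offset_count + len(first_letters.intersection(offsets))
    # Boost matches that are split into fewer groups.
    normalized_groups = (offset_count - (groups - 1)) / offset_count
    score *= 1 + (normalized_groups * normalized_groups)
    return score


# "fb" against "foo bar": offsets 0 and 4 are both first letters, two groups.
print(score_sketch((0, 4), 2, {0, 4}))  # 5.0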
@@ -139,20 +143,24 @@ def score(search: _Search) -> float:
        query_size = len(query)
        find = candidate.find
        # Limit the number of loops out of an abundance of caution.
-       # This would be hard to reach without contrived data.
-       remaining_loops = 200
-
+       # This should be hard to reach without contrived data.
+       remaining_loops = 10_000
        while stack and (remaining_loops := remaining_loops - 1):
            search = pop()
            offset = find(query[search.query_offset], search.candidate_offset)
            if offset != -1:
+               if not set(candidate[search.candidate_offset :]).issuperset(
+                   query[search.query_offset :]
+               ):
+                   # Early out if there is not change of a match
+                   continue
                advance_branch, branch = search.branch(offset)
                if advance_branch.query_offset == query_size:
                    yield score(advance_branch), advance_branch.offsets
                    push(branch)
                else:
-                   push(advance_branch)
                    push(branch)
+                   push(advance_branch)
 
 
 @rich.repr.auto
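Note (a reading of the change, not part of the diff): before branching, the search now checks that every character the query still needs can be found somewhere in the rest of the candidate, and drops the branch immediately if not; with branches pruned this aggressively, the safety budget is raised from 200 to 10,000 iterations. The push order is also swapped so that, with a LIFO stack, the branch that consumed the matched character is explored first. A standalone illustration of the early-out test, with assumed example strings:

def can_still_match(query_rest: str, candidate_rest: str) -> bool:
    """True if every character the query still needs exists in the remaining candidate."""
    return set(candidate_rest).issuperset(query_rest)


print(can_still_match("bar", "foo bar baz"))   # True: 'b', 'a', 'r' all remain
print(can_still_match("barq", "foo bar baz"))  # False: no 'q' left, branch is hopeless

Together these appear to be the fix for the "fuzzy search bailing too early" entry in the changelog above: promising branches are no longer cut off by the old 200-iteration limit, and hopeless branches no longer consume the budget.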

0 commit comments
