Skip to content

Commit ca3ae98

Browse files
author
SMKRV
committed
feat(performance): Optimize system resources and token estimation
- Improve JSON history file processing
- Add memory and disk space validation
- Enhance parallel request handling
- Refine token counting heuristics
1 parent 0c4399b commit ca3ae98

File tree

1 file changed

+2
-4
lines changed

1 file changed

+2
-4
lines changed

custom_components/ha_text_ai/coordinator.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -865,8 +865,7 @@ async def async_process_question(
865865
context_tokens = self._calculate_context_tokens(
866866
[{"content": entry["question"]} for entry in context_history] +
867867
[{"content": entry["response"]} for entry in context_history] +
868-
[{"content": question}],
869-
temp_model
868+
[{"content": question}]
870869
)
871870

872871
# Dynamic token allocation
@@ -886,8 +885,7 @@ async def async_process_question(
886885
context_tokens = self._calculate_context_tokens(
887886
[{"content": entry["question"]} for entry in context_history] +
888887
[{"content": entry["response"]} for entry in context_history] +
889-
[{"content": question}],
890-
temp_model
888+
[{"content": question}]
891889
)
892890

893891
# Rebuild messages with trimmed context

0 commit comments

Comments (0)