Skip to content

Commit ede6434

Browse files
fix usage tracking when None exists (#8339)
1 parent dd971a7 commit ede6434

File tree

2 files changed

+60
-1
lines changed

2 files changed

+60
-1
lines changed

dspy/utils/usage_tracker.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def _merge_usage_entries(self, usage_entry1, usage_entry2) -> dict[str, dict[str
3838
result = dict(usage_entry2)
3939
for k, v in usage_entry1.items():
4040
current_v = result.get(k)
41-
if isinstance(v, dict):
41+
if isinstance(v, dict) or isinstance(current_v, dict):
4242
result[k] = self._merge_usage_entries(current_v, v)
4343
else:
4444
result[k] = (current_v or 0) + (v or 0)

tests/utils/test_usage_tracker.py

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -170,3 +170,62 @@ def test_merge_usage_entries_with_new_keys():
170170

171171
assert total_usage["model-x"]["prompt_tokens"] == 5
172172
assert total_usage["model-x"]["completion_tokens"] == 2
173+
174+
175+
def test_merge_usage_entries_with_none_values():
    """Merging usage entries whose detail sub-dicts are ``None`` must not fail.

    Regression test: each entry sets either ``prompt_tokens_details`` or
    ``completion_tokens_details`` to ``None`` (or ``{}``). The tracker must
    treat ``None`` as an empty mapping and still accumulate both the scalar
    token counts and the dict-valued counterparts from the other entries.
    """
    tracker = UsageTracker()

    # All entries use the same model; the point is the None/dict mix in the
    # *_details fields, covering both merge directions (None-then-dict and
    # dict-then-None).
    usage_entries = [
        {
            "model": "gpt-4o-mini",
            "usage": {
                "prompt_tokens": 1117,
                "completion_tokens": 46,
                "total_tokens": 1163,
                "prompt_tokens_details": None,
                "completion_tokens_details": {},
            },
        },
        {
            "model": "gpt-4o-mini",
            "usage": {
                "prompt_tokens": 800,
                "completion_tokens": 100,
                "total_tokens": 900,
                "prompt_tokens_details": {"cached_tokens": 50, "audio_tokens": 50},
                "completion_tokens_details": None,
            },
        },
        {
            "model": "gpt-4o-mini",
            "usage": {
                "prompt_tokens": 800,
                "completion_tokens": 100,
                "total_tokens": 900,
                "prompt_tokens_details": None,
                "completion_tokens_details": {
                    "reasoning_tokens": 1,
                    "audio_tokens": 1,
                    "accepted_prediction_tokens": 1,
                    "rejected_prediction_tokens": 1,
                },
            },
        },
    ]

    for entry in usage_entries:
        tracker.add_usage(entry["model"], entry["usage"])

    total_usage = tracker.get_total_tokens()

    # Scalar counts are summed across all three entries.
    assert total_usage["gpt-4o-mini"]["prompt_tokens"] == 2717
    assert total_usage["gpt-4o-mini"]["completion_tokens"] == 246
    assert total_usage["gpt-4o-mini"]["total_tokens"] == 2963
    # Dict-valued details survive merging with None counterparts.
    assert total_usage["gpt-4o-mini"]["prompt_tokens_details"]["cached_tokens"] == 50
    assert total_usage["gpt-4o-mini"]["prompt_tokens_details"]["audio_tokens"] == 50
    assert total_usage["gpt-4o-mini"]["completion_tokens_details"]["reasoning_tokens"] == 1
    assert total_usage["gpt-4o-mini"]["completion_tokens_details"]["audio_tokens"] == 1
    assert total_usage["gpt-4o-mini"]["completion_tokens_details"]["accepted_prediction_tokens"] == 1
    assert total_usage["gpt-4o-mini"]["completion_tokens_details"]["rejected_prediction_tokens"] == 1

0 commit comments

Comments (0)