Skip to content

Commit 54fa1f7

Browse files
authored
Include additional usage fields from OpenAI-compatible APIs in usage details (#2038)
1 parent 191d651 commit 54fa1f7

File tree

2 files changed: +23 −7 lines changed

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 17 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1062,18 +1062,29 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
10621062
if response_usage is None:
10631063
return usage.Usage()
10641064
elif isinstance(response_usage, responses.ResponseUsage):
1065-
details: dict[str, int] = {}
1065+
details: dict[str, int] = {
1066+
key: value
1067+
for key, value in response_usage.model_dump(
1068+
exclude={'input_tokens', 'output_tokens', 'total_tokens'}
1069+
).items()
1070+
if isinstance(value, int)
1071+
}
1072+
details['reasoning_tokens'] = response_usage.output_tokens_details.reasoning_tokens
1073+
details['cached_tokens'] = response_usage.input_tokens_details.cached_tokens
10661074
return usage.Usage(
10671075
request_tokens=response_usage.input_tokens,
10681076
response_tokens=response_usage.output_tokens,
10691077
total_tokens=response_usage.total_tokens,
1070-
details={
1071-
'reasoning_tokens': response_usage.output_tokens_details.reasoning_tokens,
1072-
'cached_tokens': response_usage.input_tokens_details.cached_tokens,
1073-
},
1078+
details=details,
10741079
)
10751080
else:
1076-
details = {}
1081+
details = {
1082+
key: value
1083+
for key, value in response_usage.model_dump(
1084+
exclude={'prompt_tokens', 'completion_tokens', 'total_tokens'}
1085+
).items()
1086+
if isinstance(value, int)
1087+
}
10771088
if response_usage.completion_tokens_details is not None:
10781089
details.update(response_usage.completion_tokens_details.model_dump(exclude_none=True))
10791090
if response_usage.prompt_tokens_details is not None:

tests/models/test_deepseek.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,12 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek
3535
request_tokens=12,
3636
response_tokens=789,
3737
total_tokens=801,
38-
details={'reasoning_tokens': 415, 'cached_tokens': 0},
38+
details={
39+
'prompt_cache_hit_tokens': 0,
40+
'prompt_cache_miss_tokens': 12,
41+
'reasoning_tokens': 415,
42+
'cached_tokens': 0,
43+
},
3944
),
4045
model_name='deepseek-reasoner',
4146
timestamp=IsDatetime(),

0 commit comments

Comments (0)