Skip to content

Commit e63c7cc

Browse files
authored
fix(openai): Convert OpenAI responses API usage to tracing format (langchain-ai#9145)
1 parent 51f638e commit e63c7cc

File tree

4 files changed

+110
-3
lines changed

4 files changed

+110
-3
lines changed

.changeset/brown-hounds-divide.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"@langchain/openai": patch
3+
---
4+
5+
fix(openai): Convert OpenAI responses API usage to tracing format

libs/langchain-openai/src/chat_models.ts

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,10 @@ import {
107107
ResponsesTool,
108108
ResponsesToolChoice,
109109
} from "./utils/tools.js";
110-
import { handleMultiModalOutput } from "./utils/output.js";
110+
import {
111+
_convertOpenAIResponsesUsageToLangChainUsage,
112+
handleMultiModalOutput,
113+
} from "./utils/output.js";
111114

112115
const _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__";
113116

@@ -1755,7 +1758,9 @@ export class ChatOpenAIResponses<
17551758
content,
17561759
tool_calls,
17571760
invalid_tool_calls,
1758-
usage_metadata: response.usage,
1761+
usage_metadata: _convertOpenAIResponsesUsageToLangChainUsage(
1762+
response.usage
1763+
),
17591764
additional_kwargs,
17601765
response_metadata,
17611766
});
@@ -1831,7 +1836,10 @@ export class ChatOpenAIResponses<
18311836
} else if (chunk.type === "response.completed") {
18321837
const msg = this._convertResponsesMessageToBaseMessage(chunk.response);
18331838

1834-
usage_metadata = chunk.response.usage;
1839+
usage_metadata = _convertOpenAIResponsesUsageToLangChainUsage(
1840+
chunk.response.usage
1841+
);
1842+
18351843
if (chunk.response.text?.format?.type === "json_schema") {
18361844
additional_kwargs.parsed ??= JSON.parse(msg.text);
18371845
}

libs/langchain-openai/src/tests/chat_models.test.ts

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import { load } from "@langchain/core/load";
55
import { it, expect, describe, beforeAll, afterAll, jest } from "@jest/globals";
66
import { tool } from "@langchain/core/tools";
77
import { ChatOpenAI } from "../chat_models.js";
8+
import { _convertOpenAIResponsesUsageToLangChainUsage } from "../utils/output.js";
89

910
describe("ChatOpenAI", () => {
1011
describe("should initialize with correct values", () => {
@@ -657,4 +658,70 @@ describe("ChatOpenAI", () => {
657658
);
658659
});
659660
});
661+
662+
describe("Responses API usage metadata conversion", () => {
663+
it("should convert OpenAI Responses usage to LangChain format with cached tokens", () => {
664+
const usage = {
665+
input_tokens: 100,
666+
output_tokens: 50,
667+
total_tokens: 150,
668+
input_tokens_details: {
669+
cached_tokens: 75,
670+
text_tokens: 25,
671+
},
672+
output_tokens_details: {
673+
reasoning_tokens: 10,
674+
text_tokens: 40,
675+
},
676+
};
677+
678+
const result = _convertOpenAIResponsesUsageToLangChainUsage(usage as any);
679+
680+
expect(result).toEqual({
681+
input_tokens: 100,
682+
output_tokens: 50,
683+
total_tokens: 150,
684+
input_token_details: {
685+
cached_tokens: 75,
686+
text_tokens: 25,
687+
cache_read: 75,
688+
},
689+
output_token_details: {
690+
reasoning_tokens: 10,
691+
text_tokens: 40,
692+
reasoning: 10,
693+
},
694+
});
695+
});
696+
697+
it("should handle missing usage details gracefully", () => {
698+
const usage = {
699+
input_tokens: 100,
700+
output_tokens: 50,
701+
total_tokens: 150,
702+
};
703+
704+
const result = _convertOpenAIResponsesUsageToLangChainUsage(usage as any);
705+
706+
expect(result).toEqual({
707+
input_tokens: 100,
708+
output_tokens: 50,
709+
total_tokens: 150,
710+
input_token_details: {},
711+
output_token_details: {},
712+
});
713+
});
714+
715+
it("should handle undefined usage", () => {
716+
const result = _convertOpenAIResponsesUsageToLangChainUsage(undefined);
717+
718+
expect(result).toEqual({
719+
input_tokens: 0,
720+
output_tokens: 0,
721+
total_tokens: 0,
722+
input_token_details: {},
723+
output_token_details: {},
724+
});
725+
});
726+
});
660727
});

libs/langchain-openai/src/utils/output.ts

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
1+
import { OpenAI as OpenAIClient } from "openai";
12
import {
23
StandardImageBlock,
34
StandardTextBlock,
5+
UsageMetadata,
46
} from "@langchain/core/messages";
57

68
/**
@@ -39,3 +41,28 @@ export function handleMultiModalOutput(
3941

4042
return content;
4143
}
44+
45+
export function _convertOpenAIResponsesUsageToLangChainUsage(
46+
usage?: OpenAIClient.Responses.ResponseUsage
47+
): UsageMetadata {
48+
// TODO: Remove raw OpenAI usage details in v1
49+
const inputTokenDetails = {
50+
...(usage?.input_tokens_details?.cached_tokens != null && {
51+
...usage?.input_tokens_details,
52+
cache_read: usage?.input_tokens_details?.cached_tokens,
53+
}),
54+
};
55+
const outputTokenDetails = {
56+
...(usage?.output_tokens_details?.reasoning_tokens != null && {
57+
...usage?.output_tokens_details,
58+
reasoning: usage?.output_tokens_details?.reasoning_tokens,
59+
}),
60+
};
61+
return {
62+
input_tokens: usage?.input_tokens ?? 0,
63+
output_tokens: usage?.output_tokens ?? 0,
64+
total_tokens: usage?.total_tokens ?? 0,
65+
input_token_details: inputTokenDetails,
66+
output_token_details: outputTokenDetails,
67+
};
68+
}

0 commit comments

Comments (0)