Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions logfire/_internal/integrations/llm_providers/llm_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,6 @@ def record_chunk(chunk: Any) -> None:
duration = (timer() - start) / ONE_SECOND_IN_NANOSECONDS
logire_llm.info(
'streaming response from {request_data[model]!r} took {duration:.2f}s',
**span_data,
duration=duration,
response_data=stream_state.get_response_data(),
**stream_state.get_attributes(span_data),
)
16 changes: 10 additions & 6 deletions logfire/_internal/integrations/llm_providers/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,16 +55,15 @@ def get_endpoint_config(options: FinalRequestOptions) -> EndpointConfig:
if is_current_agent_span('Responses API', 'Responses API with {gen_ai.request.model!r}'):
return EndpointConfig(message_template='', span_data={})

stream = json_data.get('stream', False) # type: ignore
span_data: dict[str, Any] = {
'gen_ai.request.model': json_data['model'],
}
if json_data.get('stream'): # type: ignore
span_data['request_data'] = json_data
else:
span_data['events'] = inputs_to_events(
'request_data': {'model': json_data['model'], 'stream': stream},
'events': inputs_to_events(
json_data['input'], # type: ignore
json_data.get('instructions'), # type: ignore
)
),
}

return EndpointConfig(
message_template='Responses API with {gen_ai.request.model!r}',
Expand Down Expand Up @@ -140,6 +139,11 @@ def get_response_data(self) -> Any:

return response

def get_attributes(self, span_data: dict[str, Any]) -> dict[str, Any]:
    """Return the log attributes for a completed Responses API stream.

    Overrides the base implementation: instead of attaching the raw
    response under ``response_data``, it extends the request-time
    ``events`` list with events derived from the streamed response.

    Builds and returns a new dict rather than mutating the caller's
    ``span_data`` in place, matching the base ``get_attributes``,
    which also returns a fresh dict.
    """
    response = self.get_response_data()
    # NOTE(review): assumes span_data always carries an 'events' list for the
    # Responses API (set up at request time in get_endpoint_config) — confirm.
    # New dict + new list so the caller-owned span_data/events stay untouched.
    return {
        **span_data,
        'events': span_data['events'] + responses_output_events(response),
    }


try:
# ChatCompletionStreamState only exists in openai>=1.40.0
Expand Down
4 changes: 4 additions & 0 deletions logfire/_internal/integrations/llm_providers/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,10 @@ def record_chunk(self, chunk: Any) -> None:
def get_response_data(self) -> Any:
"""Returns the response data for including in the log."""

def get_attributes(self, span_data: dict[str, Any]) -> dict[str, Any]:
    """Attributes to include in the log.

    Merges the request-time span data with the accumulated response
    data (under the ``response_data`` key) into a fresh dict.
    """
    response_attrs = {'response_data': self.get_response_data()}
    return dict(**span_data, **response_attrs)


class EndpointConfig(NamedTuple):
"""The configuration for the endpoint of a provider based on request url."""
Expand Down
6 changes: 3 additions & 3 deletions tests/otel_integrations/test_anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,7 @@ def test_sync_message_empty_response_chunk(instrumented_client: anthropic.Anthro
'logfire.tags': ('LLM',),
'duration': 1.0,
'response_data': '{"combined_chunk_content":"","chunk_count":0}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
},
},
]
Expand Down Expand Up @@ -405,7 +405,7 @@ def test_sync_messages_stream(instrumented_client: anthropic.Anthropic, exporter
'logfire.tags': ('LLM',),
'duration': 1.0,
'response_data': '{"combined_chunk_content":"The answer is secret","chunk_count":2}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
},
},
]
Expand Down Expand Up @@ -470,7 +470,7 @@ async def test_async_messages_stream(
'logfire.tags': ('LLM',),
'duration': 1.0,
'response_data': '{"combined_chunk_content":"The answer is secret","chunk_count":2}',
'logfire.json_schema': '{"type":"object","properties":{"request_data":{"type":"object"},"async":{},"duration":{},"response_data":{"type":"object"}}}',
'logfire.json_schema': '{"type":"object","properties":{"duration":{},"request_data":{"type":"object"},"async":{},"response_data":{"type":"object"}}}',
},
},
]
Expand Down
137 changes: 22 additions & 115 deletions tests/otel_integrations/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -1489,8 +1489,11 @@ def test_responses_stream(exporter: TestExporter) -> None:
'code.filepath': 'test_openai.py',
'code.function': 'test_responses_stream',
'code.lineno': 123,
'request_data': {'input': 'What is four plus five?', 'model': 'gpt-4.1', 'stream': True},
'request_data': {'model': 'gpt-4.1', 'stream': True},
'gen_ai.request.model': 'gpt-4.1',
'events': [
{'event.name': 'gen_ai.user.message', 'content': 'What is four plus five?', 'role': 'user'}
],
'async': False,
'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
'logfire.msg': "Responses API with 'gpt-4.1'",
Expand All @@ -1499,6 +1502,7 @@ def test_responses_stream(exporter: TestExporter) -> None:
'properties': {
'request_data': {'type': 'object'},
'gen_ai.request.model': {},
'events': {'type': 'array'},
'async': {},
},
},
Expand All @@ -1521,131 +1525,30 @@ def test_responses_stream(exporter: TestExporter) -> None:
'code.filepath': 'test_openai.py',
'code.function': 'test_responses_stream',
'code.lineno': 123,
'request_data': {'input': 'What is four plus five?', 'model': 'gpt-4.1', 'stream': True},
'request_data': {'model': 'gpt-4.1', 'stream': True},
'gen_ai.request.model': 'gpt-4.1',
'async': False,
'duration': 1.0,
'response_data': {
'id': 'resp_079fceed100a827c0068e011e9cefc81969ea6a843546705e6',
'created_at': 1759515113.0,
'error': None,
'incomplete_details': None,
'instructions': None,
'metadata': {},
'model': 'gpt-4.1-2025-04-14',
'object': 'response',
'output': [
{
'id': 'msg_079fceed100a827c0068e011ea7d388196a588ec8cf09b1364',
'content': [
{
'annotations': [],
'text': 'Four plus five equals **nine**.',
'type': 'output_text',
'logprobs': [],
'parsed': None,
}
],
'role': 'assistant',
'status': 'completed',
'type': 'message',
}
],
'parallel_tool_calls': True,
'temperature': 1.0,
'tool_choice': 'auto',
'tools': [],
'top_p': 1.0,
'background': False,
'conversation': None,
'max_output_tokens': None,
'max_tool_calls': None,
'previous_response_id': None,
'prompt': None,
'prompt_cache_key': None,
'reasoning': {'effort': None, 'generate_summary': None, 'summary': None},
'safety_identifier': None,
'service_tier': 'default',
'status': 'completed',
'text': {'format': {'type': 'text'}, 'verbosity': 'medium'},
'top_logprobs': 0,
'truncation': 'disabled',
'usage': {
'input_tokens': 13,
'input_tokens_details': {'cached_tokens': 0},
'output_tokens': 9,
'output_tokens_details': {'reasoning_tokens': 0},
'total_tokens': 22,
'events': [
{
'event.name': 'gen_ai.user.message',
'content': 'What is four plus five?',
'role': 'user',
},
'user': None,
'store': True,
},
{
'event.name': 'gen_ai.assistant.message',
'content': 'Four plus five equals **nine**.',
'role': 'assistant',
},
],
'logfire.json_schema': {
'type': 'object',
'properties': {
'request_data': {'type': 'object'},
'gen_ai.request.model': {},
'async': {},
'events': {'type': 'array'},
'duration': {},
'response_data': {
'type': 'object',
'title': 'ParsedResponse[NoneType]',
'x-python-datatype': 'PydanticModel',
'properties': {
'output': {
'type': 'array',
'items': {
'type': 'object',
'title': 'ParsedResponseOutputMessage[NoneType]',
'x-python-datatype': 'PydanticModel',
'properties': {
'content': {
'type': 'array',
'items': {
'type': 'object',
'title': 'ParsedResponseOutputText[NoneType]',
'x-python-datatype': 'PydanticModel',
},
}
},
},
},
'reasoning': {
'type': 'object',
'title': 'Reasoning',
'x-python-datatype': 'PydanticModel',
},
'text': {
'type': 'object',
'title': 'ResponseTextConfig',
'x-python-datatype': 'PydanticModel',
'properties': {
'format': {
'type': 'object',
'title': 'ResponseFormatText',
'x-python-datatype': 'PydanticModel',
}
},
},
'usage': {
'type': 'object',
'title': 'ResponseUsage',
'x-python-datatype': 'PydanticModel',
'properties': {
'input_tokens_details': {
'type': 'object',
'title': 'InputTokensDetails',
'x-python-datatype': 'PydanticModel',
},
'output_tokens_details': {
'type': 'object',
'title': 'OutputTokensDetails',
'x-python-datatype': 'PydanticModel',
},
},
},
},
},
},
},
'logfire.tags': ('LLM',),
Expand Down Expand Up @@ -2315,6 +2218,7 @@ def test_responses_api(exporter: TestExporter) -> None:
'code.function': 'test_responses_api',
'code.lineno': 123,
'async': False,
'request_data': {'model': 'gpt-4.1', 'stream': False},
'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
'logfire.msg': "Responses API with 'gpt-4.1'",
'logfire.tags': ('LLM',),
Expand Down Expand Up @@ -2348,6 +2252,7 @@ def test_responses_api(exporter: TestExporter) -> None:
'type': 'object',
'properties': {
'gen_ai.request.model': {},
'request_data': {'type': 'object'},
'events': {'type': 'array'},
'async': {},
'gen_ai.system': {},
Expand All @@ -2370,6 +2275,7 @@ def test_responses_api(exporter: TestExporter) -> None:
'code.function': 'test_responses_api',
'code.lineno': 123,
'async': False,
'request_data': {'model': 'gpt-4.1', 'stream': False},
'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
'logfire.msg': "Responses API with 'gpt-4.1'",
'logfire.tags': ('LLM',),
Expand Down Expand Up @@ -2414,6 +2320,7 @@ def test_responses_api(exporter: TestExporter) -> None:
'type': 'object',
'properties': {
'gen_ai.request.model': {},
'request_data': {'type': 'object'},
'events': {'type': 'array'},
'async': {},
'gen_ai.system': {},
Expand Down
Loading