
Commit ea69c90

soundTricker authored and copybara-github committed
feat: add usage span attributes to telemetry (#356)
Merge #1079. Fixes part of #356. Adds usage attributes to the span. Note: since the handling of GenAI event bodies in OpenTelemetry has not yet been determined, only usage-related attributes are added for now. COPYBARA_INTEGRATE_REVIEW=#1079 from soundTricker:feature/356-support-more-opentelemetry-semantics 99a9d03 PiperOrigin-RevId: 774834279
1 parent f033e40 commit ea69c90
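
The change follows the standard OpenTelemetry pattern of writing attributes onto the current span. Below is a minimal standalone sketch of that pattern, not the ADK source itself; the helper name record_usage_attributes and the bare usage_metadata argument are hypothetical stand-ins for the response metadata that trace_call_llm receives.

# Minimal sketch (assumption: not ADK code) of the pattern this commit applies:
# token usage counts are written onto the current OpenTelemetry span under the
# GenAI semantic-convention attribute names.
from opentelemetry import trace

def record_usage_attributes(usage_metadata) -> None:  # hypothetical helper
  span = trace.get_current_span()
  if usage_metadata is None:
    return
  span.set_attribute('gen_ai.usage.input_tokens', usage_metadata.prompt_token_count)
  span.set_attribute('gen_ai.usage.output_tokens', usage_metadata.total_token_count)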

File tree

2 files changed: +40 −0 lines changed


src/google/adk/telemetry.py

Lines changed: 10 additions & 0 deletions
@@ -195,6 +195,16 @@ def trace_call_llm(
       llm_response_json,
   )
 
+  if llm_response.usage_metadata is not None:
+    span.set_attribute(
+        'gen_ai.usage.input_tokens',
+        llm_response.usage_metadata.prompt_token_count,
+    )
+    span.set_attribute(
+        'gen_ai.usage.output_tokens',
+        llm_response.usage_metadata.total_token_count,
+    )
+
 
 def trace_send_data(
     invocation_context: InvocationContext,
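
To see the effect end to end, here is a minimal verification sketch, assuming the opentelemetry-sdk package is installed. It is not part of the commit: it sets the same two attributes by hand inside a span and reads them back from an in-memory exporter, mirroring what trace_call_llm now records when usage_metadata is present.

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

# Route finished spans to an in-memory exporter so attributes can be inspected.
exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span('call_llm'):
  # Stand-in for the attribute writes added to trace_call_llm.
  span = trace.get_current_span()
  span.set_attribute('gen_ai.usage.input_tokens', 50)
  span.set_attribute('gen_ai.usage.output_tokens', 100)

finished = exporter.get_finished_spans()
print(finished[0].attributes['gen_ai.usage.input_tokens'])   # 50
print(finished[0].attributes['gen_ai.usage.output_tokens'])  # 100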

tests/unittests/test_telemetry.py

Lines changed: 30 additions & 0 deletions
@@ -141,6 +141,36 @@ async def test_trace_call_llm_function_response_includes_part_from_bytes(
   assert llm_request_json_str.count('<not serializable>') == 2
 
 
+@pytest.mark.asyncio
+async def test_trace_call_llm_usage_metadata(monkeypatch, mock_span_fixture):
+  monkeypatch.setattr(
+      'opentelemetry.trace.get_current_span', lambda: mock_span_fixture
+  )
+
+  agent = LlmAgent(name='test_agent')
+  invocation_context = await _create_invocation_context(agent)
+  llm_request = LlmRequest(
+      config=types.GenerateContentConfig(system_instruction=''),
+  )
+  llm_response = LlmResponse(
+      turn_complete=True,
+      usage_metadata=types.GenerateContentResponseUsageMetadata(
+          total_token_count=100, prompt_token_count=50
+      ),
+  )
+  trace_call_llm(invocation_context, 'test_event_id', llm_request, llm_response)
+
+  expected_calls = [
+      mock.call('gen_ai.system', 'gcp.vertex.agent'),
+      mock.call('gen_ai.usage.input_tokens', 50),
+      mock.call('gen_ai.usage.output_tokens', 100),
+  ]
+  assert mock_span_fixture.set_attribute.call_count == 9
+  mock_span_fixture.set_attribute.assert_has_calls(
+      expected_calls, any_order=True
+  )
+
+
 def test_trace_tool_call_with_scalar_response(
     monkeypatch, mock_span_fixture, mock_tool_fixture, mock_event_fixture
 ):
