Skip to content

Commit 9dc684e

Browse files
committed
Merge branch 'main' into toolsets

# Conflicts:
#	pydantic_ai_slim/pydantic_ai/_agent_graph.py
#	tests/test_streaming.py
2 parents 2348f45 + 95f6ce3 commit 9dc684e

File tree

10 files changed

+59
-26
lines changed

10 files changed

+59
-26
lines changed

docs/a2a.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ The library is designed to be used with any agentic framework, and is **not excl
2727

2828
### Design
2929

30-
**FastA2A** is built on top of [Starlette](https://starlette.io), which means it's fully compatible with any ASGI server.
30+
**FastA2A** is built on top of [Starlette](https://www.starlette.io), which means it's fully compatible with any ASGI server.
3131

3232
Given the nature of the A2A protocol, it's important to understand the design before using it, as a developer
3333
you'll need to provide some components:
@@ -66,7 +66,7 @@ pip/uv-add fasta2a
6666

6767
The only dependencies are:
6868

69-
- [starlette](https://starlette.io): to expose the A2A server as an [ASGI application](https://asgi.readthedocs.io/en/latest/)
69+
- [starlette](https://www.starlette.io): to expose the A2A server as an [ASGI application](https://asgi.readthedocs.io/en/latest/)
7070
- [pydantic](https://pydantic.dev): to validate the request/response messages
7171
- [opentelemetry-api](https://opentelemetry-python.readthedocs.io/en/latest): to provide tracing capabilities
7272

docs/mcp/client.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ Examples of both are shown below; [mcp-run-python](run-python.md) is used as the
3131
!!! note
3232
[`MCPServerSSE`][pydantic_ai.mcp.MCPServerSSE] requires an MCP server to be running and accepting HTTP connections before calling [`agent.run_toolsets()`][pydantic_ai.Agent.run_toolsets]. Running the server is not managed by PydanticAI.
3333

34-
The name "HTTP" is used since this implemented will be adapted in future to use the new
34+
The name "HTTP" is used since this implementation will be adapted in future to use the new
3535
[Streamable HTTP](https://github.com/modelcontextprotocol/specification/pull/206) currently in development.
3636

3737
Before creating the SSE client, we need to run the server (docs [here](run-python.md)):
@@ -372,7 +372,7 @@ async def main():
372372

373373
_(This example is complete, it can be run "as is" with Python 3.10+)_
374374

375-
You can disallow sampling by settings [`allow_sampling=False`][pydantic_ai.mcp.MCPServerStdio.allow_sampling] when creating the server reference, e.g.:
375+
You can disallow sampling by setting [`allow_sampling=False`][pydantic_ai.mcp.MCPServerStdio.allow_sampling] when creating the server reference, e.g.:
376376

377377
```python {title="sampling_disallowed.py" hl_lines="6" py="3.10"}
378378
from pydantic_ai.mcp import MCPServerStdio

pydantic_ai_slim/pydantic_ai/_agent_graph.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -608,7 +608,7 @@ async def process_function_tools( # noqa: C901
608608
content='Output tool not used - a final result was already processed.',
609609
tool_call_id=call.tool_call_id,
610610
)
611-
yield _messages.FunctionToolResultEvent(part, tool_call_id=call.tool_call_id)
611+
yield _messages.FunctionToolResultEvent(part)
612612

613613
parts.append(part)
614614
else:
@@ -621,7 +621,7 @@ async def process_function_tools( # noqa: C901
621621
ctx.state.increment_retries(ctx.deps.max_result_retries, e)
622622
yield _messages.FunctionToolCallEvent(call)
623623
parts.append(e.tool_retry)
624-
yield _messages.FunctionToolResultEvent(e.tool_retry, tool_call_id=call.tool_call_id)
624+
yield _messages.FunctionToolResultEvent(e.tool_retry)
625625
else:
626626
part = _messages.ToolReturnPart(
627627
tool_name=call.tool_name,
@@ -682,7 +682,7 @@ async def process_function_tools( # noqa: C901
682682
for task in done:
683683
index = tasks.index(task)
684684
tool_result = task.result()
685-
yield _messages.FunctionToolResultEvent(tool_result, tool_call_id=tool_result.tool_call_id)
685+
yield _messages.FunctionToolResultEvent(tool_result)
686686

687687
if isinstance(tool_result, _messages.RetryPromptPart):
688688
results_by_index[index] = tool_result

pydantic_ai_slim/pydantic_ai/messages.py

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
import pydantic
1212
import pydantic_core
1313
from opentelemetry._events import Event # pyright: ignore[reportPrivateImportUsage]
14-
from typing_extensions import TypeAlias
14+
from typing_extensions import TypeAlias, deprecated
1515

1616
from . import _utils
1717
from ._utils import (
@@ -501,7 +501,10 @@ class RetryPromptPart:
501501
def model_response(self) -> str:
502502
"""Return a string message describing why the retry is requested."""
503503
if isinstance(self.content, str):
504-
description = self.content
504+
if self.tool_name is None:
505+
description = f'Validation feedback:\n{self.content}'
506+
else:
507+
description = self.content
505508
else:
506509
json_errors = error_details_ta.dump_json(self.content, exclude={'__all__': {'ctx'}}, indent=2)
507510
description = f'{len(self.content)} validation errors: {json_errors.decode()}'
@@ -1009,10 +1012,16 @@ class FunctionToolCallEvent:
10091012
"""Event type identifier, used as a discriminator."""
10101013

10111014
@property
1012-
def call_id(self) -> str:
1013-
"""An ID used for matching details about the call to its result. If present, defaults to the part's tool_call_id."""
1015+
def tool_call_id(self) -> str:
1016+
"""An ID used for matching details about the call to its result."""
10141017
return self.part.tool_call_id
10151018

1019+
@property
1020+
@deprecated('`call_id` is deprecated, use `tool_call_id` instead.')
1021+
def call_id(self) -> str:
1022+
"""An ID used for matching details about the call to its result."""
1023+
return self.part.tool_call_id # pragma: no cover
1024+
10161025
__repr__ = _utils.dataclasses_no_defaults_repr
10171026

10181027

@@ -1022,11 +1031,14 @@ class FunctionToolResultEvent:
10221031

10231032
result: ToolReturnPart | RetryPromptPart
10241033
"""The result of the call to the function tool."""
1025-
tool_call_id: str
1026-
"""An ID used to match the result to its original call."""
10271034
event_kind: Literal['function_tool_result'] = 'function_tool_result'
10281035
"""Event type identifier, used as a discriminator."""
10291036

1037+
@property
1038+
def tool_call_id(self) -> str:
1039+
"""An ID used to match the result to its original call."""
1040+
return self.result.tool_call_id
1041+
10301042
__repr__ = _utils.dataclasses_no_defaults_repr
10311043

10321044

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@
6161
from openai.types.chat.chat_completion_content_part_image_param import ImageURL
6262
from openai.types.chat.chat_completion_content_part_input_audio_param import InputAudio
6363
from openai.types.chat.chat_completion_content_part_param import File, FileFile
64+
from openai.types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
6465
from openai.types.responses import ComputerToolParam, FileSearchToolParam, WebSearchToolParam
6566
from openai.types.responses.response_input_param import FunctionCallOutput, Message
6667
from openai.types.shared import ReasoningEffort
@@ -126,6 +127,12 @@ class OpenAIModelSettings(ModelSettings, total=False):
126127
For more information, see [OpenAI's service tiers documentation](https://platform.openai.com/docs/api-reference/chat/object#chat/object-service_tier).
127128
"""
128129

130+
openai_prediction: ChatCompletionPredictionContentParam
131+
"""Enables [predictive outputs](https://platform.openai.com/docs/guides/predicted-outputs).
132+
133+
This feature is currently only supported for some OpenAI models.
134+
"""
135+
129136

130137
class OpenAIResponsesModelSettings(OpenAIModelSettings, total=False):
131138
"""Settings used for an OpenAI Responses model request.
@@ -320,6 +327,7 @@ async def _completions_create(
320327
reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
321328
user=model_settings.get('openai_user', NOT_GIVEN),
322329
service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
330+
prediction=model_settings.get('openai_prediction', NOT_GIVEN),
323331
temperature=sampling_settings.get('temperature', NOT_GIVEN),
324332
top_p=sampling_settings.get('top_p', NOT_GIVEN),
325333
presence_penalty=sampling_settings.get('presence_penalty', NOT_GIVEN),

tests/models/test_bedrock.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -413,8 +413,7 @@ async def get_temperature(city: str) -> str:
413413
content='30°C',
414414
tool_call_id='tooluse_lAG_zP8QRHmSYOwZzzaCqA',
415415
timestamp=IsDatetime(),
416-
),
417-
tool_call_id='tooluse_lAG_zP8QRHmSYOwZzzaCqA',
416+
)
418417
),
419418
PartStartEvent(index=0, part=TextPart(content='The')),
420419
FinalResultEvent(tool_name=None, tool_call_id=None),

tests/models/test_google.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -375,8 +375,7 @@ async def get_temperature(city: str) -> str:
375375
FunctionToolResultEvent(
376376
result=ToolReturnPart(
377377
tool_name='get_capital', content='Paris', tool_call_id=IsStr(), timestamp=IsDatetime()
378-
),
379-
tool_call_id=IsStr(),
378+
)
380379
),
381380
PartStartEvent(
382381
index=0,
@@ -386,8 +385,7 @@ async def get_temperature(city: str) -> str:
386385
FunctionToolResultEvent(
387386
result=ToolReturnPart(
388387
tool_name='get_temperature', content='30°C', tool_call_id=IsStr(), timestamp=IsDatetime()
389-
),
390-
tool_call_id=IsStr(),
388+
)
391389
),
392390
PartStartEvent(index=0, part=TextPart(content='The temperature in Paris')),
393391
FinalResultEvent(tool_name=None, tool_call_id=None),

tests/models/test_instrumented.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,7 @@ async def test_instrumented_model(capfire: CaptureLogfire):
240240
{
241241
'body': {
242242
'content': """\
243+
Validation feedback:
243244
retry_prompt2
244245
245246
Fix the errors and try again.\
@@ -619,6 +620,7 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire):
619620
{
620621
'event.name': 'gen_ai.user.message',
621622
'content': """\
623+
Validation feedback:
622624
retry_prompt2
623625
624626
Fix the errors and try again.\

tests/test_agent.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -303,7 +303,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
303303
parts=[
304304
ToolCallPart(tool_name='final_result', args='{"response": ["foo", "bar"]}', tool_call_id=IsStr())
305305
],
306-
usage=Usage(requests=1, request_tokens=72, response_tokens=8, total_tokens=80),
306+
usage=Usage(requests=1, request_tokens=74, response_tokens=8, total_tokens=82),
307307
model_name='function:return_tuple:',
308308
timestamp=IsNow(tz=timezone.utc),
309309
),
@@ -942,7 +942,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
942942
),
943943
ModelResponse(
944944
parts=[TextPart(content='Mexico City')],
945-
usage=Usage(requests=1, request_tokens=68, response_tokens=5, total_tokens=73),
945+
usage=Usage(requests=1, request_tokens=70, response_tokens=5, total_tokens=75),
946946
model_name='function:call_tool:',
947947
timestamp=IsDatetime(),
948948
),
@@ -1602,7 +1602,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
16021602
),
16031603
ModelResponse(
16041604
parts=[TextPart(content='{"city": "Mexico City"}')],
1605-
usage=Usage(requests=1, request_tokens=68, response_tokens=11, total_tokens=79),
1605+
usage=Usage(requests=1, request_tokens=70, response_tokens=11, total_tokens=81),
16061606
model_name='function:call_tool:',
16071607
timestamp=IsDatetime(),
16081608
),

tests/test_streaming.py

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -997,7 +997,6 @@ def known_tool(x: int) -> int:
997997
tool_call_id=IsStr(),
998998
timestamp=IsNow(tz=timezone.utc),
999999
),
1000-
tool_call_id=IsStr(),
10011000
),
10021001
FunctionToolResultEvent(
10031002
result=ToolReturnPart(
@@ -1006,7 +1005,6 @@ def known_tool(x: int) -> int:
10061005
tool_call_id=IsStr(),
10071006
timestamp=IsNow(tz=timezone.utc),
10081007
),
1009-
tool_call_id=IsStr(),
10101008
),
10111009
]
10121010
)
@@ -1057,8 +1055,7 @@ def call_final_result_with_bad_data(messages: list[ModelMessage], info: AgentInf
10571055
tool_name='final_result',
10581056
tool_call_id=IsStr(),
10591057
timestamp=IsNow(tz=timezone.utc),
1060-
),
1061-
tool_call_id=IsStr(),
1058+
)
10621059
),
10631060
]
10641061
)
@@ -1087,6 +1084,23 @@ class CityLocation(BaseModel):
10871084
assert result.is_complete
10881085

10891086

1087+
def test_function_tool_event_tool_call_id_properties():
1088+
"""Ensure that the `tool_call_id` property on function tool events mirrors the underlying part's ID."""
1089+
# Prepare a ToolCallPart with a fixed ID
1090+
call_part = ToolCallPart(tool_name='sample_tool', args={'a': 1}, tool_call_id='call_id_123')
1091+
call_event = FunctionToolCallEvent(part=call_part)
1092+
1093+
# The event should expose the same `tool_call_id` as the part
1094+
assert call_event.tool_call_id == call_part.tool_call_id == 'call_id_123'
1095+
1096+
# Prepare a ToolReturnPart with a fixed ID
1097+
return_part = ToolReturnPart(tool_name='sample_tool', content='ok', tool_call_id='return_id_456')
1098+
result_event = FunctionToolResultEvent(result=return_part)
1099+
1100+
# The event should expose the same `tool_call_id` as the result part
1101+
assert result_event.tool_call_id == return_part.tool_call_id == 'return_id_456'
1102+
1103+
10901104
async def test_deferred_tool():
10911105
agent = Agent(TestModel(), output_type=[str, DeferredToolCalls])
10921106

0 commit comments

Comments
 (0)