Commit 96f676c

chore: 100% clean coverage (#1705)
1 parent 74046ed commit 96f676c

59 files changed: +263 −246 lines changed

.github/workflows/ci.yml

Lines changed: 1 addition & 1 deletion
@@ -224,7 +224,7 @@ jobs:
           name: diff-coverage-html
           path: index.html

-      - run: uv run coverage report --fail-under 95
+      - run: uv run coverage report --fail-under 100
       - run: uv run diff-cover coverage.xml --fail-under 100
       - run: uv run strict-no-cover
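The gate moves from 95% to 100%: `coverage report` must now show every line covered, `diff-cover` already required full coverage on changed lines, and `strict-no-cover` verifies that excluded lines really are unexercised. To reach 100% honestly, the rest of this commit annotates lines that tests deliberately do not reach. A minimal sketch of the three pragma flavors used below (names are illustrative; the `lax no cover` form is not standard coverage.py syntax and appears to be interpreted by the repo's `strict-no-cover` tool, tolerating lines that are covered on some runs and not others):

def get_port(config: dict[str, int]) -> int:
    if 'port' in config:  # pragma: no branch -- tests never take the miss path
        return config['port']
    raise KeyError('port')  # pragma: no cover -- defensive, unreachable in tests


def newline() -> str:
    import sys

    if sys.platform == 'win32':
        return '\r\n'  # pragma: lax no cover -- covered only on some CI jobs
    return '\n'  # pragma: lax no cover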

pydantic_ai_slim/pydantic_ai/_agent_graph.py

Lines changed: 8 additions & 6 deletions
@@ -196,7 +196,9 @@ async def _reevaluate_dynamic_prompts(
         for i, part in enumerate(msg.parts):
             if isinstance(part, _messages.SystemPromptPart) and part.dynamic_ref:
                 # Look up the runner by its ref
-                if runner := self.system_prompt_dynamic_functions.get(part.dynamic_ref):
+                if runner := self.system_prompt_dynamic_functions.get(  # pragma: lax no cover
+                    part.dynamic_ref
+                ):
                     updated_part_content = await runner.run(run_context)
                     msg.parts[i] = _messages.SystemPromptPart(
                         updated_part_content, dynamic_ref=part.dynamic_ref
@@ -265,7 +267,7 @@ async def run(
         if self._did_stream:
             # `self._result` gets set when exiting the `stream` contextmanager, so hitting this
             # means that the stream was started but not finished before `run()` was called
-            raise exceptions.AgentRunError('You must finish streaming before calling run()')
+            raise exceptions.AgentRunError('You must finish streaming before calling run()')  # pragma: no cover

         return await self._make_request(ctx)
@@ -316,7 +318,7 @@ async def _make_request(
         self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
     ) -> CallToolsNode[DepsT, NodeRunEndT]:
         if self._result is not None:
-            return self._result
+            return self._result  # pragma: no cover

         model_settings, model_request_parameters = await self._prepare_request(ctx)
         model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
@@ -333,7 +335,7 @@ async def _prepare_request(
         ctx.state.message_history.append(self.request)

         # Check usage
-        if ctx.deps.usage_limits:
+        if ctx.deps.usage_limits:  # pragma: no branch
             ctx.deps.usage_limits.check_before_request(ctx.state.usage)

         # Increment run_step
@@ -350,7 +352,7 @@ def _finish_handling(
     ) -> CallToolsNode[DepsT, NodeRunEndT]:
         # Update usage
         ctx.state.usage.incr(response.usage)
-        if ctx.deps.usage_limits:
+        if ctx.deps.usage_limits:  # pragma: no branch
             ctx.deps.usage_limits.check_tokens(ctx.state.usage)

         # Append the model response to state.message_history
@@ -735,7 +737,7 @@ async def run_tool(ctx: RunContext[DepsT], **args: Any) -> Any:

     for server in ctx.deps.mcp_servers:
         tools = await server.list_tools()
-        if tool_name in {tool.name for tool in tools}:
+        if tool_name in {tool.name for tool in tools}:  # pragma: no branch
             return Tool(name=tool_name, function=run_tool, takes_ctx=True, max_retries=ctx.deps.default_retries)
     return None
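Most annotations in this file are `# pragma: no branch` rather than `# pragma: no cover`: with branch coverage enabled, an `if` whose condition is truthy in every test still executes, but leaves a never-taken false edge that coverage reports as a partial branch, and `no branch` excuses that edge without excluding the line. The first hunk also wraps the `.get(...)` call across lines, presumably so the appended pragma fits the line-length limit while staying on the statement's first line, where coverage attributes the branch. A small sketch of the `no branch` case, with illustrative names:

def check_before_request(usage_limits: object | None, usage: object) -> None:
    # Under `coverage run --branch`, tests that always pass real limits leave
    # the implicit else-edge untaken; the pragma excuses just that edge.
    if usage_limits:  # pragma: no branch
        print('checking', usage)  # stand-in for usage_limits.check_before_request(usage)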

pydantic_ai_slim/pydantic_ai/_cli.py

Lines changed: 1 addition & 1 deletion
@@ -202,7 +202,7 @@ def cli(args_list: Sequence[str] | None = None, *, prog_name: str = 'pai') -> int:
     elif args.code_theme == 'dark':
         code_theme = 'monokai'
     else:
-        code_theme = args.code_theme
+        code_theme = args.code_theme  # pragma: no cover

     if prompt := cast(str, args.prompt):
         try:

pydantic_ai_slim/pydantic_ai/_output.py

Lines changed: 7 additions & 7 deletions
@@ -140,8 +140,8 @@ def find_named_tool(
         self, parts: Iterable[_messages.ModelResponsePart], tool_name: str
     ) -> tuple[_messages.ToolCallPart, OutputSchemaTool[OutputDataT]] | None:
         """Find a tool that matches one of the calls, with a specific name."""
-        for part in parts:
-            if isinstance(part, _messages.ToolCallPart):
+        for part in parts:  # pragma: no branch
+            if isinstance(part, _messages.ToolCallPart):  # pragma: no branch
                 if part.tool_name == tool_name:
                     return part, self.tools[tool_name]
@@ -151,7 +151,7 @@ def find_tool(
     ) -> Iterator[tuple[_messages.ToolCallPart, OutputSchemaTool[OutputDataT]]]:
         """Find a tool that matches one of the calls."""
         for part in parts:
-            if isinstance(part, _messages.ToolCallPart):
+            if isinstance(part, _messages.ToolCallPart):  # pragma: no branch
                 if result := self.tools.get(part.tool_name):
                     yield part, result
@@ -201,7 +201,7 @@ def __init__(
             if description is None:
                 tool_description = json_schema_description
             else:
-                tool_description = f'{description}. {json_schema_description}'
+                tool_description = f'{description}. {json_schema_description}'  # pragma: no cover
         else:
             tool_description = description or DEFAULT_DESCRIPTION
         if multiple:
@@ -243,7 +243,7 @@ def validate(
                 )
                 raise ToolRetryError(m) from e
             else:
-                raise
+                raise  # pragma: lax no cover
         else:
             if k := self.tool_def.outer_typed_dict_key:
                 output = output[k]
@@ -269,11 +269,11 @@ def extract_str_from_union(output_type: Any) -> _utils.Option[Any]:
             includes_str = True
         else:
             remain_args.append(arg)
-    if includes_str:
+    if includes_str:  # pragma: no branch
        if len(remain_args) == 1:
            return _utils.Some(remain_args[0])
        else:
-            return _utils.Some(Union[tuple(remain_args)])
+            return _utils.Some(Union[tuple(remain_args)])  # pragma: no cover


 def get_union_args(tp: Any) -> tuple[Any, ...]:
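The last hunk relies on a `typing` subtlety worth calling out: subscripting `Union` with a tuple rebuilds a union type from a runtime sequence, which is how `Union[tuple(remain_args)]` reconstructs the union minus `str`. A self-contained illustration:

from typing import Union

# Subscripting Union with a tuple of types builds the union dynamically.
remain_args = [int, float, bytes]
rebuilt = Union[tuple(remain_args)]
assert rebuilt == Union[int, float, bytes]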

pydantic_ai_slim/pydantic_ai/_parts_manager.py

Lines changed: 1 addition & 1 deletion
@@ -164,7 +164,7 @@ def handle_tool_call_delta(
             if tool_name is None and self._parts:
                 part_index = len(self._parts) - 1
                 latest_part = self._parts[part_index]
-                if isinstance(latest_part, (ToolCallPart, ToolCallPartDelta)):
+                if isinstance(latest_part, (ToolCallPart, ToolCallPartDelta)):  # pragma: no branch
                     existing_matching_part_and_index = latest_part, part_index
         else:
             # vendor_part_id is provided, so look up the corresponding part or delta

pydantic_ai_slim/pydantic_ai/agent.py

Lines changed: 8 additions & 6 deletions
@@ -1002,7 +1002,7 @@ async def stream_to_final(
             final_result_details = await stream_to_final(streamed_response)
             if final_result_details is not None:
                 if yielded:
-                    raise exceptions.AgentRunError('Agent run produced final results')
+                    raise exceptions.AgentRunError('Agent run produced final results')  # pragma: no cover
                 yielded = True

                 messages = graph_ctx.state.message_history.copy()
@@ -1049,11 +1049,13 @@ async def on_complete() -> None:
                 break
             next_node = await agent_run.next(node)
             if not isinstance(next_node, _agent_graph.AgentNode):
-                raise exceptions.AgentRunError('Should have produced a StreamedRunResult before getting here')
+                raise exceptions.AgentRunError(  # pragma: no cover
+                    'Should have produced a StreamedRunResult before getting here'
+                )
             node = cast(_agent_graph.AgentNode[Any, Any], next_node)

         if not yielded:
-            raise exceptions.AgentRunError('Agent run finished without producing a final result')
+            raise exceptions.AgentRunError('Agent run finished without producing a final result')  # pragma: no cover

     @contextmanager
     def override(
@@ -1227,7 +1229,7 @@ def decorator(
         ) -> _system_prompt.SystemPromptFunc[AgentDepsT]:
             runner = _system_prompt.SystemPromptRunner[AgentDepsT](func_, dynamic=dynamic)
             self._system_prompt_functions.append(runner)
-            if dynamic:
+            if dynamic:  # pragma: lax no cover
                 self._system_prompt_dynamic_functions[func_.__qualname__] = runner
             return func_
@@ -1609,7 +1611,7 @@ def _infer_name(self, function_frame: FrameType | None) -> None:
                 if item is self:
                     self.name = name
                     return
-            if parent_frame.f_locals != parent_frame.f_globals:
+            if parent_frame.f_locals != parent_frame.f_globals:  # pragma: no branch
                 # if we couldn't find the agent in locals and globals are a different dict, try globals
                 for name, item in parent_frame.f_globals.items():
                     if item is self:
@@ -2025,7 +2027,7 @@ def usage(self) -> _usage.Usage:
         """Get usage statistics for the run so far, including token usage, model requests, and so on."""
         return self._graph_run.state.usage

-    def __repr__(self) -> str:
+    def __repr__(self) -> str:  # pragma: no cover
         result = self._graph_run.result
         result_repr = '<run not finished>' if result is None else repr(result.output)
         return f'<{type(self).__name__} result={result_repr} usage={self.usage()}>'
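For context on the `_infer_name` hunk: the agent infers its own name by scanning the calling frame for a variable bound to `self`, trying locals first and falling back to globals only when the two differ (at module level they are the same dict, which is the branch being annotated). A rough sketch of the idiom, not the library code:

import inspect


def infer_name(obj: object) -> str | None:
    """Find the name `obj` is bound to in the caller's scope (illustrative)."""
    frame = inspect.currentframe()
    caller = frame.f_back if frame is not None else None
    if caller is None:
        return None
    for name, item in caller.f_locals.items():
        if item is obj:
            return name
    # At module level, f_locals and f_globals are the same mapping, so only
    # fall back to globals when they differ -- the branch marked `no branch`.
    if caller.f_locals != caller.f_globals:
        for name, item in caller.f_globals.items():
            if item is obj:
                return name
    return None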

pydantic_ai_slim/pydantic_ai/exceptions.py

Lines changed: 2 additions & 2 deletions
@@ -4,9 +4,9 @@
 import sys

 if sys.version_info < (3, 11):
-    from exceptiongroup import ExceptionGroup
+    from exceptiongroup import ExceptionGroup  # pragma: lax no cover
 else:
-    ExceptionGroup = ExceptionGroup
+    ExceptionGroup = ExceptionGroup  # pragma: lax no cover

 __all__ = (
     'ModelRetry',
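Both branches take the lax form because which one executes depends on the interpreter: a 3.10 CI job covers the backport import while the 3.11+ line sits uncovered, and vice versa, so a strict `no cover` would fail on whichever job does run the line. The same pattern shows up with other stdlib backports; an illustrative example:

import sys

# Exactly one branch runs per interpreter, so each line is covered on some
# CI jobs and uncovered on others -- hence lax rather than strict pragmas.
if sys.version_info < (3, 11):
    import tomli as tomllib  # pragma: lax no cover
else:
    import tomllib  # pragma: lax no cover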

pydantic_ai_slim/pydantic_ai/messages.py

Lines changed: 6 additions & 4 deletions
@@ -326,7 +326,7 @@ def otel_event(self, settings: InstrumentationSettings) -> Event:
                     converted_part['binary_content'] = base64.b64encode(part.data).decode()
                 content.append(converted_part)
             else:
-                content.append({'kind': part.kind})
+                content.append({'kind': part.kind})  # pragma: no cover
         return Event('gen_ai.user.message', body={'content': content, 'role': 'user'})
@@ -363,7 +363,7 @@ def model_response_object(self) -> dict[str, Any]:
         """Return a dictionary representation of the content, wrapping non-dict types appropriately."""
         # gemini supports JSON dict return values, but no other JSON types, hence we wrap anything else in a dict
         if isinstance(self.content, dict):
-            return tool_return_ta.dump_python(self.content, mode='json')  # pyright: ignore[reportUnknownMemberType]
+            return tool_return_ta.dump_python(self.content, mode='json')  # pyright: ignore[reportUnknownMemberType] # pragma: no cover
         else:
             return {'return_value': tool_return_ta.dump_python(self.content, mode='json')}
@@ -625,7 +625,7 @@ def apply(self, part: ModelResponsePart) -> TextPart:
             ValueError: If `part` is not a `TextPart`.
         """
         if not isinstance(part, TextPart):
-            raise ValueError('Cannot apply TextPartDeltas to non-TextParts')
+            raise ValueError('Cannot apply TextPartDeltas to non-TextParts')  # pragma: no cover
         return replace(part, content=part.content + self.content_delta)
@@ -688,7 +688,9 @@ def apply(self, part: ModelResponsePart | ToolCallPartDelta) -> ToolCallPart | ToolCallPartDelta:
         if isinstance(part, ToolCallPartDelta):
             return self._apply_to_delta(part)

-        raise ValueError(f'Can only apply ToolCallPartDeltas to ToolCallParts or ToolCallPartDeltas, not {part}')
+        raise ValueError(  # pragma: no cover
+            f'Can only apply ToolCallPartDeltas to ToolCallParts or ToolCallPartDeltas, not {part}'
+        )

     def _apply_to_delta(self, delta: ToolCallPartDelta) -> ToolCallPart | ToolCallPartDelta:
         """Internal helper to apply this delta to another delta."""

pydantic_ai_slim/pydantic_ai/models/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -485,7 +485,7 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
         raise UserError(f'Unknown model: {model}')

     if provider == 'vertexai':
-        provider = 'google-vertex'
+        provider = 'google-vertex'  # pragma: no cover

     if provider == 'cohere':
         from .cohere import CohereModel
@@ -516,7 +516,7 @@ def infer_model(model: Model | KnownModelName | str) -> Model:

         return BedrockConverseModel(model_name, provider=provider)
     else:
-        raise UserError(f'Unknown model: {model}')
+        raise UserError(f'Unknown model: {model}')  # pragma: no cover


 def cached_async_http_client(*, provider: str | None = None, timeout: int = 600, connect: int = 5) -> httpx.AsyncClient:
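The two annotated `UserError` lines bracket `infer_model`'s handling of `'provider:model'` strings, where unknown providers raise and the legacy `vertexai` alias is rewritten to `google-vertex`. A hypothetical sketch of that convention (the helper name and error type here are illustrative, not the library's API):

def split_model_string(model: str) -> tuple[str, str]:
    """Split 'openai:gpt-4o' into ('openai', 'gpt-4o'); illustrative only."""
    provider, sep, model_name = model.partition(':')
    if not sep:
        raise ValueError(f'Unknown model: {model}')
    return provider, model_name


assert split_model_string('openai:gpt-4o') == ('openai', 'gpt-4o')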

pydantic_ai_slim/pydantic_ai/models/anthropic.py

Lines changed: 16 additions & 11 deletions
@@ -244,7 +244,7 @@ async def _messages_create(
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
                 raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
-            raise
+            raise  # pragma: lax no cover

     def _process_response(self, response: AnthropicMessage) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
@@ -268,7 +268,7 @@ async def _process_streamed_response(self, response: AsyncStream[RawMessageStreamEvent]):
         peekable_response = _utils.PeekableAsyncStream(response)
         first_chunk = await peekable_response.peek()
         if isinstance(first_chunk, _utils.Unset):
-            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')
+            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')  # pragma: no cover

         # Since Anthropic doesn't provide a timestamp in the message, we'll use the current time
         timestamp = datetime.now(tz=timezone.utc)
@@ -305,9 +305,10 @@ async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[MessageParam]]:
                         is_error=False,
                     )
                     user_content_params.append(tool_result_block_param)
-                elif isinstance(request_part, RetryPromptPart):
+                elif isinstance(request_part, RetryPromptPart):  # pragma: no branch
                     if request_part.tool_name is None:
-                        retry_param = TextBlockParam(type='text', text=request_part.model_response())
+                        text = request_part.model_response()  # pragma: no cover
+                        retry_param = TextBlockParam(type='text', text=text)  # pragma: no cover
                     else:
                         retry_param = ToolResultBlockParam(
                             tool_use_id=_guard_tool_call_id(t=request_part),
@@ -380,7 +381,7 @@ async def _map_user_prompt(
                 else:  # pragma: no cover
                     raise RuntimeError(f'Unsupported media type: {item.media_type}')
             else:
-                raise RuntimeError(f'Unsupported content type: {type(item)}')
+                raise RuntimeError(f'Unsupported content type: {type(item)}')  # pragma: no cover

     @staticmethod
     def _map_tool_definition(f: ToolDefinition) -> ToolParam:
@@ -447,21 +448,25 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
             if isinstance(event, RawContentBlockStartEvent):
                 current_block = event.content_block
                 if isinstance(current_block, TextBlock) and current_block.text:
-                    yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=current_block.text)
-                elif isinstance(current_block, ToolUseBlock):
+                    yield self._parts_manager.handle_text_delta(  # pragma: lax no cover
+                        vendor_part_id='content', content=current_block.text
+                    )
+                elif isinstance(current_block, ToolUseBlock):  # pragma: no branch
                     maybe_event = self._parts_manager.handle_tool_call_delta(
                         vendor_part_id=current_block.id,
                         tool_name=current_block.name,
                         args=cast(dict[str, Any], current_block.input),
                         tool_call_id=current_block.id,
                     )
-                    if maybe_event is not None:
+                    if maybe_event is not None:  # pragma: no branch
                         yield maybe_event

             elif isinstance(event, RawContentBlockDeltaEvent):
                 if isinstance(event.delta, TextDelta):
-                    yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=event.delta.text)
-                elif (
+                    yield self._parts_manager.handle_text_delta(  # pragma: no cover
+                        vendor_part_id='content', content=event.delta.text
+                    )
+                elif (  # pragma: no branch
                     current_block and event.delta.type == 'input_json_delta' and isinstance(current_block, ToolUseBlock)
                 ):
                     # Try to parse the JSON immediately, otherwise cache the value for later. This handles
@@ -480,7 +485,7 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
                         args=parsed_args,
                         tool_call_id=current_block.id,
                     )
-                    if maybe_event is not None:
+                    if maybe_event is not None:  # pragma: no branch
                         yield maybe_event

             elif isinstance(event, (RawContentBlockStopEvent, RawMessageStopEvent)):
