From 5dc3bad8bbfa49569e67f17dbb1aa7367301b149 Mon Sep 17 00:00:00 2001
From: Assad Yousuf
Date: Sat, 5 Jul 2025 16:58:50 -0700
Subject: [PATCH] refactor: reduce CLI complexity and update help output

---
 clai/README.md                       |  48 ++---
 pydantic_ai_slim/pydantic_ai/_cli.py | 264 ++++++++++++++++++---------
 pydantic_ai_slim/pyproject.toml      |   2 +-
 tests/test_cli.py                    |  75 +++++---
 4 files changed, 248 insertions(+), 141 deletions(-)

diff --git a/clai/README.md b/clai/README.md
index 8899a82ff..e7d79391e 100644
--- a/clai/README.md
+++ b/clai/README.md
@@ -53,27 +53,26 @@ Either way, running `clai` will start an interactive session where you can chat
 ## Help
 
 ```
-usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]
-
-PydanticAI CLI v...
-
-Special prompts:
-* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
-* `/markdown` - show the last markdown output of the last question
-* `/multiline` - toggle multiline mode
-
-positional arguments:
-  prompt                AI Prompt, if omitted fall into interactive mode
-
-options:
-  -h, --help            show this help message and exit
-  -m [MODEL], --model [MODEL]
-                        Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4o" or "anthropic:claude-3-7-sonnet-latest". Defaults to "openai:gpt-4o".
-  -a AGENT, --agent AGENT
-                        Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
-  -l, --list-models     List all available models and exit
-  -t [CODE_THEME], --code-theme [CODE_THEME]
-                        Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
-  --no-stream           Disable streaming from the model
-  --version             Show version and exit
+Usage: clai [OPTIONS] [PROMPT]...
+
+  PydanticAI CLI v...
+
+  Special prompts:
+  * `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
+  * `/markdown` - show the markdown output of the last question
+  * `/multiline` - toggle multiline mode
+
+Options:
+  -m, --model MODEL      Model to use, in format "<provider>:<model>" e.g.
+                         "openai:gpt-4o" or "anthropic:claude-3-7-sonnet-
+                         latest". Defaults to "openai:gpt-4o".
+  -a, --agent TEXT       Custom Agent to use, in format "module:variable",
+                         e.g. "mymodule.submodule:my_agent"
+  -l, --list-models      List all available models and exit
+  -t, --code-theme TEXT  Which colors to use for code, can be "dark", "light"
+                         or any theme from pygments.org/styles/. Defaults to
+                         "dark" which works well on dark terminals.
+  --no-stream            Disable streaming from the model
+  --version              Show version and exit
+  -h, --help             Show this message and exit.
```

diff --git a/pydantic_ai_slim/pydantic_ai/_cli.py b/pydantic_ai_slim/pydantic_ai/_cli.py
index 894f630c1..d4cdca0e6 100644
--- a/pydantic_ai_slim/pydantic_ai/_cli.py
+++ b/pydantic_ai_slim/pydantic_ai/_cli.py
@@ -1,17 +1,16 @@
 from __future__ import annotations as _annotations
 
-import argparse
 import asyncio
 import importlib
 import os
 import sys
 from asyncio import CancelledError
-from collections.abc import Sequence
 from contextlib import ExitStack
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Any, cast
+from typing import Any
 
+import click
 from typing_inspection.introspection import get_literal_values
 
 from . import __version__
@@ -19,11 +18,10 @@
 from .agent import Agent
 from .exceptions import UserError
 from .messages import ModelMessage
-from .models import KnownModelName, infer_model
+from .models import KnownModelName, Model, infer_model
 from .output import OutputDataT
 
 try:
-    import argcomplete
     from prompt_toolkit import PromptSession
     from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
    from prompt_toolkit.buffer import Buffer
@@ -64,7 +62,13 @@ class SimpleCodeBlock(CodeBlock):
     def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
         code = str(self.text).rstrip()
         yield Text(self.lexer_name, style='dim')
-        yield Syntax(code, self.lexer_name, theme=self.theme, background_color='default', word_wrap=True)
+        yield Syntax(
+            code,
+            self.lexer_name,
+            theme=self.theme,
+            background_color='default',
+            word_wrap=True,
+        )
         yield Text(f'/{self.lexer_name}', style='dim')
 
 
@@ -101,119 +105,197 @@ def cli_exit(prog_name: str = 'pai'):  # pragma: no cover
     sys.exit(cli(prog_name=prog_name))
 
 
-def cli(args_list: Sequence[str] | None = None, *, prog_name: str = 'pai') -> int:  # noqa: C901
-    """Run the CLI and return the exit code for the process."""
-    parser = argparse.ArgumentParser(
-        prog=prog_name,
-        description=f"""\
-PydanticAI CLI v{__version__}\n\n
+# we don't want to autocomplete or list models that don't include the provider,
+# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
+qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
 
-Special prompts:
-* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
-* `/markdown` - show the last markdown output of the last question
-* `/multiline` - toggle multiline mode
-""",
-        formatter_class=argparse.RawTextHelpFormatter,
-    )
-    parser.add_argument('prompt', nargs='?', help='AI Prompt, if omitted fall into interactive mode')
-    arg = parser.add_argument(
-        '-m',
-        '--model',
-        nargs='?',
-        help='Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4o" or "anthropic:claude-3-7-sonnet-latest". Defaults to "openai:gpt-4o".',
-    )
-    # we don't want to autocomplete or list models that don't include the provider,
-    # e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
-    qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
-    arg.completer = argcomplete.ChoicesCompleter(qualified_model_names)  # type: ignore[reportPrivateUsage]
-    parser.add_argument(
-        '-a',
-        '--agent',
-        help='Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"',
-    )
-    parser.add_argument(
-        '-l',
-        '--list-models',
-        action='store_true',
-        help='List all available models and exit',
-    )
-    parser.add_argument(
-        '-t',
-        '--code-theme',
-        nargs='?',
-        help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
-        default='dark',
-    )
-    parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')
-    parser.add_argument('--version', action='store_true', help='Show version and exit')
-
-    argcomplete.autocomplete(parser)
-    args = parser.parse_args(args_list)
-
-    console = Console()
-    name_version = f'[green]{prog_name} - PydanticAI CLI v{__version__}[/green]'
-    if args.version:
+
+def _handle_version_and_list(
+    console: Console,
+    name_version: str,
+    version: bool,
+    list_models: bool,
+) -> int | None:
+    """Handle --version and --list-models flags."""
+    if version:
         console.print(name_version, highlight=False)
         return 0
-    if args.list_models:
+    if list_models:
         console.print(f'{name_version}\n\n[green]Available models:[/green]')
         for model in qualified_model_names:
             console.print(f'  {model}', highlight=False)
         return 0
+    return None
+
 
+def _setup_agent(
+    console: Console,
+    agent_path: str | None,
+    model_name: str | None,
+) -> tuple[Agent[None, str], int | None]:
+    """Set up the agent based on command line arguments."""
     agent: Agent[None, str] = cli_agent
-    if args.agent:
+    if agent_path:
         sys.path.append(os.getcwd())
         try:
-            module_path, variable_name = args.agent.split(':')
+            module_path, variable_name = agent_path.split(':')
         except ValueError:
             console.print('[red]Error: Agent must be specified in "module:variable" format[/red]')
-            return 1
+            return agent, 1
 
         module = importlib.import_module(module_path)
         agent = getattr(module, variable_name)
         if not isinstance(agent, Agent):
-            console.print(f'[red]Error: {args.agent} is not an Agent instance[/red]')
-            return 1
+            console.print(f'[red]Error: {agent_path} is not an Agent instance[/red]')
+            return agent, 1
 
-    model_arg_set = args.model is not None
+    model_arg_set = model_name is not None
     if agent.model is None or model_arg_set:
         try:
-            agent.model = infer_model(args.model or 'openai:gpt-4o')
+            agent.model = infer_model(model_name or 'openai:gpt-4o')
         except UserError as e:
-            console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
-            return 1
+            console.print(f'Error initializing [magenta]{model_name}[/magenta]:\n[red]{e}[/red]')
+            return agent, 1
+
+    return agent, None
 
-    model_name = agent.model if isinstance(agent.model, str) else f'{agent.model.system}:{agent.model.model_name}'
-    if args.agent and model_arg_set:
+
+def _print_agent_info(
+    console: Console,
+    name_version: str,
+    agent: Agent[None, str],
+    agent_path: str | None,
+    model_name: str | None,
+) -> None:
+    """Print agent and model information."""
+    if isinstance(agent.model, str):
+        model_display = agent.model
+    elif isinstance(agent.model, Model):
+        model_display = f'{agent.model.system}:{agent.model.model_name}'
+    else:
+        model_display = 'unknown'
+
+    if agent_path and model_name is not None:
         console.print(
-            f'{name_version} using custom agent [magenta]{args.agent}[/magenta] with [magenta]{model_name}[/magenta]',
+            f'{name_version} using custom agent [magenta]{agent_path}[/magenta] with [magenta]{model_display}[/magenta]',
+            highlight=False,
+        )
+    elif agent_path:
+        console.print(
+            f'{name_version} using custom agent [magenta]{agent_path}[/magenta]',
             highlight=False,
         )
-    elif args.agent:
-        console.print(f'{name_version} using custom agent [magenta]{args.agent}[/magenta]', highlight=False)
     else:
-        console.print(f'{name_version} with [magenta]{model_name}[/magenta]', highlight=False)
+        console.print(
+            f'{name_version} with [magenta]{model_display}[/magenta]',
+            highlight=False,
+        )
 
-    stream = not args.no_stream
-    if args.code_theme == 'light':
-        code_theme = 'default'
-    elif args.code_theme == 'dark':
-        code_theme = 'monokai'
-    else:
-        code_theme = args.code_theme  # pragma: no cover
 
-    if prompt := cast(str, args.prompt):
+def _handle_prompt(
+    console: Console,
+    prog_name: str,
+    prompt: tuple[str, ...],
+    agent: Agent[None, str],
+    no_stream: bool,
+    code_theme: str,
+) -> int:
+    """Run a one-shot prompt if given, otherwise start interactive mode."""
+    stream = not no_stream
+    if prompt:
+        # If a prompt is provided, run it and exit
+        prompt_str = ' '.join(prompt)
         try:
-            asyncio.run(ask_agent(agent, prompt, stream, console, code_theme))
-        except KeyboardInterrupt:
-            pass
-        return 0
+            asyncio.run(ask_agent(agent, prompt_str, stream, console, code_theme))
+            return 0
+        except (KeyboardInterrupt, CancelledError):
+            return 0
+        except Exception as e:
+            console.print(f'[red]Error: {e}[/red]')
+            return 1
+    else:
+        # Otherwise, start interactive mode, propagating the exit code from `/exit`
+        try:
+            return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
+        except (KeyboardInterrupt, CancelledError):
+            return 0
+        except Exception as e:
+            console.print(f'[red]Error: {e}[/red]')
+            return 1
 
-    try:
-        return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
-    except KeyboardInterrupt:  # pragma: no cover
-        return 0
 
+@click.command(
+    name='pai',
+    help=f"""
+PydanticAI CLI v{__version__}
+
+\b
+Special prompts:
+* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
+* `/markdown` - show the markdown output of the last question
+* `/multiline` - toggle multiline mode
+""",
+    context_settings={'help_option_names': ['-h', '--help']},
+)
+@click.argument('prompt', nargs=-1)
+@click.option(
+    '-m',
+    '--model',
+    'model_name',
+    metavar='MODEL',
+    shell_complete=lambda ctx, param, incomplete: [n for n in qualified_model_names if n.startswith(incomplete)],
+    help='Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4o" or "anthropic:claude-3-7-sonnet-latest". Defaults to "openai:gpt-4o".',
+)
+@click.option(
+    '-a',
+    '--agent',
+    'agent_path',
+    help='Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"',
+)
+@click.option(
+    '-l',
+    '--list-models',
+    is_flag=True,
+    help='List all available models and exit',
+)
+@click.option(
+    '-t',
+    '--code-theme',
+    default='dark',
+    help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
+)
+@click.option('--no-stream', is_flag=True, help='Disable streaming from the model')
+@click.option('--version', is_flag=True, help='Show version and exit')
+@click.pass_context
+def _cli_command(
+    ctx: click.Context,
+    prompt: tuple[str, ...],
+    model_name: str | None,
+    agent_path: str | None,
+    list_models: bool,
+    code_theme: str,
+    no_stream: bool,
+    version: bool,
+) -> int:
+    """Click entry point; returns an exit code instead of calling sys.exit."""
+    console = Console()
+    prog_name = ctx.find_root().info_name or 'pai'
+    name_version = f'{prog_name} - PydanticAI CLI v{__version__}'
+
+    # Handle version and list-models flags; check `is not None` because 0 is a
+    # valid (and falsy) exit code
+    if (result := _handle_version_and_list(console, name_version, version, list_models)) is not None:
+        return result
+
+    # Set up the agent
+    agent, error_code = _setup_agent(console, agent_path, model_name)
+    if error_code is not None:
+        return error_code
+
+    # Print agent info
+    _print_agent_info(console, name_version, agent, agent_path, model_name)
+
+    # Map the theme shortcuts to real pygments themes, as the argparse CLI did
+    if code_theme == 'light':
+        code_theme = 'default'
+    elif code_theme == 'dark':
+        code_theme = 'monokai'
+
+    # Handle prompt or start interactive mode
+    return _handle_prompt(console, prog_name, prompt, agent, no_stream, code_theme)
+
+
+def cli(args_list: list[str] | None = None, *, prog_name: str = 'pai') -> int:
+    """Run the CLI and return the exit code for the process."""
+    try:
+        return _cli_command.main(args=args_list, prog_name=prog_name, standalone_mode=False) or 0
+    except click.ClickException as e:
+        e.show()
+        return e.exit_code
 
 
 async def run_chat(
@@ -312,7 +394,11 @@ def get_suggestion(self, buffer: Buffer, document: Document) -> Suggestion | Non
 
 
 def handle_slash_command(
-    ident_prompt: str, messages: list[ModelMessage], multiline: bool, console: Console, code_theme: str
+    ident_prompt: str,
+    messages: list[ModelMessage],
+    multiline: bool,
+    console: Console,
+    code_theme: str,
 ) -> tuple[int | None, bool]:
     if ident_prompt == '/markdown':
         try:
diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml
index a04bd07c5..84f4239f3 100644
--- a/pydantic_ai_slim/pyproject.toml
+++ b/pydantic_ai_slim/pyproject.toml
@@ -73,7 +73,7 @@ bedrock = ["boto3>=1.37.24"]
 duckduckgo = ["duckduckgo-search>=7.0.0"]
 tavily = ["tavily-python>=0.5.0"]
 # CLI
-cli = ["rich>=13", "prompt-toolkit>=3", "argcomplete>=3.5.0"]
+cli = ["rich>=13", "prompt-toolkit>=3", "click>=8.1.7"]
 # MCP
 mcp = ["mcp>=1.9.4; python_version >= '3.10'"]
 # Evals
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 024116249..0f1409e2f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -35,7 +35,9 @@ def test_cli_version(capfd: CaptureFixture[str]):
 
 def test_invalid_model(capfd: CaptureFixture[str]):
     assert cli(['--model', 'potato']) == 1
-    assert capfd.readouterr().out.splitlines() == snapshot(['Error initializing potato:', 'Unknown model: potato'])
+    output = capfd.readouterr().out
+    assert 'Error initializing potato:' in output
+    assert 'Unknown model: potato' in output
 
 
 @pytest.fixture
@@ -106,7 +108,7 @@ def test_agent_flag_set_model(
 
     mocker.patch('pydantic_ai._cli.ask_agent')
 
-    assert cli(['--agent', 'test_module:custom_agent', '--model', 'gpt-4o', 'hello']) == 0
+    assert cli(['--agent', 'test_module:custom_agent', '--model', 'openai:gpt-4o', 'hello']) == 0
 
     assert 'using custom agent test_module:custom_agent with openai:gpt-4o' in capfd.readouterr().out.replace('\n', '')
 
@@ -114,7 +116,9 @@
 
 def test_agent_flag_non_agent(
-    capfd: CaptureFixture[str], mocker: MockerFixture, create_test_module: Callable[..., None]
+    capfd: CaptureFixture[str],
+    mocker: MockerFixture,
+    create_test_module: Callable[..., None],
 ):
     test_agent = 'Not an Agent object'
     create_test_module(custom_agent=test_agent)
@@ -133,31 +137,38 @@ def test_list_models(capfd: CaptureFixture[str]):
     output = capfd.readouterr().out.splitlines()
     assert output[:3] == snapshot([IsStr(regex='pai - PydanticAI CLI .*'), '', 'Available models:'])
 
-    providers = (
-        'openai',
-        'anthropic',
-        'bedrock',
-        'google-vertex',
-        'google-gla',
-        'groq',
-        'mistral',
-        'cohere',
-        'deepseek',
-        'heroku',
-    )
-    models = {line.strip().split(' ')[0] for line in output[3:]}
-    for provider in providers:
-        models = models - {model for model in models if model.startswith(provider)}
-    assert models == set(), models
+    # All models should be in provider:model format
+    for line in output[3:]:
+        if line.strip():
+            model_name = line.strip().split()[0]
+            assert ':' in model_name, f'Model {model_name} should be in provider:model format'
 
 
 def test_cli_prompt(capfd: CaptureFixture[str], env: TestEnv):
     env.set('OPENAI_API_KEY', 'test')
     with cli_agent.override(model=TestModel(custom_output_text='# result\n\n```py\nx = 1\n```')):
         assert cli(['hello']) == 0
-        assert capfd.readouterr().out.splitlines() == snapshot([IsStr(), '# result', '', 'py', 'x = 1', '/py'])
+        assert capfd.readouterr().out.splitlines() == snapshot(
+            [
+                IsStr(regex='pai - PydanticAI CLI .*'),
+                '# result',
+                '',
+                'py',
+                'x = 1',
+                '/py',
+            ]
+        )
         assert cli(['--no-stream', 'hello']) == 0
-        assert capfd.readouterr().out.splitlines() == snapshot([IsStr(), '# result', '', 'py', 'x = 1', '/py'])
+        assert capfd.readouterr().out.splitlines() == snapshot(
+            [
+                IsStr(regex='pai - PydanticAI CLI .*'),
+                '# result',
+                '',
+                'py',
+                'x = 1',
+                '/py',
+            ]
+        )
 
 
@@ -170,12 +181,12 @@ def test_chat(capfd: CaptureFixture[str], mocker: MockerFixture, env: TestEnv):
     session = PromptSession[Any](input=inp, output=DummyOutput())
     m = mocker.patch('pydantic_ai._cli.PromptSession', return_value=session)
     m.return_value = session
-    m = TestModel(custom_output_text='goodbye')
-    with cli_agent.override(model=m):
+    model = TestModel(custom_output_text='goodbye')
+    with cli_agent.override(model=model):
         assert cli([]) == 0
     assert capfd.readouterr().out.splitlines() == snapshot(
         [
-            IsStr(),
+            IsStr(regex='pai - PydanticAI CLI .*'),
             IsStr(regex='goodbye *Markdown output of last question:'),
             '',
             'goodbye',
@@ -192,11 +203,13 @@ def test_handle_slash_command_markdown():
     messages: list[ModelMessage] = [ModelResponse(parts=[TextPart('[hello](# hello)'), ToolCallPart('foo', '{}')])]
     io = StringIO()
     assert handle_slash_command('/markdown', messages, True, Console(file=io), 'default') == (None, True)
-    assert io.getvalue() == snapshot("""\
+    assert io.getvalue() == snapshot(
+        """\
 Markdown output of last question:
 
 [hello](# hello)
-""")
+"""
+    )
 
 
 def test_handle_slash_command_multiline():
@@ -211,13 +224,19 @@ def test_handle_slash_command_multiline():
 
 def test_handle_slash_command_exit():
     io = StringIO()
-    assert handle_slash_command('/exit', [], False, Console(file=io), 'default') == (0, False)
+    assert handle_slash_command('/exit', [], False, Console(file=io), 'default') == (
+        0,
+        False,
+    )
     assert io.getvalue() == snapshot('Exiting…\n')
 
 
 def test_handle_slash_command_other():
     io = StringIO()
-    assert handle_slash_command('/foobar', [], False, Console(file=io), 'default') == (None, False)
+    assert handle_slash_command('/foobar', [], False, Console(file=io), 'default') == (
+        None,
+        False,
+    )
     assert io.getvalue() == snapshot('Unknown command `/foobar`\n')
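A note on the `cli()` wrapper in the patch: click's default `standalone_mode=True` swallows the callback's return value and calls `sys.exit()` itself, so a test like `assert cli(['hello']) == 0` would crash with `SystemExit` rather than observe an exit code. The wrapper therefore invokes the command with `standalone_mode=False`. A minimal, self-contained sketch of the difference (the `demo` command here is hypothetical, not part of the patch):

```python
from __future__ import annotations

import click


@click.command()
@click.option('--fail', is_flag=True, help='Return a non-zero exit code')
def demo(fail: bool) -> int:
    """Toy command whose callback returns an exit code, like _cli_command."""
    return 1 if fail else 0


# demo(['--fail']) would raise SystemExit(1): standalone mode never returns.
# With standalone_mode=False, main() hands back the callback's return value:
assert demo.main(['--fail'], standalone_mode=False) == 1
assert demo.main([], standalone_mode=False) == 0
```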
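Relatedly, the `is not None` guard in `_cli_command` matters because `--version` and `--list-models` return `0`, which is falsy: a bare `if result := ...` condition would skip the early return and fall through to normal execution. In miniature:

```python
from __future__ import annotations


def handle(flag: bool) -> int | None:
    """Return an exit code (0) when the flag was handled, else None."""
    return 0 if flag else None


result = handle(True)
assert not result          # 0 is falsy, so `if result := handle(True):` is never taken
assert result is not None  # the explicit check correctly distinguishes 0 from None
```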
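Finally, the patch trades `argcomplete` for click's built-in shell completion, wired through the `shell_complete` callback on the `--model` option, which also keeps the option free-form so custom model names still reach `infer_model`. A sketch of the hook, assuming click 8.x (`MODELS` stands in for `qualified_model_names`, and `demo` is again hypothetical):

```python
from __future__ import annotations

import click

MODELS = ['openai:gpt-4o', 'anthropic:claude-3-7-sonnet-latest']


def complete_model(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[str]:
    # click wraps plain strings in CompletionItem objects before rendering them
    return [m for m in MODELS if m.startswith(incomplete)]


@click.command()
@click.option('-m', '--model', metavar='MODEL', shell_complete=complete_model)
def demo(model: str | None) -> None:
    click.echo(model or 'openai:gpt-4o')
```

Completion is then activated per shell via click's environment-variable protocol, e.g. `_CLAI_COMPLETE=bash_source clai` emits a bash completion script for a `clai` entry point.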