From 55ef2041a54786e34af8973c62559a35a326e07d Mon Sep 17 00:00:00 2001
From: Koudai Aono
Date: Tue, 21 Oct 2025 10:29:58 +0900
Subject: [PATCH] Add public provider property to BaseClient

---
 .../llm/clients/anthropic/clients.py          | 26 ++++++++++++-------
 python/mirascope/llm/clients/base/client.py   | 15 ++++++++++-
 .../mirascope/llm/clients/google/clients.py   | 26 ++++++++++++-------
 .../llm/clients/openai/completions/clients.py | 26 ++++++++++++-------
 .../llm/clients/openai/responses/clients.py   | 26 ++++++++++++-------
 5 files changed, 82 insertions(+), 37 deletions(-)

diff --git a/python/mirascope/llm/clients/anthropic/clients.py b/python/mirascope/llm/clients/anthropic/clients.py
index 01bc358cc..801278b21 100644
--- a/python/mirascope/llm/clients/anthropic/clients.py
+++ b/python/mirascope/llm/clients/anthropic/clients.py
@@ -4,7 +4,7 @@
 from collections.abc import Sequence
 from contextvars import ContextVar
 from functools import lru_cache
-from typing import overload
+from typing import TYPE_CHECKING, overload
 from typing_extensions import Unpack
 
 from anthropic import Anthropic, AsyncAnthropic
@@ -36,6 +36,9 @@
 from . import _utils
 from .model_ids import AnthropicModelId
 
+if TYPE_CHECKING:
+    from ..providers import Provider
+
 ANTHROPIC_CLIENT_CONTEXT: ContextVar["AnthropicClient | None"] = ContextVar(
     "ANTHROPIC_CLIENT_CONTEXT", default=None
 )
@@ -87,6 +90,11 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
     def _context_var(self) -> ContextVar["AnthropicClient | None"]:
         return ANTHROPIC_CLIENT_CONTEXT
 
+    @property
+    def provider(self) -> "Provider":
+        """Return the provider name for this client."""
+        return "anthropic"
+
     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
     ) -> None:
@@ -170,7 +178,7 @@ def call(
 
         return Response(
             raw=anthropic_response,
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -269,7 +277,7 @@ def context_call(
 
         return ContextResponse(
             raw=anthropic_response,
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -355,7 +363,7 @@ async def call_async(
 
         return AsyncResponse(
             raw=anthropic_response,
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -454,7 +462,7 @@ async def context_call_async(
 
         return AsyncContextResponse(
             raw=anthropic_response,
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -537,7 +545,7 @@ def stream(
         chunk_iterator = _utils.decode_stream(anthropic_stream)
 
         return StreamResponse(
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -632,7 +640,7 @@ def context_stream(
         chunk_iterator = _utils.decode_stream(anthropic_stream)
 
         return ContextStreamResponse(
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -714,7 +722,7 @@ async def stream_async(
         chunk_iterator = _utils.decode_async_stream(anthropic_stream)
 
         return AsyncStreamResponse(
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -809,7 +817,7 @@ async def context_stream_async(
         chunk_iterator = _utils.decode_async_stream(anthropic_stream)
 
         return AsyncContextStreamResponse(
-            provider="anthropic",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
diff --git a/python/mirascope/llm/clients/base/client.py b/python/mirascope/llm/clients/base/client.py
index 09a262b41..1533d7453 100644
--- a/python/mirascope/llm/clients/base/client.py
+++ b/python/mirascope/llm/clients/base/client.py
@@ -6,7 +6,7 @@
 from collections.abc import Sequence
 from contextvars import ContextVar, Token
 from types import TracebackType
-from typing import Generic, overload
+from typing import TYPE_CHECKING, Generic, overload
 from typing_extensions import Self, TypeVar, Unpack
 
 from ...context import Context, DepsT
@@ -34,6 +34,9 @@
 )
 from .params import Params
 
+if TYPE_CHECKING:
+    from ..providers import Provider
+
 ModelIdT = TypeVar("ModelIdT", bound=str)
 ProviderClientT = TypeVar("ProviderClientT")
 
@@ -57,6 +60,16 @@ def _context_var(self) -> ContextVar:
         """The ContextVar for this client type."""
         ...
 
+    @property
+    @abstractmethod
+    def provider(self) -> "Provider":
+        """The provider name for this client.
+
+        Subclasses may override this property, e.g. when a mirrored or
+        wrapped client should report a different provider name.
+        """
+        ...
+
     def __enter__(self) -> Self:
         """Sets the client context and stores the token."""
         self._token = self._context_var.set(self)
diff --git a/python/mirascope/llm/clients/google/clients.py b/python/mirascope/llm/clients/google/clients.py
index 4455ee4cd..8892526e4 100644
--- a/python/mirascope/llm/clients/google/clients.py
+++ b/python/mirascope/llm/clients/google/clients.py
@@ -4,7 +4,7 @@
 from collections.abc import Sequence
 from contextvars import ContextVar
 from functools import lru_cache
-from typing import overload
+from typing import TYPE_CHECKING, overload
 from typing_extensions import Unpack
 
 from google.genai import Client
@@ -37,6 +37,9 @@
 from . import _utils
 from .model_ids import GoogleModelId
 
+if TYPE_CHECKING:
+    from ..providers import Provider
+
 GOOGLE_CLIENT_CONTEXT: ContextVar["GoogleClient | None"] = ContextVar(
     "GOOGLE_CLIENT_CONTEXT", default=None
 )
@@ -86,6 +89,11 @@ class GoogleClient(BaseClient[GoogleModelId, Client]):
     def _context_var(self) -> ContextVar["GoogleClient | None"]:
         return GOOGLE_CLIENT_CONTEXT
 
+    @property
+    def provider(self) -> "Provider":
+        """Return the provider name for this client."""
+        return "google"
+
     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
     ) -> None:
@@ -176,7 +184,7 @@ def call(
 
         return Response(
             raw=google_response,
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -279,7 +287,7 @@ def context_call(
 
         return ContextResponse(
             raw=google_response,
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -369,7 +377,7 @@ async def call_async(
 
         return AsyncResponse(
             raw=google_response,
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -472,7 +480,7 @@ async def context_call_async(
 
         return AsyncContextResponse(
             raw=google_response,
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -559,7 +567,7 @@ def stream(
         chunk_iterator = _utils.decode_stream(google_stream)
 
         return StreamResponse(
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -658,7 +666,7 @@ def context_stream(
         chunk_iterator = _utils.decode_stream(google_stream)
 
         return ContextStreamResponse(
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -744,7 +752,7 @@ async def stream_async(
         chunk_iterator = _utils.decode_async_stream(google_stream)
 
         return AsyncStreamResponse(
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -843,7 +851,7 @@ async def context_stream_async(
         chunk_iterator = _utils.decode_async_stream(google_stream)
 
         return AsyncContextStreamResponse(
-            provider="google",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
diff --git a/python/mirascope/llm/clients/openai/completions/clients.py b/python/mirascope/llm/clients/openai/completions/clients.py
index 13420677f..48899f183 100644
--- a/python/mirascope/llm/clients/openai/completions/clients.py
+++ b/python/mirascope/llm/clients/openai/completions/clients.py
@@ -4,7 +4,7 @@
 from collections.abc import Sequence
 from contextvars import ContextVar
 from functools import lru_cache
-from typing import overload
+from typing import TYPE_CHECKING, overload
 from typing_extensions import Unpack
 
 from openai import AsyncOpenAI, OpenAI
@@ -36,6 +36,9 @@
 from . import _utils
 from .model_ids import OpenAICompletionsModelId
 
+if TYPE_CHECKING:
+    from ...providers import Provider
+
 OPENAI_COMPLETIONS_CLIENT_CONTEXT: ContextVar["OpenAICompletionsClient | None"] = (
     ContextVar("OPENAI_COMPLETIONS_CLIENT_CONTEXT", default=None)
 )
@@ -87,6 +90,11 @@ class OpenAICompletionsClient(BaseClient[OpenAICompletionsModelId, OpenAI]):
     def _context_var(self) -> ContextVar["OpenAICompletionsClient | None"]:
         return OPENAI_COMPLETIONS_CLIENT_CONTEXT
 
+    @property
+    def provider(self) -> "Provider":
+        """Return the provider name for this client."""
+        return "openai:completions"
+
     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
     ) -> None:
@@ -170,7 +178,7 @@ def call(
 
         return Response(
             raw=openai_response,
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -269,7 +277,7 @@ def context_call(
 
         return ContextResponse(
             raw=openai_response,
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -356,7 +364,7 @@ async def call_async(
 
         return AsyncResponse(
             raw=openai_response,
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -455,7 +463,7 @@ async def context_call_async(
 
         return AsyncContextResponse(
             raw=openai_response,
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -541,7 +549,7 @@ def stream(
         chunk_iterator = _utils.decode_stream(openai_stream)
 
         return StreamResponse(
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -639,7 +647,7 @@ def context_stream(
         chunk_iterator = _utils.decode_stream(openai_stream)
 
         return ContextStreamResponse(
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -725,7 +733,7 @@ async def stream_async(
         chunk_iterator = _utils.decode_async_stream(openai_stream)
 
         return AsyncStreamResponse(
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -823,7 +831,7 @@ async def context_stream_async(
         chunk_iterator = _utils.decode_async_stream(openai_stream)
 
         return AsyncContextStreamResponse(
-            provider="openai:completions",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
diff --git a/python/mirascope/llm/clients/openai/responses/clients.py b/python/mirascope/llm/clients/openai/responses/clients.py
index b1d4dd446..63b4a5f33 100644
--- a/python/mirascope/llm/clients/openai/responses/clients.py
+++ b/python/mirascope/llm/clients/openai/responses/clients.py
@@ -4,7 +4,7 @@
 from collections.abc import Sequence
 from contextvars import ContextVar
 from functools import lru_cache
-from typing import overload
+from typing import TYPE_CHECKING, overload
 from typing_extensions import Unpack
 
 from openai import AsyncOpenAI, OpenAI
@@ -36,6 +36,9 @@
 from . import _utils
 from .model_ids import OpenAIResponsesModelId
 
+if TYPE_CHECKING:
+    from ...providers import Provider
+
 OPENAI_RESPONSES_CLIENT_CONTEXT: ContextVar["OpenAIResponsesClient | None"] = (
     ContextVar("OPENAI_RESPONSES_CLIENT_CONTEXT", default=None)
 )
@@ -74,6 +77,11 @@ class OpenAIResponsesClient(BaseClient[OpenAIResponsesModelId, OpenAI]):
     def _context_var(self) -> ContextVar["OpenAIResponsesClient | None"]:
         return OPENAI_RESPONSES_CLIENT_CONTEXT
 
+    @property
+    def provider(self) -> "Provider":
+        """Return the provider name for this client."""
+        return "openai:responses"
+
     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
     ) -> None:
@@ -157,7 +165,7 @@ def call(
 
         return Response(
             raw=openai_response,
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -243,7 +251,7 @@ async def call_async(
 
         return AsyncResponse(
             raw=openai_response,
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -331,7 +339,7 @@ def stream(
         )
 
         return StreamResponse(
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -418,7 +426,7 @@ async def stream_async(
         )
 
         return AsyncStreamResponse(
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -516,7 +524,7 @@ def context_call(
 
         return ContextResponse(
             raw=openai_response,
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -615,7 +623,7 @@ async def context_call_async(
 
         return AsyncContextResponse(
             raw=openai_response,
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -716,7 +724,7 @@ def context_stream(
        )
 
         return ContextStreamResponse(
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
@@ -822,7 +830,7 @@ async def context_stream_async(
         )
 
         return AsyncContextStreamResponse(
-            provider="openai:responses",
+            provider=self.provider,
             model_id=model_id,
             params=params,
             tools=tools,
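
---
Usage note (reviewer sketch, not part of the commit): the override hook described
in the new BaseClient docstring can be exercised as below. MirroredAnthropicClient
and its base URL are hypothetical; the signatures come from this patch. Because
every call/stream method above now passes provider=self.provider into the response
it builds, a single property override changes the provider tag on every Response
and StreamResponse the client produces.

    from typing import TYPE_CHECKING

    from mirascope.llm.clients.anthropic.clients import AnthropicClient

    if TYPE_CHECKING:
        from mirascope.llm.clients.providers import Provider


    class MirroredAnthropicClient(AnthropicClient):
        """Hypothetical client for an Anthropic-compatible mirror endpoint."""

        def __init__(self, *, api_key: str | None = None) -> None:
            # Placeholder URL; any Anthropic-compatible mirror would do here.
            super().__init__(api_key=api_key, base_url="https://mirror.example/v1")

        @property
        def provider(self) -> "Provider":
            # Still reports "anthropic": the mirror speaks Anthropic's API, so
            # downstream provider checks keep working unchanged.
            return "anthropic"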