From 7751c53437ff8857bcd481cf811a1eea9211e555 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 25 Mar 2025 09:13:17 -0700
Subject: [PATCH 1/8] fix: initial commit for adding provider model discovery
 to gemini

---
 litellm/utils.py                        | 12 ++++++++++--
 tests/litellm_utils_tests/test_utils.py |  8 ++++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/litellm/utils.py b/litellm/utils.py
index dc97c4d898ff..db9f63bb60c6 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -5744,13 +5744,15 @@ def trim_messages(
     return messages
 
 
-def get_valid_models(check_provider_endpoint: bool = False) -> List[str]:
+def get_valid_models(
+    check_provider_endpoint: bool = False, custom_llm_provider: Optional[str] = None
+) -> List[str]:
     """
     Returns a list of valid LLMs based on the set environment variables
 
     Args:
         check_provider_endpoint: If True, will check the provider's endpoint for valid models.
-
+        custom_llm_provider: If provided, only this provider's endpoint is checked for valid models.
     Returns:
         A list of valid LLMs
     """
@@ -5762,6 +5764,9 @@ def get_valid_models(check_provider_endpoint: bool = False) -> List[str]:
         valid_models = []
 
         for provider in litellm.provider_list:
+            if custom_llm_provider and provider != custom_llm_provider:
+                continue
+
             # edge case litellm has together_ai as a provider, it should be togetherai
             env_provider_1 = provider.replace("_", "")
             env_provider_2 = provider
@@ -5783,6 +5788,9 @@ def get_valid_models(check_provider_endpoint: bool = False) -> List[str]:
                 provider=LlmProviders(provider),
             )
 
+            if custom_llm_provider and provider != custom_llm_provider:
+                continue
+
             if provider == "azure":
                 valid_models.append("Azure-LLM")
             elif provider_config is not None and check_provider_endpoint:
diff --git a/tests/litellm_utils_tests/test_utils.py b/tests/litellm_utils_tests/test_utils.py
index fea225e4a3bf..ba3019ab387d 100644
--- a/tests/litellm_utils_tests/test_utils.py
+++ b/tests/litellm_utils_tests/test_utils.py
@@ -303,6 +303,14 @@ def test_aget_valid_models():
     os.environ = old_environ
 
 
+def test_get_valid_models_with_check_provider_endpoint():
+    valid_models = get_valid_models(
+        check_provider_endpoint=True, custom_llm_provider="gemini"
+    )
+    print(valid_models)
+    assert len(valid_models) > 0
+
+
 # test_get_valid_models()

From 67b0c94239a2086cd8cc0327ef0d38e70861ed36 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 25 Mar 2025 09:31:06 -0700
Subject: [PATCH 2/8] feat(gemini/): add model discovery for gemini/ route

---
 litellm/__init__.py                     |  1 +
 litellm/llms/base_llm/base_utils.py     | 12 +++++-
 litellm/llms/gemini/common_utils.py     | 52 +++++++++++++++++++++++++
 litellm/utils.py                        |  2 +
 tests/litellm_utils_tests/test_utils.py | 10 ++++-
 5 files changed, 74 insertions(+), 3 deletions(-)
 create mode 100644 litellm/llms/gemini/common_utils.py

diff --git a/litellm/__init__.py b/litellm/__init__.py
index d2b516966409..fab7aaa78e31 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -848,6 +848,7 @@ def add_known_models():
     VertexGeminiConfig,
     VertexGeminiConfig as VertexAIConfig,
 )
+from .llms.gemini.common_utils import GeminiModelInfo
 from .llms.gemini.chat.transformation import (
     GoogleAIStudioGeminiConfig,
     GoogleAIStudioGeminiConfig as GeminiConfig,  # aliased to maintain backwards compatibility
diff --git a/litellm/llms/base_llm/base_utils.py b/litellm/llms/base_llm/base_utils.py
index 919cdbfd02c5..cef64d01e37c 100644
--- a/litellm/llms/base_llm/base_utils.py
+++ b/litellm/llms/base_llm/base_utils.py
@@ -19,11 +19,19 @@ def get_provider_info(
         self,
         model: str,
     ) -> Optional[ProviderSpecificModelInfo]:
+        """
+        Default values that all models of this provider support.
+        """
         return None
 
     @abstractmethod
-    def get_models(self) -> List[str]:
-        pass
+    def get_models(
+        self, api_key: Optional[str] = None, api_base: Optional[str] = None
+    ) -> List[str]:
+        """
+        Returns a list of models supported by this provider.
+        """
+        return []
 
     @staticmethod
     @abstractmethod
diff --git a/litellm/llms/gemini/common_utils.py b/litellm/llms/gemini/common_utils.py
new file mode 100644
index 000000000000..7f266c05367a
--- /dev/null
+++ b/litellm/llms/gemini/common_utils.py
@@ -0,0 +1,52 @@
+from typing import List, Optional
+
+import litellm
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
+from litellm.secret_managers.main import get_secret_str
+
+
+class GeminiModelInfo(BaseLLMModelInfo):
+    @staticmethod
+    def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
+        return (
+            api_base
+            or get_secret_str("GEMINI_API_BASE")
+            or "https://generativelanguage.googleapis.com/v1beta"
+        )
+
+    @staticmethod
+    def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
+        return api_key or (get_secret_str("GEMINI_API_KEY"))
+
+    @staticmethod
+    def get_base_model(model: str) -> Optional[str]:
+        return model.replace("gemini/", "")
+
+    def get_models(
+        self, api_key: Optional[str] = None, api_base: Optional[str] = None
+    ) -> List[str]:
+
+        api_base = GeminiModelInfo.get_api_base(api_base)
+        api_key = GeminiModelInfo.get_api_key(api_key)
+        if api_base is None or api_key is None:
+            raise ValueError(
+                "GEMINI_API_BASE or GEMINI_API_KEY is not set. Please set the environment variable to query Gemini's `/models` endpoint."
+            )
+
+        response = litellm.module_level_client.get(
+            url=f"{api_base}/models?key={api_key}",
+        )
+
+        if response.status_code != 200:
+            raise ValueError(
+                f"Failed to fetch models from Gemini. Status code: {response.status_code}, Response: {response.json()}"
+            )
+
+        models = response.json()["models"]
+
+        litellm_model_names = []
+        for model in models:
+            # Note: str.strip() removes *characters*, not a prefix, and would
+            # eat trailing letters like the "o" in "gemini-pro"; use replace().
+            stripped_model_name = model["name"].replace("models/", "")
+            litellm_model_name = "gemini/" + stripped_model_name
+            litellm_model_names.append(litellm_model_name)
+        return litellm_model_names
diff --git a/litellm/utils.py b/litellm/utils.py
index db9f63bb60c6..a710ba1e4c65 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -6403,6 +6403,8 @@ def get_provider_model_info(
         return litellm.FireworksAIConfig()
     elif LlmProviders.OPENAI == provider:
         return litellm.OpenAIGPTConfig()
+    elif LlmProviders.GEMINI == provider:
+        return litellm.GeminiModelInfo()
     elif LlmProviders.LITELLM_PROXY == provider:
         return litellm.LiteLLMProxyChatConfig()
     elif LlmProviders.TOPAZ == provider:
diff --git a/tests/litellm_utils_tests/test_utils.py b/tests/litellm_utils_tests/test_utils.py
index ba3019ab387d..827e58bc0d18 100644
--- a/tests/litellm_utils_tests/test_utils.py
+++ b/tests/litellm_utils_tests/test_utils.py
@@ -303,7 +303,15 @@ def test_aget_valid_models():
     os.environ = old_environ
 
 
-def test_get_valid_models_with_check_provider_endpoint():
+def test_get_valid_models_with_custom_llm_provider():
+    from litellm.utils import ProviderConfigManager
+    from litellm.types.utils import LlmProviders
+
+    provider_config = ProviderConfigManager.get_provider_model_info(
+        model=None,
+        provider=LlmProviders("gemini"),
+    )
+    assert provider_config is not None
     valid_models = get_valid_models(
         check_provider_endpoint=True, custom_llm_provider="gemini"
     )

From e0dbbbab55ecf393d7ca90367b3f8c1583eff64e Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 25 Mar 2025 09:34:27 -0700
Subject: [PATCH 3/8] docs(set_keys.md): update docs to show you can check
 available gemini models as well

---
 docs/my-website/docs/set_keys.md | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/docs/my-website/docs/set_keys.md b/docs/my-website/docs/set_keys.md
index 3a5ff08d634b..52337c135f95 100644
--- a/docs/my-website/docs/set_keys.md
+++ b/docs/my-website/docs/set_keys.md
@@ -188,7 +188,11 @@ Currently implemented for:
 - OpenAI (if OPENAI_API_KEY is set)
 - Fireworks AI (if FIREWORKS_AI_API_KEY is set)
 - LiteLLM Proxy (if LITELLM_PROXY_API_KEY is set)
+- Gemini (if GEMINI_API_KEY is set)
 
+You can also specify a custom provider to check:
+
+**All providers**:
 ```python
 from litellm import get_valid_models
 
@@ -196,6 +200,14 @@ valid_models = get_valid_models(check_provider_endpoint=True)
 print(valid_models)
 ```
 
+**Specific provider**:
+```python
+from litellm import get_valid_models
+
+valid_models = get_valid_models(check_provider_endpoint=True, custom_llm_provider="openai")
+print(valid_models)
+```
+
 ### `validate_environment(model: str)`
 
 This helper tells you if you have all the required environment variables for a model, and if not - what's missing.
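A minimal usage sketch of the per-provider discovery wired up in the patches above, mirroring the new test; it assumes a valid `GEMINI_API_KEY` is exported, and the printed model names are illustrative only:

```python
import os

from litellm import get_valid_models

# Hypothetical placeholder; a real key is required because, with
# check_provider_endpoint=True, Gemini's live `/models` endpoint is queried.
os.environ["GEMINI_API_KEY"] = "my-gemini-key"

# custom_llm_provider restricts the check to a single provider.
valid_models = get_valid_models(
    check_provider_endpoint=True, custom_llm_provider="gemini"
)
print(valid_models)  # e.g. ["gemini/gemini-1.5-pro", "gemini/gemini-1.5-flash", ...]
```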
From 28caf0b171e67d0ac2d425aa7905ae1756ae17ed Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 25 Mar 2025 09:43:39 -0700
Subject: [PATCH 4/8] feat(anthropic/): add model discovery for anthropic api
 key

---
 litellm/__init__.py                     |  1 +
 litellm/llms/anthropic/common_utils.py  | 51 +++++++++++++++++++++++++
 litellm/utils.py                        |  8 +++-
 tests/litellm_utils_tests/test_utils.py |  8 ++--
 4 files changed, 64 insertions(+), 4 deletions(-)

diff --git a/litellm/__init__.py b/litellm/__init__.py
index fab7aaa78e31..498482b6b378 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -813,6 +813,7 @@ def add_known_models():
 from .llms.maritalk import MaritalkConfig
 from .llms.openrouter.chat.transformation import OpenrouterConfig
 from .llms.anthropic.chat.transformation import AnthropicConfig
+from .llms.anthropic.common_utils import AnthropicModelInfo
 from .llms.groq.stt.transformation import GroqSTTConfig
 from .llms.anthropic.completion.transformation import AnthropicTextConfig
 from .llms.triton.completion.transformation import TritonConfig
diff --git a/litellm/llms/anthropic/common_utils.py b/litellm/llms/anthropic/common_utils.py
index 409bbe2d8295..ef6022c60124 100644
--- a/litellm/llms/anthropic/common_utils.py
+++ b/litellm/llms/anthropic/common_utils.py
@@ -6,7 +6,10 @@
 
 import httpx
 
+import litellm
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
 from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.secret_managers.main import get_secret_str
 
 
 class AnthropicError(BaseLLMException):
@@ -19,6 +22,54 @@ def __init__(
         super().__init__(status_code=status_code, message=message, headers=headers)
 
 
+class AnthropicModelInfo(BaseLLMModelInfo):
+    @staticmethod
+    def get_api_base(api_base: str | None = None) -> str | None:
+        return (
+            api_base
+            or get_secret_str("ANTHROPIC_API_BASE")
+            or "https://api.anthropic.com"
+        )
+
+    @staticmethod
+    def get_api_key(api_key: str | None = None) -> str | None:
+        return api_key or get_secret_str("ANTHROPIC_API_KEY")
+
+    @staticmethod
+    def get_base_model(model: str) -> str | None:
+        return model.replace("anthropic/", "")
+
+    def get_models(
+        self, api_key: Optional[str] = None, api_base: Optional[str] = None
+    ) -> list[str]:
+        api_base = AnthropicModelInfo.get_api_base(api_base)
+        api_key = AnthropicModelInfo.get_api_key(api_key)
+        if api_base is None or api_key is None:
+            raise ValueError(
+                "ANTHROPIC_API_BASE or ANTHROPIC_API_KEY is not set. Please set the environment variable to query Anthropic's `/models` endpoint."
+            )
+        response = litellm.module_level_client.get(
+            url=f"{api_base}/v1/models",
+            headers={"x-api-key": api_key, "anthropic-version": "2023-06-01"},
+        )
+
+        try:
+            response.raise_for_status()
+        except httpx.HTTPStatusError:
+            raise Exception(
+                f"Failed to fetch models from Anthropic. Status code: {response.status_code}, Response: {response.text}"
+            )
+
+        models = response.json()["data"]
+
+        litellm_model_names = []
+        for model in models:
+            stripped_model_name = model["id"]
+            litellm_model_name = "anthropic/" + stripped_model_name
+            litellm_model_names.append(litellm_model_name)
+        return litellm_model_names
+
+
 def process_anthropic_headers(headers: Union[httpx.Headers, dict]) -> dict:
     openai_headers = {}
     if "anthropic-ratelimit-requests-limit" in headers:
diff --git a/litellm/utils.py b/litellm/utils.py
index a710ba1e4c65..061beaf1a03f 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -5794,7 +5794,11 @@ def get_valid_models(
             if provider == "azure":
                 valid_models.append("Azure-LLM")
             elif provider_config is not None and check_provider_endpoint:
-                valid_models.extend(provider_config.get_models())
+                try:
+                    models = provider_config.get_models()
+                    valid_models.extend(models)
+                except Exception as e:
+                    verbose_logger.debug(f"Error getting valid models: {e}")
             else:
                 models_for_provider = litellm.models_by_provider.get(provider, [])
                 valid_models.extend(models_for_provider)
@@ -6409,6 +6413,8 @@ def get_provider_model_info(
         return litellm.LiteLLMProxyChatConfig()
     elif LlmProviders.TOPAZ == provider:
         return litellm.TopazModelInfo()
+    elif LlmProviders.ANTHROPIC == provider:
+        return litellm.AnthropicModelInfo()
 
     return None
 
diff --git a/tests/litellm_utils_tests/test_utils.py b/tests/litellm_utils_tests/test_utils.py
index 827e58bc0d18..220bcc13018d 100644
--- a/tests/litellm_utils_tests/test_utils.py
+++ b/tests/litellm_utils_tests/test_utils.py
@@ -303,20 +303,22 @@ def test_aget_valid_models():
     os.environ = old_environ
 
 
-def test_get_valid_models_with_custom_llm_provider():
+@pytest.mark.parametrize("custom_llm_provider", ["gemini", "anthropic", "xai"])
+def test_get_valid_models_with_custom_llm_provider(custom_llm_provider):
     from litellm.utils import ProviderConfigManager
     from litellm.types.utils import LlmProviders
 
     provider_config = ProviderConfigManager.get_provider_model_info(
         model=None,
-        provider=LlmProviders("gemini"),
+        provider=LlmProviders(custom_llm_provider),
     )
     assert provider_config is not None
     valid_models = get_valid_models(
-        check_provider_endpoint=True, custom_llm_provider="gemini"
+        check_provider_endpoint=True, custom_llm_provider=custom_llm_provider
    )
     print(valid_models)
     assert len(valid_models) > 0
+    assert provider_config.get_models() == valid_models
 
 
 # test_get_valid_models()

From e9d857162aad79b17e825645a491c5bc56d36eeb Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 25 Mar 2025 09:59:59 -0700
Subject: [PATCH 5/8] feat(xai/): add model discovery for XAI

enables checking what models an xai key can call
---
 docs/my-website/docs/set_keys.md |  2 ++
 litellm/__init__.py              |  1 +
 litellm/llms/xai/common_utils.py | 51 ++++++++++++++++++++++++++++++++
 litellm/utils.py                 |  2 ++
 4 files changed, 56 insertions(+)
 create mode 100644 litellm/llms/xai/common_utils.py

diff --git a/docs/my-website/docs/set_keys.md b/docs/my-website/docs/set_keys.md
index 52337c135f95..693cf5f7f4ab 100644
--- a/docs/my-website/docs/set_keys.md
+++ b/docs/my-website/docs/set_keys.md
@@ -189,6 +189,8 @@ Currently implemented for:
 - Fireworks AI (if FIREWORKS_AI_API_KEY is set)
 - LiteLLM Proxy (if LITELLM_PROXY_API_KEY is set)
 - Gemini (if GEMINI_API_KEY is set)
+- XAI (if XAI_API_KEY is set)
+- Anthropic (if ANTHROPIC_API_KEY is set)
 
 You can also specify a custom provider to check:
 
diff --git a/litellm/__init__.py b/litellm/__init__.py
index 498482b6b378..445bf0207772 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -980,6 +980,7 @@ def add_known_models():
 from .llms.friendliai.chat.transformation import FriendliaiChatConfig
 from .llms.jina_ai.embedding.transformation import JinaAIEmbeddingConfig
 from .llms.xai.chat.transformation import XAIChatConfig
+from .llms.xai.common_utils import XAIModelInfo
 from .llms.volcengine import VolcEngineConfig
 from .llms.codestral.completion.transformation import CodestralTextCompletionConfig
 from .llms.azure.azure import (
diff --git a/litellm/llms/xai/common_utils.py b/litellm/llms/xai/common_utils.py
new file mode 100644
index 000000000000..fdf2edbfa3e2
--- /dev/null
+++ b/litellm/llms/xai/common_utils.py
@@ -0,0 +1,51 @@
+from typing import Optional
+
+import httpx
+
+import litellm
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
+from litellm.secret_managers.main import get_secret_str
+
+
+class XAIModelInfo(BaseLLMModelInfo):
+    @staticmethod
+    def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
+        return api_base or get_secret_str("XAI_API_BASE") or "https://api.x.ai"
+
+    @staticmethod
+    def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
+        return api_key or get_secret_str("XAI_API_KEY")
+
+    @staticmethod
+    def get_base_model(model: str) -> Optional[str]:
+        return model.replace("xai/", "")
+
+    def get_models(
+        self, api_key: Optional[str] = None, api_base: Optional[str] = None
+    ) -> list[str]:
+        api_base = self.get_api_base(api_base)
+        api_key = self.get_api_key(api_key)
+        if api_base is None or api_key is None:
+            raise ValueError(
+                "XAI_API_BASE or XAI_API_KEY is not set. Please set the environment variable to query XAI's `/models` endpoint."
+            )
+        response = litellm.module_level_client.get(
+            url=f"{api_base}/v1/models",
+            headers={"Authorization": f"Bearer {api_key}"},
+        )
+
+        try:
+            response.raise_for_status()
+        except httpx.HTTPStatusError:
+            raise Exception(
+                f"Failed to fetch models from XAI. Status code: {response.status_code}, Response: {response.text}"
+            )
+
+        models = response.json()["data"]
+
+        litellm_model_names = []
+        for model in models:
+            stripped_model_name = model["id"]
+            litellm_model_name = "xai/" + stripped_model_name
+            litellm_model_names.append(litellm_model_name)
+        return litellm_model_names
diff --git a/litellm/utils.py b/litellm/utils.py
index 061beaf1a03f..bf56c6abaec5 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -6415,6 +6415,8 @@ def get_provider_model_info(
         return litellm.TopazModelInfo()
     elif LlmProviders.ANTHROPIC == provider:
         return litellm.AnthropicModelInfo()
+    elif LlmProviders.XAI == provider:
+        return litellm.XAIModelInfo()
 
     return None
 

From 06546d6a6262c3f71bd9c9f85e5531adee461a42 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 25 Mar 2025 23:15:55 -0700
Subject: [PATCH 6/8] ci: bump ci config yml

---
 .circleci/config.yml | 47 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index b93a9d81e8e2..304f96bdbb31 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,6 +3,14 @@ orbs:
   codecov: codecov/codecov@4.0.1
   node: circleci/node@5.1.0 # Add this line to declare the node orb
 
+commands:
+  setup_google_dns:
+    steps:
+      - run:
+          name: "Configure Google DNS"
+          command: |
+            echo "nameserver 8.8.8.8" | sudo tee /etc/resolv.conf
+            echo "nameserver 8.8.4.4" | sudo tee -a /etc/resolv.conf
 
 jobs:
   local_testing:
     docker:
       - image: cimg/python:3.11
         auth:
           username: ${DOCKERHUB_USERNAME}
           password: ${DOCKERHUB_PASSWORD}
     working_directory: ~/project
 
     steps:
       - checkout
-
+      - setup_google_dns
       - run:
           name: Show git commit hash
           command: |
@@ -134,7 +142,7 @@ jobs:
 
     steps:
       - checkout
-
+      - setup_google_dns
       - run:
           name: Show git commit hash
           command: |
@@ -234,7 +242,7 @@ jobs:
 
     steps:
       - checkout
-
+      - setup_google_dns
       - run:
           name: Show git commit hash
           command: |
@@ -334,6 +342,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -388,6 +397,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -429,6 +439,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Show git commit hash
           command: |
@@ -479,7 +490,7 @@ jobs:
     working_directory: ~/project
 
    steps:
       - checkout
-
+      - setup_google_dns
       - run:
           name: Show git commit hash
           command: |
@@ -569,7 +580,7 @@ jobs:
       - litellm_proxy_unit_tests_coverage
   litellm_assistants_api_testing: # Runs all tests with the "assistants" keyword
     docker:
-      - image: cimg/python:3.11
+      - image: cimg/python:3.13.1
         auth:
           username: ${DOCKERHUB_USERNAME}
           password: ${DOCKERHUB_PASSWORD}
     working_directory: ~/project
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -618,6 +630,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -654,6 +667,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -696,6 +710,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -740,6 +755,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -782,6 +798,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -828,6 +845,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -872,6 +890,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -918,6 +937,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -960,6 +980,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -1002,6 +1023,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -1048,6 +1070,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -1080,6 +1103,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -1104,6 +1128,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       # Install Helm
       - run:
           name: Install Helm
           command: |
@@ -1173,6 +1198,7 @@ jobs:
 
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Dependencies
           command: |
@@ -1209,6 +1235,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Python 3.9
           command: |
@@ -1283,6 +1310,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Docker CLI (In case it's not already installed)
           command: |
@@ -1418,6 +1446,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Docker CLI (In case it's not already installed)
           command: |
@@ -1542,6 +1571,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Docker CLI (In case it's not already installed)
           command: |
@@ -1704,6 +1734,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Docker CLI (In case it's not already installed)
           command: |
@@ -1815,6 +1846,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Docker CLI (In case it's not already installed)
           command: |
@@ -1897,6 +1929,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       # Remove Docker CLI installation since it's already available in machine executor
       - run:
           name: Install Python 3.13
           command: |
@@ -1994,6 +2027,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Install Docker CLI (In case it's not already installed)
           command: |
@@ -2253,6 +2287,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Build UI
           command: |
@@ -2367,6 +2402,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Build Docker image
           command: |
@@ -2389,6 +2425,7 @@ jobs:
     working_directory: ~/project
     steps:
       - checkout
+      - setup_google_dns
       - run:
           name: Build Docker image
           command: |

From 59e2ee69c7bde48cee111fda820a3b829fdb651d Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Wed, 26 Mar 2025 23:09:11 -0700
Subject: [PATCH 7/8] fix(topaz/common_utils.py): fix linting error

---
 litellm/llms/topaz/common_utils.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/litellm/llms/topaz/common_utils.py b/litellm/llms/topaz/common_utils.py
index 4ef2315db4ee..0252585922d2 100644
--- a/litellm/llms/topaz/common_utils.py
+++ b/litellm/llms/topaz/common_utils.py
@@ -11,7 +11,9 @@ class TopazException(BaseLLMException):
 
 
 class TopazModelInfo(BaseLLMModelInfo):
-    def get_models(self) -> List[str]:
+    def get_models(
+        self, api_key: Optional[str] = None, api_base: Optional[str] = None
+    ) -> List[str]:
         return [
             "topaz/Standard V2",
             "topaz/Low Resolution V2",

From fcee1e7c9fa2e08ec4829542871d5d3c42b70dba Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Thu, 27 Mar 2025 22:49:32 -0700
Subject: [PATCH 8/8] fix: fix linting error for python38

---
 litellm/llms/anthropic/common_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/litellm/llms/anthropic/common_utils.py b/litellm/llms/anthropic/common_utils.py
index ef6022c60124..52a96f5a3037 100644
--- a/litellm/llms/anthropic/common_utils.py
+++ b/litellm/llms/anthropic/common_utils.py
@@ -24,7 +24,7 @@ def __init__(
         super().__init__(status_code=status_code, message=message, headers=headers)
 
 
 class AnthropicModelInfo(BaseLLMModelInfo):
     @staticmethod
-    def get_api_base(api_base: str | None = None) -> str | None:
+    def get_api_base(api_base: Optional[str] = None) -> str | None:
         return (
             api_base
             or get_secret_str("ANTHROPIC_API_BASE")