From 49de737f5a98b11da429d6f243097913812057ae Mon Sep 17 00:00:00 2001
From: Krish Dholakia
Date: Thu, 27 Mar 2025 19:13:53 -0700
Subject: [PATCH] Revert "Support max_completion_tokens on Mistral (#9589)"

This reverts commit fef5d23dd5935ae5d791f07fdaa07b655449f9f9.
---
 .../mistral/mistral_chat_transformation.py    |  9 +---
 .../mistral/test_mistral_transformation.py    | 45 -------------------
 2 files changed, 1 insertion(+), 53 deletions(-)
 delete mode 100644 tests/litellm/llms/mistral/test_mistral_transformation.py

diff --git a/litellm/llms/mistral/mistral_chat_transformation.py b/litellm/llms/mistral/mistral_chat_transformation.py
index 7f93ddcc293f..3e7a97c92f2d 100644
--- a/litellm/llms/mistral/mistral_chat_transformation.py
+++ b/litellm/llms/mistral/mistral_chat_transformation.py
@@ -28,9 +28,7 @@ class MistralConfig(OpenAIGPTConfig):
 
     - `top_p` (number or null): An alternative to sampling with temperature, used for nucleus sampling. API Default - 1.
 
-    - `max_tokens` [DEPRECATED - use max_completion_tokens] (integer or null): This optional parameter helps to set the maximum number of tokens to generate in the chat completion. API Default - null.
-
-    - `max_completion_tokens` (integer or null): This optional parameter helps to set the maximum number of tokens to generate in the chat completion. API Default - null.
+    - `max_tokens` (integer or null): This optional parameter helps to set the maximum number of tokens to generate in the chat completion. API Default - null.
 
     - `tools` (list or null): A list of available tools for the model. Use this to specify functions for which the model can generate JSON inputs.
 
@@ -48,7 +46,6 @@ class MistralConfig(OpenAIGPTConfig):
     temperature: Optional[int] = None
     top_p: Optional[int] = None
     max_tokens: Optional[int] = None
-    max_completion_tokens: Optional[int] = None
     tools: Optional[list] = None
     tool_choice: Optional[Literal["auto", "any", "none"]] = None
     random_seed: Optional[int] = None
@@ -61,7 +58,6 @@ def __init__(
         temperature: Optional[int] = None,
         top_p: Optional[int] = None,
         max_tokens: Optional[int] = None,
-        max_completion_tokens: Optional[int] = None,
         tools: Optional[list] = None,
         tool_choice: Optional[Literal["auto", "any", "none"]] = None,
         random_seed: Optional[int] = None,
@@ -84,7 +80,6 @@ def get_supported_openai_params(self, model: str) -> List[str]:
             "temperature",
             "top_p",
             "max_tokens",
-            "max_completion_tokens"
             "tools",
             "tool_choice",
             "seed",
@@ -110,8 +105,6 @@ def map_openai_params(
         for param, value in non_default_params.items():
             if param == "max_tokens":
                 optional_params["max_tokens"] = value
-            if param == "max_completion_tokens": # max_completion_tokens should take priority
-                optional_params["max_tokens"] = value
             if param == "tools":
                 optional_params["tools"] = value
             if param == "stream" and value is True:
diff --git a/tests/litellm/llms/mistral/test_mistral_transformation.py b/tests/litellm/llms/mistral/test_mistral_transformation.py
deleted file mode 100644
index 4594e8ca2aec..000000000000
--- a/tests/litellm/llms/mistral/test_mistral_transformation.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import os
-import sys
-from unittest.mock import MagicMock
-
-
-sys.path.insert(
-    0, os.path.abspath("../../../../..")
-)  # Adds the parent directory to the system path
-
-from litellm.llms.mistral.mistral_chat_transformation import MistralConfig
-
-
-class TestMistralTransform:
-    def setup_method(self):
-        self.config = MistralConfig()
-        self.model = "mistral-small-latest"
-        self.logging_obj = MagicMock()
-
-    def test_map_mistral_params(self):
-        """Test that parameters are correctly mapped"""
-        test_params = {"temperature": 0.7, "max_tokens": 200, "max_completion_tokens": 256}
-
-        result = self.config.map_openai_params(
-            non_default_params=test_params,
-            optional_params={},
-            model=self.model,
-            drop_params=False,
-        )
-
-        # The function should properly map max_completion_tokens to max_tokens and override max_tokens
-        assert result == {"temperature": 0.7, "max_tokens": 256}
-
-    def test_mistral_max_tokens_backward_compat(self):
-        """Test that parameters are correctly mapped"""
-        test_params = {"temperature": 0.7, "max_tokens": 200,}
-
-        result = self.config.map_openai_params(
-            non_default_params=test_params,
-            optional_params={},
-            model=self.model,
-            drop_params=False,
-        )
-
-        # The function should properly map max_tokens if max_completion_tokens is not provided
-        assert result == {"temperature": 0.7, "max_tokens": 200}