
fix(internal_user_endpoints.py): support user with + in email on user info + handle empty string for arguments on gemini function calls #11601

Merged · 10 commits · Jun 11, 2025
docs/my-website/docs/observability/langfuse_integration.md (1 addition, 1 deletion)
@@ -21,7 +21,7 @@ Example trace in Langfuse using multiple models via LiteLLM:
### Pre-Requisites
Ensure you have run `pip install langfuse` for this integration
```diff
-pip install langfuse>=2.0.0 litellm
+pip install langfuse==2.45.0 litellm
```
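
One note on the install line itself: in most shells an unquoted `>=` specifier is parsed as output redirection, so a range constraint needs quoting, whereas an exact `==` pin like the one above does not:

```shell
# Unquoted, the '>' redirects stdout to a file named '=2.0.0':
#   pip install langfuse>=2.0.0 litellm
# Quote the specifier if a version range is wanted:
pip install "langfuse>=2.0.0" litellm
# Or pin an exact version, as this doc now does:
pip install langfuse==2.45.0 litellm
```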

### Quick Start
litellm/litellm_core_utils/prompt_templates/factory.py (8 additions, 1 deletion)
@@ -989,7 +989,14 @@ def _gemini_tool_call_invoke_helper(
) -> Optional[VertexFunctionCall]:
name = function_call_params.get("name", "") or ""
arguments = function_call_params.get("arguments", "")
-arguments_dict = json.loads(arguments)
+if (
+    isinstance(arguments, str) and len(arguments) == 0
+):  # pass empty dict, if arguments is empty string - prevents call from failing
+    arguments_dict = {
+        "type": "object",
+    }
+else:
+    arguments_dict = json.loads(arguments)
function_call = VertexFunctionCall(
name=name,
args=arguments_dict,
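
This guard matters because `json.loads("")` raises `json.JSONDecodeError`, and Gemini function calls with no arguments can surface exactly that empty string. A standalone sketch of the patched logic (the plain dict return stands in for the `VertexFunctionCall` structure the real helper builds):

```python
import json
from typing import Optional


def gemini_tool_call_invoke_helper(function_call_params: dict) -> Optional[dict]:
    """Mirrors the patched helper: empty-string arguments fall back to a
    minimal object schema instead of crashing json.loads."""
    name = function_call_params.get("name", "") or ""
    arguments = function_call_params.get("arguments", "")
    if isinstance(arguments, str) and len(arguments) == 0:
        arguments_dict = {"type": "object"}  # safe default for empty arguments
    else:
        arguments_dict = json.loads(arguments)  # still raises on malformed JSON
    return {"name": name, "args": arguments_dict}


# Before the fix this input raised json.JSONDecodeError; now it degrades gracefully:
print(gemini_tool_call_invoke_helper({"name": "get_weather", "arguments": ""}))
# -> {'name': 'get_weather', 'args': {'type': 'object'}}
```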
@@ -315,6 +315,7 @@ def get_tool_value(tool: dict, tool_name: str) -> Optional[dict]:
if (
"parameters" in _openai_function_object
and _openai_function_object["parameters"] is not None
+and isinstance(_openai_function_object["parameters"], dict)
): # OPENAI accepts JSON Schema, Google accepts OpenAPI schema.
_openai_function_object["parameters"] = _build_vertex_schema(
_openai_function_object["parameters"]
@@ -344,6 +345,10 @@ def get_tool_value(tool: dict, tool_name: str) -> Optional[dict]:
)
_description = openai_function_object.get("description", None)
_parameters = openai_function_object.get("parameters", None)
+if isinstance(_parameters, str) and len(_parameters) == 0:
+    _parameters = {
+        "type": "object",
+    }
if _description is not None:
gtool_func_declaration["description"] = _description
if _parameters is not None:
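
The tool-declaration side gets the same defensive treatment: an OpenAI-style tool may arrive with `parameters` set to `""` instead of a JSON Schema dict, so the transformation substitutes a minimal object schema and only hands genuine dicts to the Vertex schema builder. A condensed sketch (`normalize_tool_parameters` is a hypothetical helper combining the two guards in the diff, not a function in the codebase):

```python
from typing import Any, Optional


def normalize_tool_parameters(parameters: Any) -> Optional[dict]:
    """Hypothetical helper: empty strings become an empty object schema,
    only dicts pass through to the OpenAPI-schema conversion, and anything
    else is dropped rather than crashing the schema builder."""
    if isinstance(parameters, str) and len(parameters) == 0:
        return {"type": "object"}
    if isinstance(parameters, dict):
        return parameters
    return None


tool = {"type": "function", "function": {"name": "get_current_weather", "parameters": ""}}
print(normalize_tool_parameters(tool["function"]["parameters"]))  # {'type': 'object'}
print(normalize_tool_parameters(None))  # None
```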
litellm/model_prices_and_context_window_backup.json (26 additions)
@@ -4153,6 +4153,32 @@
"supports_assistant_prefill": true,
"supports_tool_choice": true
},
"mistral/magistral-medium-2506": {
"max_tokens": 40000,
"max_input_tokens": 40000,
"max_output_tokens": 40000,
"input_cost_per_token": 2e-06,
"output_cost_per_token": 5e-06,
"litellm_provider": "mistral",
"mode": "chat",
"source": "https://mistral.ai/news/magistral",
"supports_function_calling": true,
"supports_assistant_prefill": true,
"supports_tool_choice": true
},
"mistral/magistral-small-2506": {
"max_tokens": 40000,
"max_input_tokens": 40000,
"max_output_tokens": 40000,
"input_cost_per_token": 0.0,
"output_cost_per_token": 0.0,
"litellm_provider": "mistral",
"mode": "chat",
"source": "https://mistral.ai/news/magistral",
"supports_function_calling": true,
"supports_assistant_prefill": true,
"supports_tool_choice": true
},
"mistral/mistral-embed": {
"max_tokens": 8192,
"max_input_tokens": 8192,
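
With these registry entries in place, the new models resolve through the standard `mistral/` provider route, and the per-token prices feed litellm's cost tracking. A minimal sketch (assumes a valid `MISTRAL_API_KEY` in the environment):

```python
import litellm

response = litellm.completion(
    model="mistral/magistral-medium-2506",
    messages=[{"role": "user", "content": "Say hello."}],
)
# completion_cost looks the price up in litellm's model-price map,
# which the backup JSON above mirrors (2e-06/input token, 5e-06/output token):
print(litellm.completion_cost(completion_response=response))
```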
litellm/proxy/_new_secret_config.yaml (5 additions)
@@ -72,6 +72,11 @@ model_list:
model: openai/fake
api_key: fake-key
api_base: https://exampleopenaiendpoint-production.up.railway.app/
+- model_name: "anthropic-claude-vertex"
+  litellm_params:
+    model: vertex_ai/claude-3-5-sonnet@20240620
+    vertex_project: internal-litellm-local-dev


general_settings:
store_model_in_db: true
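
Once the proxy loads this config, the `anthropic-claude-vertex` alias is reachable through the OpenAI-compatible endpoint. A usage sketch (assumes the proxy is running on localhost:4000; `sk-1234` is a placeholder virtual key):

```python
import openai

client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")
response = client.chat.completions.create(
    model="anthropic-claude-vertex",  # routed to vertex_ai/claude-3-5-sonnet@20240620
    messages=[{"role": "user", "content": "hi"}],
)
print(response.choices[0].message.content)
```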
litellm/proxy/management_endpoints/internal_user_endpoints.py (29 additions)
@@ -392,6 +392,26 @@ def get_team_from_list(
return None


+def get_user_id_from_request(request: Request) -> Optional[str]:
+    """
+    Get the user id from the request
+    """
+    # Get the raw query string and parse it properly to handle + characters
+    user_id: Optional[str] = None
+    query_string = str(request.url.query)
+    if "user_id=" in query_string:
+        # Extract the user_id value from the raw query string
+        import re
+        from urllib.parse import unquote
+
+        match = re.search(r"user_id=([^&]*)", query_string)
+        if match:
+            # Use unquote instead of unquote_plus to preserve + characters
+            raw_user_id = unquote(match.group(1))
+            user_id = raw_user_id
+    return user_id


@router.get(
"/user/info",
tags=["Internal User management"],
@@ -400,6 +420,7 @@ def get_team_from_list(
)
@management_endpoint_wrapper
async def user_info(
+request: Request,
user_id: Optional[str] = fastapi.Query(
default=None, description="User ID in the request parameters"
),
@@ -421,6 +442,12 @@ async def user_info(
from litellm.proxy.proxy_server import prisma_client

try:
+# Handle URL encoding properly by getting user_id from the original request
+if (
+    user_id is not None and " " in user_id
+):  # if user_id is not None and contains a space, get the user_id from the request - this is to handle the case where the user_id is encoded in the url
+    user_id = get_user_id_from_request(request=request)

if prisma_client is None:
raise Exception(
"Database not connected. Connect a database to your proxy - https://docs.litellm.ai/docs/simple_proxy#managing-auth---virtual-keys"
@@ -433,10 +460,12 @@ async def user_info(
elif user_id is None:
user_id = user_api_key_dict.user_id
## GET USER ROW ##

if user_id is not None:
user_info = await prisma_client.get_data(user_id=user_id)
else:
user_info = None

## GET ALL TEAMS ##
team_list = []
team_id_list = []
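
The underlying issue: form-style query decoding, which Starlette applies to query parameters by default, treats `+` as an encoded space, so an email like `machine-user+alp@example.com` (illustrative address) arrives mangled. The helper above therefore re-parses the raw query string with `unquote`, which preserves a literal `+` while still decoding percent escapes such as `%2B`:

```python
from urllib.parse import unquote, unquote_plus

raw = "machine-user+alp@example.com"
print(unquote_plus(raw))  # 'machine-user alp@example.com' ('+' decoded as a space)
print(unquote(raw))       # 'machine-user+alp@example.com' ('+' preserved)

# A percent-encoded '+' still decodes correctly under unquote:
print(unquote("machine-user%2Balp@example.com"))  # 'machine-user+alp@example.com'
```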
@@ -1405,6 +1405,7 @@ async def generate_key_helper_fn( # noqa: PLR0915

try:
# Create a new verification token (you may want to enhance this logic based on your needs)

user_data = {
"max_budget": max_budget,
"user_email": user_email,
litellm/proxy/utils.py (2 additions)
@@ -1564,10 +1564,12 @@ async def get_data( # noqa: PLR0915
if query_type == "find_unique":
if key_val is None:
key_val = {"user_id": user_id}

response = await self.db.litellm_usertable.find_unique( # type: ignore
where=key_val, # type: ignore
include={"organization_memberships": True},
)

elif query_type == "find_all" and key_val is not None:
response = await self.db.litellm_usertable.find_many(
where=key_val # type: ignore
test_url_encoding.py (new file, 31 additions)
@@ -0,0 +1,31 @@
# Test URL encoding handling for emails with + characters
import re
from urllib.parse import unquote


def test_user_id_parsing():
    # Simulate the raw query string that would come from the URL
    # When user calls: http://0.0.0.0:4000/user/info?user_id=machine-user+alp-air-admin-b58-b@tempus.com
    # The query string would be: user_id=machine-user+alp-air-admin-b58-b@tempus.com

    test_cases = [
        "user_id=machine-user+alp-air-admin-b58-b@tempus.com",
        "user_id=machine-user%2Balp-air-admin-b58-b@tempus.com",  # URL encoded +
        "user_id=regular@email.com",
        "user_id=test-user@domain.com&other_param=value",
    ]

    for query_string in test_cases:
        print(f"\nTesting query string: {query_string}")

        if 'user_id=' in query_string:
            match = re.search(r'user_id=([^&]*)', query_string)
            if match:
                raw_user_id = unquote(match.group(1))
                print(f"Extracted user_id: {raw_user_id}")
            else:
                print("No match found")
        else:
            print("user_id not found in query string")


if __name__ == "__main__":
    test_user_id_parsing()
tests/llm_translation/test_gemini.py (18 additions, 1 deletion)
@@ -221,4 +221,21 @@ def test_gemini_with_grounding():
assert complete_response is not None
usage: Usage = complete_response.usage
assert usage.prompt_tokens_details.web_search_requests is not None
-assert usage.prompt_tokens_details.web_search_requests > 0
\ No newline at end of file
+assert usage.prompt_tokens_details.web_search_requests > 0
+
+
+def test_gemini_with_empty_function_call_arguments():
+    from litellm import completion
+    litellm._turn_on_debug()
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_current_weather",
+                "parameters": "",
+            },
+        }
+    ]
+    response = completion(model="gemini/gemini-2.0-flash", messages=[{"role": "user", "content": "What is the capital of France?"}], tools=tools)
+    print(response)
+    assert response.choices[0].message.content is not None
tests/proxy_unit_tests/test_jwt.py (1 addition, 1 deletion)
@@ -677,7 +677,7 @@ async def aaaatest_user_token_output(

request._url = URL(url="/team/new")
result = await user_api_key_auth(request=request, api_key=bearer_token)
-await user_info(user_id=user_id)
+await user_info(request=request, user_id=user_id)
except Exception as e:
pytest.fail(f"This should not fail - {str(e)}")
else:
tests/proxy_unit_tests/test_key_generate_prisma.py (3 additions, 1 deletion)
@@ -234,6 +234,7 @@ async def test_new_user_response(prisma_client):
)
def test_generate_and_call_with_valid_key(prisma_client, api_route):
# 1. Generate a Key, and use it to make a call
+from unittest.mock import MagicMock

print("prisma client=", prisma_client)

@@ -256,8 +257,9 @@ async def test():
user_id = key.user_id

# check /user/info to verify user_role was set correctly
+request_mock = MagicMock()
new_user_info = await user_info(
-    user_id=user_id, user_api_key_dict=user_api_key_dict
+    request=request_mock, user_id=user_id, user_api_key_dict=user_api_key_dict
)
new_user_info = new_user_info.user_info
print("new_user_info=", new_user_info)
tests/proxy_unit_tests/test_proxy_server.py (1 addition)
@@ -1274,6 +1274,7 @@ async def test_user_info_team_list(prisma_client):

try:
await user_info(
+request=MagicMock(),
user_id=None,
user_api_key_dict=UserAPIKeyAuth(
api_key="sk-1234", user_id="default_user_id"
@@ -288,3 +288,23 @@ def test_bedrock_validate_format_image_or_video():
# assert "### System:\nBe helpful\n\n" in result["prompt"]
# assert "### Assistant:\nI see a cat in the image.\n\n" in result["prompt"]
# assert result["images"] == ["http://example.com/image.jpg"]


+def test_vertex_ai_transform_empty_function_call_arguments():
+    """
+    Test that the _transform_parts method handles empty function call arguments correctly
+    """
+    from litellm.litellm_core_utils.prompt_templates.factory import (
+        VertexFunctionCall,
+        _gemini_tool_call_invoke_helper,
+    )
+
+    function_call = {
+        "name": "get_weather",
+        "arguments": "",
+    }
+    result: VertexFunctionCall = _gemini_tool_call_invoke_helper(function_call)
+    print(result)
+    assert result["args"] == {
+        "type": "object",
+    }