3
3
from unittest .mock import patch , MagicMock
4
4
from fastapi import Request
5
5
from starlette .datastructures import Headers , QueryParams
6
+ from fastapi import Response
6
7
7
8
import api .routers .vertex as vertex
8
9
@@ -33,38 +34,25 @@ def test_to_vertex_anthropic():
33
34
assert isinstance (result ["messages" ], list )
34
35
assert result ["messages" ][0 ]["role" ] == "user"
35
36
assert result ["messages" ][0 ]["content" ][0 ]["text" ] == "Hello!"
37
+ assert result ["messages" ][1 ]["role" ] == "assistant"
38
+ assert result ["messages" ][1 ]["content" ][0 ]["text" ] == "Hi there!"
36
39
37
def test_from_anthropic_to_openai_response():
    """Anthropic-style response JSON converts to an OpenAI chat completion.

    Multiple text content parts must collapse into a single choice whose
    message content is the concatenation of the parts.
    """
    anthropic_payload = json.dumps({
        "id": "abc123",
        "role": "assistant",
        "content": [
            {"type": "text", "text": "Hello!"},
            {"type": "text", "text": "Bye!"},
        ],
        "stop_reason": "stop",
        "usage": {"prompt_tokens": 5, "completion_tokens": 2},
    })

    converted = json.loads(vertex.from_anthropic_to_openai_response(anthropic_payload))

    assert converted["id"] == "abc123"
    assert converted["object"] == "chat.completion"
    # Both text parts land in one choice, concatenated in order.
    assert len(converted["choices"]) == 1
    assert converted["choices"][0]["message"]["content"] == "Hello!Bye!"
    assert converted["choices"][0]["finish_reason"] == "stop"
    assert converted["usage"]["prompt_tokens"] == 5
51
55
52
- def test_to_openai_response ():
53
- resp = {
54
- "candidates" : [
55
- {
56
- "content" : {"parts" : [{"text" : "Hello!" }]},
57
- "finishReason" : "STOP"
58
- }
59
- ]
60
- }
61
- result = vertex .to_openai_response (resp )
62
- assert result ["object" ] == "chat.completion"
63
- assert result ["choices" ][0 ]["message" ]["content" ] == "Hello!"
64
- assert result ["choices" ][0 ]["finish_reason" ] == "stop"
65
- assert result ["choices" ][0 ]["index" ] == 0
66
- assert result ["id" ].startswith ("chatcmpl-" )
67
-
68
56
def test_get_gcp_target_env (monkeypatch ):
69
57
monkeypatch .setenv ("PROXY_TARGET" , "https://custom-proxy" )
70
58
result = vertex .get_gcp_target ("any-model" , "/v1/chat/completions" )
@@ -105,7 +93,7 @@ def test_get_header_removes_hop_headers(mock_token, dummy_request):
105
93
assert "Connection" not in headers
106
94
assert "Authorization" in headers
107
95
assert headers ["Authorization" ] == "Bearer dummy-token"
108
- assert headers ["X-Custom " ] == "foo"
96
+ assert headers ["x-custom " ] == "foo"
109
97
110
98
@pytest .mark .asyncio
111
99
@patch ("api.routers.vertex.httpx.AsyncClient" )
@@ -126,3 +114,86 @@ async def test_handle_proxy_basic(mock_get_model, mock_get_header, mock_async_cl
126
114
assert result .status_code == 200
127
115
assert b"hi" in result .body
128
116
assert result .headers ["content-type" ] == "application/json"
117
+
118
@pytest.mark.asyncio
@patch("api.routers.vertex.httpx.AsyncClient")
@patch("api.routers.vertex.get_header")
@patch("api.routers.vertex.get_model", return_value="test-model")
async def test_handle_proxy_known_chat_model(
    mock_get_model, mock_get_header, mock_async_client, dummy_request
):
    """handle_proxy passes the upstream body through for a known chat model.

    With USE_MODEL_MAPPING enabled and the resolved model present in
    known_chat_models, the raw upstream response is returned unchanged.
    """
    req = dummy_request(body=json.dumps({"model": "foo"}).encode())
    mock_get_header.return_value = ("http://target", {"Authorization": "Bearer token"})
    mock_response = MagicMock()
    mock_response.content = b'{"candidates":[{"content":{"parts":[{"text":"hi"}]}, "finishReason":"STOP"}]}'
    mock_response.status_code = 200
    mock_response.headers = {"content-type": "application/json"}
    mock_async_client.return_value.__aenter__.return_value.request.return_value = mock_response

    # Fix: previously this test set module-level globals and appended to
    # known_chat_models without ever restoring them, leaking state into
    # every test that runs afterwards. Save and restore in try/finally.
    saved_mapping = vertex.USE_MODEL_MAPPING
    added_model = "test-model" not in vertex.known_chat_models
    vertex.USE_MODEL_MAPPING = True
    if added_model:
        vertex.known_chat_models.append("test-model")
    try:
        result = await vertex.handle_proxy(req, "/v1/chat/completions")
    finally:
        vertex.USE_MODEL_MAPPING = saved_mapping
        if added_model:
            vertex.known_chat_models.remove("test-model")

    assert isinstance(result, Response)
    assert result.status_code == 200
    assert b"hi" in result.body
    assert result.headers["content-type"] == "application/json"
142
+
143
@pytest.mark.asyncio
@patch("api.routers.vertex.httpx.AsyncClient")
@patch("api.routers.vertex.get_header")
@patch("api.routers.vertex.get_model", return_value="anthropic-model")
async def test_handle_proxy_anthropic_conversion(
    mock_get_model, mock_get_header, mock_async_client, dummy_request
):
    """An Anthropic upstream response is converted to OpenAI chat format.

    Conversion is triggered when the resolved model is NOT listed in
    known_chat_models.
    """
    req = dummy_request(
        body=json.dumps(
            {"model": "foo", "messages": [{"role": "user", "content": "hi"}]}
        ).encode()
    )
    mock_get_header.return_value = ("http://target", {"Authorization": "Bearer token"})
    mock_response = MagicMock()
    # Simulate an Anthropic-style upstream response body.
    anthropic_resp = json.dumps({
        "id": "abc123",
        "role": "assistant",
        "content": [{"type": "text", "text": "Hello!"}],
        "stop_reason": "stop",
        "usage": {"prompt_tokens": 5, "completion_tokens": 2},
    }).encode()
    mock_response.content = anthropic_resp
    mock_response.status_code = 200
    mock_response.headers = {"content-type": "application/json"}
    mock_async_client.return_value.__aenter__.return_value.request.return_value = mock_response

    # Fix: previously this test flipped USE_MODEL_MAPPING and permanently
    # removed "anthropic-model" from known_chat_models without restoring
    # either, polluting later tests. Save and restore in try/finally.
    saved_mapping = vertex.USE_MODEL_MAPPING
    removed_model = "anthropic-model" in vertex.known_chat_models
    vertex.USE_MODEL_MAPPING = True
    # Ensure the model is absent from known_chat_models so the Anthropic
    # conversion path is exercised.
    if removed_model:
        vertex.known_chat_models.remove("anthropic-model")
    try:
        result = await vertex.handle_proxy(req, "/v1/chat/completions")
    finally:
        vertex.USE_MODEL_MAPPING = saved_mapping
        if removed_model:
            vertex.known_chat_models.append("anthropic-model")

    assert isinstance(result, Response)
    data = json.loads(result.body)
    assert data["object"] == "chat.completion"
    assert data["choices"][0]["message"]["content"] == "Hello!"
175
+
176
@pytest.mark.asyncio
@patch("api.routers.vertex.httpx.AsyncClient", side_effect=Exception("network error"))
@patch("api.routers.vertex.get_header")
@patch("api.routers.vertex.get_model", return_value="test-model")
async def test_handle_proxy_httpx_exception(
    mock_get_model, mock_get_header, mock_async_client, dummy_request
):
    """An upstream transport failure surfaces as a 502 Bad Gateway response."""
    req = dummy_request(body=json.dumps({"model": "foo"}).encode())
    mock_get_header.return_value = ("http://target", {"Authorization": "Bearer token"})

    # Fix: the original asserted status_code == 502 and the body message
    # twice each (copy-paste duplication) and mutated module globals with
    # no cleanup. Assertions are deduplicated and state is restored.
    saved_mapping = vertex.USE_MODEL_MAPPING
    added_model = "test-model" not in vertex.known_chat_models
    vertex.USE_MODEL_MAPPING = True
    if added_model:
        vertex.known_chat_models.append("test-model")
    try:
        # Widen httpx.RequestError to plain Exception so the generic
        # side_effect raised by the mocked AsyncClient is caught by the
        # handler's except clause.
        with patch("api.routers.vertex.httpx.RequestError", Exception):
            result = await vertex.handle_proxy(req, "/v1/chat/completions")
    finally:
        vertex.USE_MODEL_MAPPING = saved_mapping
        if added_model:
            vertex.known_chat_models.remove("test-model")

    assert isinstance(result, Response)
    # 502 (Bad Gateway) with an explanatory body signals the upstream failure.
    assert result.status_code == 502
    assert b"Upstream request failed" in result.body
199
+
0 commit comments