
Commit 2e2ee0e

Update tests.

1 parent bee6a92 commit 2e2ee0e

File tree

3 files changed: +14 -7 lines changed


tests/unitary/with_extras/langchain/chat_models/__init__.py

Whitespace-only changes.

tests/unitary/with_extras/langchain/llms/__init__.py

Whitespace-only changes.

tests/unitary/with_extras/langchain/llms/test_oci_model_deployment_endpoint.py

Lines changed: 14 additions & 7 deletions

@@ -8,10 +8,17 @@

 CONST_MODEL_NAME = "odsc-vllm"
 CONST_ENDPOINT = "https://oci.endpoint/ocid/predict"
-CONST_PROMPT_FOR_COMPLETION = "This is a prompt."
+CONST_PROMPT = "This is a prompt."
 CONST_COMPLETION = "This is a completion."
 CONST_COMPLETION_RESPONSE = {
-    "choices": [{"index": 0, "text": CONST_COMPLETION}],
+    "choices": [
+        {
+            "index": 0,
+            "text": CONST_COMPLETION,
+            "logprobs": 0.1,
+            "finish_reason": "length",
+        }
+    ],
 }
 CONST_COMPLETION_RESPONSE_TGI = {"generated_text": CONST_COMPLETION}
 CONST_STREAM_TEMPLATE = (

@@ -68,7 +75,7 @@ def text(self):
     prompt = payload.get("prompt")
     is_tgi = False

-    if prompt == CONST_PROMPT_FOR_COMPLETION:
+    if prompt == CONST_PROMPT:
         if is_tgi:
             return MockResponse(json_data=CONST_COMPLETION_RESPONSE_TGI)
         return MockResponse(json_data=CONST_COMPLETION_RESPONSE)

@@ -91,7 +98,7 @@ async def mocked_async_streaming_response(*args, **kwargs):
 def test_invoke_vllm(mock_post, mock_auth) -> None:
     """Tests invoking vLLM endpoint."""
     llm = OCIModelDeploymentVLLM(endpoint=CONST_ENDPOINT, model=CONST_MODEL_NAME)
-    output = llm.invoke(CONST_PROMPT_FOR_COMPLETION)
+    output = llm.invoke(CONST_PROMPT)
     assert output == CONST_COMPLETION


@@ -105,7 +112,7 @@ def test_stream_tgi(mock_post, mock_auth) -> None:
     )
     output = ""
     count = 0
-    for chunk in llm.stream(CONST_PROMPT_FOR_COMPLETION):
+    for chunk in llm.stream(CONST_PROMPT):
         output += chunk
         count += 1
     assert count == 4

@@ -120,7 +127,7 @@ def test_generate_tgi(mock_post, mock_auth) -> None:
     llm = OCIModelDeploymentTGI(
         endpoint=CONST_ENDPOINT, api="/generate", model=CONST_MODEL_NAME
     )
-    output = llm.invoke(CONST_PROMPT_FOR_COMPLETION)
+    output = llm.invoke(CONST_PROMPT)
     assert output == CONST_COMPLETION


@@ -144,5 +151,5 @@ async def test_stream_async(mock_auth):
         mock.MagicMock(return_value=mocked_async_streaming_response()),
     ):

-        chunks = [chunk async for chunk in llm.astream(CONST_PROMPT_FOR_COMPLETION)]
+        chunks = [chunk async for chunk in llm.astream(CONST_PROMPT)]
     assert "".join(chunks).strip() == CONST_COMPLETION
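For context on how the expanded CONST_COMPLETION_RESPONSE payload is consumed, below is a minimal, self-contained sketch of the mock-and-assert pattern these tests follow: a MockResponse stand-in wraps the fixed JSON payload, requests.post is patched so no real model deployment endpoint is contacted, and the test asserts that the completion text comes back unchanged. The call_endpoint helper and this MockResponse are illustrative assumptions, not code from this commit; only the CONST_* values are copied from the diff above.

# Minimal sketch of the mocking pattern suggested by the diff above.
# MockResponse and call_endpoint are illustrative stand-ins, not the helpers
# defined in test_oci_model_deployment_endpoint.py; only the CONST_* payloads
# are taken from the commit.
from unittest import mock

import requests

CONST_PROMPT = "This is a prompt."
CONST_COMPLETION = "This is a completion."
CONST_COMPLETION_RESPONSE = {
    "choices": [
        {
            "index": 0,
            "text": CONST_COMPLETION,
            "logprobs": 0.1,
            "finish_reason": "length",
        }
    ],
}


class MockResponse:
    """Tiny stand-in for requests.Response holding a fixed JSON payload."""

    def __init__(self, json_data, status_code=200):
        self.json_data = json_data
        self.status_code = status_code

    def json(self):
        return self.json_data


def call_endpoint(endpoint, prompt):
    """Hypothetical client: POST the prompt and return the first completion text."""
    response = requests.post(endpoint, json={"prompt": prompt})
    return response.json()["choices"][0]["text"]


def test_call_endpoint_returns_completion():
    # Patch requests.post so no real OCI endpoint is contacted.
    with mock.patch(
        "requests.post", return_value=MockResponse(CONST_COMPLETION_RESPONSE)
    ):
        output = call_endpoint("https://oci.endpoint/ocid/predict", CONST_PROMPT)
    assert output == CONST_COMPLETION

The actual tests in the diff exercise OCIModelDeploymentVLLM and OCIModelDeploymentTGI through llm.invoke, llm.stream, and llm.astream in the same spirit, with the mock_post and mock_auth fixtures presumably standing in for the HTTP call and OCI authentication.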
