Skip to content

Commit 1c91acc

Browse files
committed
add test_id for test_json_mode
Signed-off-by: Linkun Chen <github@lkchen.net>
1 parent 4b97d68 commit 1c91acc

File tree

1 file changed

+14
-8
lines changed

1 file changed

+14
-8
lines changed

release/llm_tests/serve/probes/test_json_mode.py

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -58,28 +58,34 @@ class NestedResponse(BaseModel):
5858
sorted_list: List[Person] = Field(description="List of the sorted objects")
5959

6060

61-
def get_params_and_expected_type(response_type: str):
61+
def get_params_and_expected_type(response_type: str, test_id: str):
6262
params = {}
6363
if response_type == "basic":
6464
params.update(
6565
**messages(
66-
system("You are a helpful assistant designed to output JSON."),
66+
system(
67+
f"{test_id} You are a helpful assistant designed to output JSON."
68+
),
6769
user("Who won the world series in 2020?"),
6870
)
6971
)
7072
expected_type = BasicResponse
7173
elif response_type == "array":
7274
params.update(
7375
**messages(
74-
system("You are a helpful assistant designed to output JSON."),
76+
system(
77+
f"{test_id} You are a helpful assistant designed to output JSON."
78+
),
7579
user("Sort the numbers 3, 1, 2, 4, 5"),
7680
)
7781
)
7882
expected_type = ArrayResponse
7983
elif response_type == "nested":
8084
params.update(
8185
**messages(
82-
system("You are a helpful assistant designed to output JSON."),
86+
system(
87+
f"{test_id} You are a helpful assistant designed to output JSON."
88+
),
8389
user(
8490
"Sort these people by age: John, 20 years old, Mary, 30 years old, Bob, 10 years old."
8591
),
@@ -116,11 +122,11 @@ def get_response_formats():
116122

117123

118124
async def query_json_model(
119-
model: str, response_type: str, stream: bool, openai_async_client
125+
model: str, response_type: str, stream: bool, openai_async_client, test_id: str
120126
):
121127
querier = TextGenerationProbeQuerier(openai_async_client, {"temperature": 0.0})
122128

123-
params, expected_type = get_params_and_expected_type(response_type)
129+
params, expected_type = get_params_and_expected_type(response_type, test_id)
124130
response = await querier.query(model, stream=stream, **params)
125131
response_str = response.full()
126132

@@ -148,7 +154,7 @@ async def test_json_mode(
148154

149155
responses = await asyncio.gather(
150156
*[
151-
query_json_model(model, response_type, stream, openai_async_client)
157+
query_json_model(model, response_type, stream, openai_async_client, test_id)
152158
for _ in range(n_concurrent_requests)
153159
]
154160
)
@@ -172,7 +178,7 @@ async def test_response_format_options(
172178

173179
params = {
174180
**messages(
175-
system("You are a helpful assistant designed to output JSON."),
181+
system(f"{test_id} You are a helpful assistant designed to output JSON."),
176182
user("Who won the world series in 2020?"),
177183
),
178184
"response_format": response_format,

0 commit comments

Comments (0)