Skip to content

Commit 1c60efd

Browse files
committed
add pydantic dependency, expand and fix test cases
1 parent c13b305 commit 1c60efd

File tree

5 files changed

+196
-48
lines changed

5 files changed

+196
-48
lines changed

setup.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -23,6 +23,7 @@ def _setup_long_description() -> Tuple[str, str]:
2323
'loguru',
2424
'numpy',
2525
'openai',
26+
'pydantic>=2.0',
2627
'requests',
2728
'transformers',
2829
],

src/guidellm/core/result.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,4 @@
1-
from time import perf_counter, time
1+
from time import time
22
from typing import Any, Dict, List, Optional, Union
33

44
from loguru import logger
@@ -47,7 +47,7 @@ def start(self, prompt: str):
4747
self.prompt_word_count = len(prompt.split())
4848
self.prompt_token_count = len(prompt) # Token count placeholder
4949
self.start_time = time()
50-
self.last_time = perf_counter()
50+
self.last_time = time()
5151
self.first_token_set = False
5252

5353
logger.info("Text generation started with prompt: '{}'", prompt)
@@ -59,7 +59,7 @@ def output_token(self, token: str):
5959
:param token: The decoded token.
6060
:type token: str
6161
"""
62-
current_counter = perf_counter()
62+
current_counter = time()
6363

6464
if not self.first_token_set:
6565
self.first_token_time = current_counter - self.last_time

tests/unit/core/test_distribution.py

Lines changed: 6 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -57,30 +57,22 @@ def test_distribution_repr():
5757

5858

5959
@pytest.mark.regression
60-
def test_distribution_to_json():
60+
def test_distribution_json():
6161
data = [1, 2, 3, 4, 5]
6262
dist = Distribution(data=data)
6363
json_str = dist.to_json()
6464
assert '"data":[1,2,3,4,5]' in json_str
6565

66-
67-
@pytest.mark.regression
68-
def test_distribution_from_json():
69-
json_str = '{"data": [1, 2, 3, 4, 5]}'
70-
dist = Distribution.from_json(json_str)
71-
assert dist.data == [1, 2, 3, 4, 5]
66+
dist_restored = Distribution.from_json(json_str)
67+
assert dist_restored.data == data
7268

7369

7470
@pytest.mark.regression
75-
def test_distribution_to_yaml():
71+
def test_distribution_yaml():
7672
data = [1, 2, 3, 4, 5]
7773
dist = Distribution(data=data)
7874
yaml_str = dist.to_yaml()
7975
assert "data:\n- 1\n- 2\n- 3\n- 4\n- 5\n" in yaml_str
8076

81-
82-
@pytest.mark.regression
83-
def test_distribution_from_yaml():
84-
yaml_str = "data:\n- 1\n- 2\n- 3\n- 4\n- 5\n"
85-
dist = Distribution.from_yaml(yaml_str)
86-
assert dist.data == [1, 2, 3, 4, 5]
77+
dist_restored = Distribution.from_yaml(yaml_str)
78+
assert dist_restored.data == data

tests/unit/core/test_request.py

Lines changed: 22 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -29,46 +29,38 @@ def test_text_generation_request_initialization_with_params():
2929

3030

3131
@pytest.mark.regression
32-
def test_request_to_json():
32+
def test_request_json():
3333
prompt = "Generate text"
34-
request = TextGenerationRequest(prompt=prompt)
34+
prompt_token_count = 10
35+
generated_token_count = 50
36+
params = {"temperature": 0.7}
37+
request = TextGenerationRequest(prompt=prompt, prompt_token_count=prompt_token_count, generated_token_count=generated_token_count, params=params)
3538
json_str = request.to_json()
3639
assert '"prompt":"Generate text"' in json_str
3740
assert '"id":' in json_str
3841

39-
40-
@pytest.mark.regression
41-
def test_request_from_json():
42-
json_str = (
43-
'{"id": "12345", "prompt": "Generate text", "prompt_token_count": 10, '
44-
'"generated_token_count": 50, "params": {"temperature": 0.7}}'
45-
)
46-
request = TextGenerationRequest.from_json(json_str)
47-
assert request.id == "12345"
48-
assert request.prompt == "Generate text"
49-
assert request.prompt_token_count == 10
50-
assert request.generated_token_count == 50
51-
assert request.params == {"temperature": 0.7}
42+
request_restored = TextGenerationRequest.from_json(json_str)
43+
assert request.id == request_restored.id
44+
assert request_restored.prompt == prompt
45+
assert request_restored.prompt_token_count == prompt_token_count
46+
assert request_restored.generated_token_count == generated_token_count
47+
assert request_restored.params == params
5248

5349

5450
@pytest.mark.regression
55-
def test_request_to_yaml():
51+
def test_request_yaml():
5652
prompt = "Generate text"
57-
request = TextGenerationRequest(prompt=prompt)
53+
prompt_token_count = 15
54+
generated_token_count = 55
55+
params = {"temperature": 0.8}
56+
request = TextGenerationRequest(prompt=prompt, prompt_token_count=prompt_token_count, generated_token_count=generated_token_count, params=params)
5857
yaml_str = request.to_yaml()
5958
assert "prompt: Generate text" in yaml_str
6059
assert "id:" in yaml_str
6160

62-
63-
@pytest.mark.regression
64-
def test_request_from_yaml():
65-
yaml_str = (
66-
"id: '12345'\nprompt: Generate text\nprompt_token_count: 10\n"
67-
"generated_token_count: 50\nparams:\n temperature: 0.7\n"
68-
)
69-
request = TextGenerationRequest.from_yaml(yaml_str)
70-
assert request.id == "12345"
71-
assert request.prompt == "Generate text"
72-
assert request.prompt_token_count == 10
73-
assert request.generated_token_count == 50
74-
assert request.params == {"temperature": 0.7}
61+
request_restored = TextGenerationRequest.from_yaml(yaml_str)
62+
assert request.id == request_restored.id
63+
assert request_restored.prompt == prompt
64+
assert request_restored.prompt_token_count == prompt_token_count
65+
assert request_restored.generated_token_count == generated_token_count
66+
assert request_restored.params == params

tests/unit/core/test_result.py

Lines changed: 164 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -28,13 +28,68 @@ def test_text_generation_result_start():
2828
assert result.start_time is not None
2929

3030

31+
@pytest.mark.sanity
32+
def test_text_generation_result_output_token():
33+
request = TextGenerationRequest(prompt="Generate a story")
34+
result = TextGenerationResult(request=request)
35+
prompt = "Once upon a time"
36+
result.start(prompt)
37+
token = "the"
38+
result.output_token(token)
39+
assert result.output == f"{token} "
40+
assert result.last_time is not None and result.last_time > result.start_time
41+
42+
3143
@pytest.mark.sanity
3244
def test_text_generation_result_end():
3345
request = TextGenerationRequest(prompt="Generate a story")
3446
result = TextGenerationResult(request=request)
47+
result.start("Once upon a time")
3548
result.end("The end")
3649
assert result.output == "The end"
37-
assert result.end_time is not None
50+
assert result.end_time is not None and result.end_time > result.start_time
51+
52+
53+
@pytest.mark.regression
54+
def test_text_generation_result_json():
55+
request = TextGenerationRequest(prompt="Generate a story")
56+
result = TextGenerationResult(request=request)
57+
prompt = "Once upon a time"
58+
result.start(prompt)
59+
generated = "The end"
60+
result.end(generated)
61+
json_str = result.to_json()
62+
assert '"prompt":"Once upon a time"' in json_str
63+
assert '"output":"The end"' in json_str
64+
65+
result_restored = TextGenerationResult.from_json(json_str)
66+
assert result.request == result_restored.request
67+
assert result_restored.prompt == prompt
68+
assert result_restored.output == generated
69+
70+
json_str_restored = result_restored.to_json()
71+
assert json_str == json_str_restored
72+
73+
74+
@pytest.mark.regression
75+
def test_text_generation_result_yaml():
76+
request = TextGenerationRequest(prompt="Generate a story")
77+
result = TextGenerationResult(request=request)
78+
prompt = "Once upon a time"
79+
result.start(prompt)
80+
generated = "The end"
81+
result.end(generated)
82+
yaml_str = result.to_yaml()
83+
assert "prompt: Once upon a time" in yaml_str
84+
assert "output: The end" in yaml_str
85+
86+
result_restored = TextGenerationResult.from_yaml(yaml_str)
87+
assert result.request == result_restored.request
88+
assert result_restored.prompt == prompt
89+
assert result_restored.output == generated
90+
91+
yaml_str_restored = result_restored.to_yaml()
92+
assert yaml_str == yaml_str_restored
3893

3994

4095
@pytest.mark.smoke
@@ -46,6 +101,38 @@ def test_text_generation_error_initialization():
46101
assert result.error == str(error)
47102

48103

104+
@pytest.mark.regression
105+
def test_text_generation_error_json():
106+
request = TextGenerationRequest(prompt="Generate a story")
107+
error = Exception("Test error")
108+
result = TextGenerationError(request=request, error=error)
109+
json_str = result.to_json()
110+
assert '"error":"Test error"' in json_str
111+
112+
result_restored = TextGenerationError.from_json(json_str)
113+
assert result.request == result_restored.request
114+
assert result_restored.error == str(error)
115+
116+
json_str_restored = result_restored.to_json()
117+
assert json_str == json_str_restored
118+
119+
120+
@pytest.mark.regression
121+
def test_text_generation_error_yaml():
122+
request = TextGenerationRequest(prompt="Generate a story")
123+
error = Exception("Test error")
124+
result = TextGenerationError(request=request, error=error)
125+
yaml_str = result.to_yaml()
126+
assert "error: Test error" in yaml_str
127+
128+
result_restored = TextGenerationError.from_yaml(yaml_str)
129+
assert result.request == result_restored.request
130+
assert result_restored.error == str(error)
131+
132+
yaml_str_restored = result_restored.to_yaml()
133+
assert yaml_str == yaml_str_restored
134+
135+
49136
@pytest.mark.smoke
50137
def test_text_generation_benchmark_initialization():
51138
benchmark = TextGenerationBenchmark(mode="test", rate=1.0)
@@ -84,6 +171,48 @@ def test_text_generation_benchmark_completed_with_error():
84171
assert benchmark.error_count == 1
85172

86173

174+
@pytest.mark.regression
175+
def test_text_generation_benchmark_json():
176+
benchmark = TextGenerationBenchmark(mode="test", rate=1.0)
177+
benchmark.request_started()
178+
request = TextGenerationRequest(prompt="Generate a story")
179+
result = TextGenerationResult(request=request)
180+
benchmark.request_completed(result)
181+
json_str = benchmark.to_json()
182+
assert '"mode":"test"' in json_str
183+
assert '"rate":1.0' in json_str
184+
185+
benchmark_restored = TextGenerationBenchmark.from_json(json_str)
186+
assert benchmark.mode == benchmark_restored.mode
187+
assert benchmark.rate == benchmark_restored.rate
188+
assert benchmark.request_count == benchmark_restored.request_count
189+
assert benchmark.error_count == benchmark_restored.error_count
190+
191+
json_str_restored = benchmark_restored.to_json()
192+
assert json_str == json_str_restored
193+
194+
195+
@pytest.mark.regression
196+
def test_text_generation_benchmark_yaml():
197+
benchmark = TextGenerationBenchmark(mode="test", rate=1.0)
198+
benchmark.request_started()
199+
request = TextGenerationRequest(prompt="Generate a story")
200+
result = TextGenerationResult(request=request)
201+
benchmark.request_completed(result)
202+
yaml_str = benchmark.to_yaml()
203+
assert "mode: test" in yaml_str
204+
assert "rate: 1.0" in yaml_str
205+
206+
benchmark_restored = TextGenerationBenchmark.from_yaml(yaml_str)
207+
assert benchmark.mode == benchmark_restored.mode
208+
assert benchmark.rate == benchmark_restored.rate
209+
assert benchmark.request_count == benchmark_restored.request_count
210+
assert benchmark.error_count == benchmark_restored.error_count
211+
212+
yaml_str_restored = benchmark_restored.to_yaml()
213+
assert yaml_str == yaml_str_restored
214+
215+
87216
@pytest.mark.smoke
88217
def test_text_generation_benchmark_report_initialization():
89218
report = TextGenerationBenchmarkReport()
@@ -97,3 +226,37 @@ def test_text_generation_benchmark_report_add_benchmark():
97226
benchmark = TextGenerationBenchmark(mode="test", rate=1.0)
98227
report.add_benchmark(benchmark)
99228
assert len(report.benchmarks) == 1
229+
230+
231+
@pytest.mark.regression
232+
def test_text_generation_benchmark_report_json():
233+
report = TextGenerationBenchmarkReport()
234+
benchmark = TextGenerationBenchmark(mode="test", rate=1.0)
235+
report.add_benchmark(benchmark)
236+
json_str = report.to_json()
237+
assert '"benchmarks":' in json_str
238+
assert '"args":[]' in json_str
239+
240+
report_restored = TextGenerationBenchmarkReport.from_json(json_str)
241+
assert len(report.benchmarks) == len(report_restored.benchmarks)
242+
assert len(report.args) == len(report_restored.args)
243+
244+
json_str_restored = report_restored.to_json()
245+
assert json_str == json_str_restored
246+
247+
248+
@pytest.mark.regression
249+
def test_text_generation_benchmark_report_yaml():
250+
report = TextGenerationBenchmarkReport()
251+
benchmark = TextGenerationBenchmark(mode="test", rate=1.0)
252+
report.add_benchmark(benchmark)
253+
yaml_str = report.to_yaml()
254+
assert "benchmarks:" in yaml_str
255+
assert "args: []" in yaml_str
256+
257+
report_restored = TextGenerationBenchmarkReport.from_yaml(yaml_str)
258+
assert len(report.benchmarks) == len(report_restored.benchmarks)
259+
assert len(report.args) == len(report_restored.args)
260+
261+
yaml_str_restored = report_restored.to_yaml()
262+
assert yaml_str == yaml_str_restored

0 commit comments

Comments (0)