Commit 0d4f319

fix typing issues
1 parent 514f6ec commit 0d4f319

File tree

src/guidellm/backend/openai.py
src/guidellm/core/distribution.py
src/guidellm/core/result.py
src/guidellm/executor/profile_generator.py
src/guidellm/main.py
src/guidellm/request/emulated.py
src/guidellm/request/file.py
src/guidellm/scheduler/scheduler.py

8 files changed: +41 −10 lines

src/guidellm/backend/openai.py

Lines changed: 1 addition & 1 deletion

@@ -130,7 +130,7 @@ def available_models(self) -> List[str]:
         :rtype: List[str]
         """
 
-        models: list[str] = [
+        models: List[str] = [
            model.id for model in self.openai_client.models.list().data
        ]
        logger.info(f"Available models: {models}")
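Note: `typing.List` is needed here because PEP 585 built-in generics like `list[str]` only became valid in Python 3.9; type checkers targeting 3.8 reject them, and module-level annotations using them raise at runtime. A minimal sketch (model ids hypothetical):

    from typing import List

    # On Python 3.8 a module-level `models: list[str] = []` raises
    # "TypeError: 'type' object is not subscriptable"; typing.List works
    # on every version the project supports.
    models: List[str] = ["model-a", "model-b"]  # hypothetical ids
    print(models)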

src/guidellm/core/distribution.py

Lines changed: 1 addition & 1 deletion

@@ -135,7 +135,7 @@ def percentile(self, percentile: float) -> float:
            logger.warning("No data points available to calculate percentile.")
            return 0.0
 
-        percentile_value = np.percentile(self._data, percentile)
+        percentile_value = np.percentile(self._data, percentile).item()
        logger.debug(f"Calculated {percentile}th percentile: {percentile_value}")
        return percentile_value
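Note: `np.percentile` returns a NumPy scalar, so the declared `-> float` return type did not hold; `.item()` converts it to a built-in float. A quick sketch of the difference:

    import numpy as np

    data = [1.0, 2.0, 3.0, 4.0]
    raw = np.percentile(data, 50)           # NumPy scalar (numpy.float64)
    value = np.percentile(data, 50).item()  # built-in Python float
    print(type(raw), type(value))  # <class 'numpy.float64'> <class 'float'>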

src/guidellm/core/result.py

Lines changed: 28 additions & 2 deletions

@@ -80,7 +80,7 @@ def __eq__(self, other: object) -> bool:
         :rtype: bool
         """
 
-        if not isinstance(other, "TextGenerationResult"):
+        if not isinstance(other, TextGenerationResult):
            raise NotImplementedError(
                "Only TextGenerationResult type could be used in that operation"
            )

@@ -232,6 +232,7 @@ def output_token(self, token: str):
 
     def end(
         self,
+        output: str,
        prompt_token_count: Optional[int] = None,
        output_token_count: Optional[int] = None,
    ):

@@ -253,7 +254,7 @@ def end(
         self._output_token_count = output_token_count or self._output_word_count
         self._prompt_token_count = prompt_token_count or self._prompt_word_count
 
-        logger.info(f"Text generation ended with output: '{self.output}'")
+        logger.info(f"Text generation ended with output: '{output}'")
 
 
 class TextGenerationError:

@@ -347,6 +348,8 @@ def __init__(self, mode: str, rate: Optional[float]):
         self._results: List[TextGenerationResult] = []
         self._errors: List[TextGenerationError] = []
         self._concurrencies: List[RequestConcurrencyMeasurement] = []
+        self._overloaded = False
+        self._args_rate: Optional[float] = None
 
        logger.debug(
            f"Initialized TextGenerationBenchmark with mode={mode} and rate={rate}"

@@ -400,6 +403,16 @@ def __iter__(self):
         """
         return iter(self._results)
 
+    @property
+    def overloaded(self) -> bool:
+        """
+        Get the overloaded state of the result.
+
+        :return: The overloaded state.
+        :rtype: bool
+        """
+        return self._overloaded
+
     @property
     def mode(self) -> str:
         """

@@ -410,6 +423,16 @@ def mode(self) -> str:
         """
         return self._mode
 
+    @property
+    def args_rate(self) -> Optional[float]:
+        """
+        Get the args rate of the result.
+
+        :return: The args rate.
+        :rtype: Optional[float]
+        """
+        return self._args_rate
+
     @property
     def rate(self) -> Optional[float]:
         """

@@ -631,3 +654,6 @@ def add_benchmark(self, benchmark: TextGenerationBenchmark):
         """
         self._benchmarks.append(benchmark)
         logger.debug(f"Added result: {benchmark}")
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {}
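Note: the `__eq__` hunk fixes a runtime bug as well as a typing one: `isinstance` does not accept a string as its second argument, so the old code raised TypeError before the intended NotImplementedError could fire. A minimal reproduction (class body elided):

    class TextGenerationResult:  # stand-in for the real class
        pass

    other = TextGenerationResult()
    # isinstance(other, "TextGenerationResult") raises:
    #   TypeError: isinstance() arg 2 must be a type or tuple of types
    print(isinstance(other, TextGenerationResult))  # True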

src/guidellm/executor/profile_generator.py

Lines changed: 2 additions & 2 deletions

@@ -1,7 +1,7 @@
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from enum import Enum
-from typing import Optional, Union
+from typing import Dict, Optional, Type, Union
 
 import numpy
 

@@ -29,7 +29,7 @@ class Profile:
 
 
 class ProfileGenerator(ABC):
-    _registry = {}
+    _registry: Dict[ProfileGenerationModes, "Type[ProfileGenerator]"] = {}
 
    @staticmethod
    def register_generator(mode: ProfileGenerationModes):
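Note: mypy cannot infer key and value types for a bare `{}`, reporting "Need type annotation"; the quoted `"Type[ProfileGenerator]"` is a forward reference, since `ProfileGenerator` is still being defined when the class body runs. A minimal sketch of the same registry pattern (names hypothetical):

    from enum import Enum
    from typing import Dict, Type

    class Mode(Enum):
        SWEEP = "sweep"

    class Generator:
        # Without the annotation, mypy reports: Need type annotation for "_registry"
        _registry: Dict[Mode, "Type[Generator]"] = {}

        @classmethod
        def register(cls, mode: Mode):
            def wrapper(subclass: "Type[Generator]") -> "Type[Generator]":
                cls._registry[mode] = subclass
                return subclass
            return wrapper

    @Generator.register(Mode.SWEEP)
    class SweepGenerator(Generator):
        pass

    print(Generator._registry)  # {<Mode.SWEEP: 'sweep'>: <class '__main__.SweepGenerator'>}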

src/guidellm/main.py

Lines changed: 2 additions & 1 deletion

@@ -10,6 +10,7 @@
     FileRequestGenerator,
     TransformersDatasetRequestGenerator,
 )
+from guidellm.request.base import RequestGenerator
 
 
 @click.command()

@@ -95,7 +96,7 @@ def main(
     )
 
     if data_type == "emulated":
-        request_generator = EmulatedRequestGenerator(config=data, tokenizer=tokenizer)
+        request_generator: RequestGenerator = EmulatedRequestGenerator(config=data, tokenizer=tokenizer)
    elif data_type == "file":
        request_generator = FileRequestGenerator(file_path=data, tokenizer=tokenizer)
    elif data_type == "transformers":
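Note: without the annotation, mypy infers the concrete `EmulatedRequestGenerator` type from the first branch and then rejects the reassignments in the `elif` branches; annotating the first assignment with the shared base class widens the variable's type. A sketch with hypothetical stand-in classes:

    class RequestGenerator: ...
    class EmulatedRequestGenerator(RequestGenerator): ...
    class FileRequestGenerator(RequestGenerator): ...

    def build(data_type: str) -> RequestGenerator:
        if data_type == "emulated":
            # Annotating the first assignment widens the inferred type,
            # so the reassignment below also type-checks.
            request_generator: RequestGenerator = EmulatedRequestGenerator()
        else:
            request_generator = FileRequestGenerator()
        return request_generator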

src/guidellm/request/emulated.py

Lines changed: 3 additions & 3 deletions

@@ -2,7 +2,7 @@
 import re
 import unicodedata
 from dataclasses import dataclass
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import requests

@@ -26,7 +26,7 @@ class EmulatedConfig:
     prompt_tokens_min: Optional[int] = None
     prompt_tokens_max: Optional[int] = None
 
-    generated_tokens: int = None
+    generated_tokens: Optional[int] = None
    generated_tokens_variance: Optional[int] = None
    generated_tokens_min: Optional[int] = None
    generated_tokens_max: Optional[int] = None

@@ -151,7 +151,7 @@ def _token_count(self, text: str) -> int:
            len(self.tokenizer.tokenize(text)) if self.tokenizer else len(text.split())
        )
 
-    def _sample_prompt(self) -> (str, int):
+    def _sample_prompt(self) -> Tuple[str, int]:
        prompt_tokens = self._config.prompt_tokens
        prompt_tokens_variance = self._config.prompt_tokens_variance or 0
        prompt_tokens_min = self._config.prompt_tokens_min or 1
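Note: `-> (str, int)` evaluates to an ordinary tuple of type objects, which is not a valid annotation; `Tuple[str, int]` is the correct spelling, hence the new `Tuple` import. Similarly, `generated_tokens: int = None` pairs a non-optional annotation with a `None` default. A short sketch of both fixes:

    from typing import Optional, Tuple

    # "(str, int)" is just a runtime tuple, not an annotation type
    # checkers accept; Tuple[str, int] is the proper return type.
    def sample_prompt() -> Tuple[str, int]:
        return "a prompt", 2

    # A None default requires an Optional annotation:
    generated_tokens: Optional[int] = None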

src/guidellm/request/file.py

Lines changed: 2 additions & 0 deletions

@@ -85,6 +85,8 @@ def _load_csv_file(self) -> List[str]:
         with open(self._file_path, "r", encoding="utf-8") as file:
             reader = csv.DictReader(file)
             columns = reader.fieldnames
+            if not columns:
+                raise ValueError("Invalid empty value for columns")
            for row in reader:
                # convert the row to a dictionary
                obj = {col: row[col] for col in columns}
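Note: `csv.DictReader.fieldnames` is typed `Optional[Sequence[str]]` in the stubs, so the new guard both rejects a header-less file and narrows `columns` to a non-None sequence before the comprehension uses it. A self-contained sketch:

    import csv
    import io

    reader = csv.DictReader(io.StringIO("prompt,count\nhello,1\n"))
    columns = reader.fieldnames  # Optional[Sequence[str]]
    if not columns:
        raise ValueError("Invalid empty value for columns")
    for row in reader:
        # After the guard, type checkers treat columns as Sequence[str].
        obj = {col: row[col] for col in columns}
        print(obj)  # {'prompt': 'hello', 'count': '1'}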

src/guidellm/scheduler/scheduler.py

Lines changed: 2 additions & 0 deletions

@@ -71,6 +71,8 @@ async def _run_async(self) -> TextGenerationBenchmark:
         result_set = TextGenerationBenchmark(
             mode=self._load_gen_mode.value, rate=self._load_gen_rate
         )
+        if (not self._load_gen_rate):
+            raise ValueError("Invalid empty value for self._load_gen_rate")
        load_gen = LoadGenerator(self._load_gen_mode, self._load_gen_rate)
 
        tasks = []
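Note: the guard narrows `self._load_gen_rate` from `Optional[float]` to `float` before it reaches `LoadGenerator`; be aware the truthiness test also rejects a rate of `0.0`, not just `None`. The narrowing in isolation:

    from typing import Optional

    def run(rate: Optional[float]) -> float:
        if not rate:
            raise ValueError("Invalid empty value for rate")
        # Past the raise, type checkers treat rate as float
        # (note: `not rate` is also True for 0.0).
        return rate * 2.0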
