
Commit 0defb0d

Author: Dmytro Parfeniuk
Commit message: 🚧 WIP
Parent: 4af96de

File tree: 11 files changed, +214 -10 lines changed

src/guidellm/backend/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -1,9 +1,9 @@
-from .base import Backend, BackendTypes, GenerativeResponse
+from .base import Backend, BackendEngine, GenerativeResponse
 from .openai import OpenAIBackend

 __all__ = [
     "Backend",
-    "BackendTypes",
+    "BackendEngine",
     "GenerativeResponse",
     "OpenAIBackend",
 ]

src/guidellm/backend/base.py

Lines changed: 4 additions & 4 deletions
@@ -9,10 +9,10 @@
 from guidellm.core.request import TextGenerationRequest
 from guidellm.core.result import TextGenerationResult

-__all__ = ["Backend", "BackendTypes", "GenerativeResponse"]
+__all__ = ["Backend", "BackendEngine", "GenerativeResponse"]


-class BackendTypes(Enum):
+class BackendEngine(Enum):
     TEST = "test"
     OPENAI_SERVER = "openai_server"

@@ -39,7 +39,7 @@ class Backend(ABC):
     _registry = {}

     @staticmethod
-    def register_backend(backend_type: BackendTypes):
+    def register(backend_type: BackendEngine):
         """
         A decorator to register a backend class in the backend registry.

@@ -54,7 +54,7 @@ def inner_wrapper(wrapped_class: Type["Backend"]):
         return inner_wrapper

     @staticmethod
-    def create_backend(backend_type: Union[str, BackendTypes], **kwargs) -> "Backend":
+    def create(backend_type: Union[str, BackendEngine], **kwargs) -> "Backend":
         """
         Factory method to create a backend based on the backend type.

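
The rename moves the public registry/factory surface from register_backend/create_backend to register/create. Below is a simplified, self-contained sketch of the same decorator-registry pattern; it is not guidellm's actual class, and Engine, MiniBackend, and EchoBackend are illustrative stand-ins:

from enum import Enum
from typing import Dict, Type


class Engine(Enum):
    # Stand-in for guidellm's BackendEngine members.
    TEST = "test"


class MiniBackend:
    # Simplified stand-in for guidellm.backend.Backend: a class-level registry,
    # a decorator that fills it, and a factory that reads from it.
    _registry: Dict[Engine, Type["MiniBackend"]] = {}

    @staticmethod
    def register(engine: Engine):
        def inner(cls: Type["MiniBackend"]) -> Type["MiniBackend"]:
            MiniBackend._registry[engine] = cls
            return cls

        return inner

    @staticmethod
    def create(engine: Engine, **kwargs) -> "MiniBackend":
        return MiniBackend._registry[engine](**kwargs)


@MiniBackend.register(Engine.TEST)
class EchoBackend(MiniBackend):
    def __init__(self, reply: str = "ok"):
        self.reply = reply


backend = MiniBackend.create(Engine.TEST, reply="hello")
assert isinstance(backend, EchoBackend) and backend.reply == "hello"

Call sites in guidellm move accordingly: Backend.create_backend(...) becomes Backend.create(...), as the main.py hunk below shows.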

src/guidellm/backend/openai.py

Lines changed: 3 additions & 3 deletions
@@ -4,13 +4,13 @@
 from loguru import logger
 from transformers import AutoTokenizer

-from guidellm.backend import Backend, BackendTypes, GenerativeResponse
-from guidellm.core.request import TextGenerationRequest
+from guidellm.backend import Backend, BackendEngine, GenerativeResponse
+from guidellm.core import TextGenerationRequest

 __all__ = ["OpenAIBackend"]


-@Backend.register_backend(BackendTypes.OPENAI_SERVER)
+@Backend.register(BackendEngine.OPENAI_SERVER)
 class OpenAIBackend(Backend):
     """
     An OpenAI backend implementation for the generative AI result.

src/guidellm/main.py

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ def main(
     num_requests,
 ):
     # Create backend
-    Backend.create_backend(
+    Backend.create(
         backend_type=backend,
         target=target,
         host=host,

tests/conftest.py

Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
+import random
+from typing import List, Optional
+
+import pytest
+from loguru import logger
+from openai.pagination import SyncPage
+from openai.types import Completion, Model
+
+from guidellm.backend import Backend, BackendEngine, OpenAIBackend
+
+from . import dummy
+
+
+def pytest_configure() -> None:
+    logger.disable("guidellm")
+
+
+@pytest.fixture(autouse=True)
+def openai_models_list_patch(mocker) -> List[Model]:
+    """
+    Mock the available-models call to avoid a real OpenAI API request.
+    """
+
+    items = dummy.data.OpenAIModel.batch(3)
+    mocker.patch(
+        "openai.resources.models.Models.list",
+        return_value=SyncPage(object="list", data=items),
+    )
+
+    return items
+
+
+@pytest.fixture(autouse=True)
+def openai_completion_create_patch(mocker) -> List[Completion]:
+    """
+    Mock the completion create call to avoid a real OpenAI API request.
+    """
+
+    items = dummy.data.OpenAICompletion.batch(random.randint(2, 5))
+    mocker.patch("openai.resources.completions.Completions.create", return_value=items)
+
+    return items
+
+
+@pytest.fixture
+def openai_backend_factory():
+    """
+    OpenAI Backend factory method.
+    Calling it without arguments returns the default Backend service.
+    """

+    def inner_wrapper(*_, base_url: Optional[str] = None, **kwargs) -> OpenAIBackend:
+        static = {"backend_type": BackendEngine.OPENAI_SERVER}
+        defaults = {
+            "openai_api_key": "required but not used",
+            "internal_callback_url": base_url or "http://localhost:8080",
+        }
+
+        defaults.update(kwargs)
+        defaults.update(static)
+
+        return Backend.create(**defaults)
+
+    return inner_wrapper
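
Together these fixtures keep backend tests fully offline: the two autouse fixtures patch the OpenAI client's models.list and completions.create calls with polyfactory-built dummies, and openai_backend_factory builds a backend against those patches. A hypothetical test sketch (the test name and assertion are illustrative, and pytest-mock is assumed to be installed, since the fixtures rely on mocker):

from guidellm.backend import OpenAIBackend


def test_factory_builds_openai_backend(openai_backend_factory):
    # The autouse patches above are already active, so no real API is hit.
    backend = openai_backend_factory()
    assert isinstance(backend, OpenAIBackend)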

tests/dummy/__init__.py

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+"""
+The tests.dummy package contains dummy data factories and test services.
+tests.dummy.data.OpenAIModel - openai.Model test factory
+tests.dummy.data.OpenAICompletion - openai.Completion test factory
+tests.dummy.data.OpenAICompletionChoice - openai.CompletionChoice test factory
+tests.dummy.services.TestRequestGenerator - RequestGenerator that is used
+for testing purposes
+"""
+
+from . import data, services

tests/dummy/data/__init__.py

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+from .openai import OpenAICompletion, OpenAICompletionChoice, OpenAIModel
+
+__all__ = ["OpenAIModel", "OpenAICompletionChoice", "OpenAICompletion"]

tests/dummy/data/openai.py

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+"""
+This module includes data model factories for the third-party openai package.
+"""
+
+import functools
+import random
+
+from openai.types import Completion, CompletionChoice, Model
+from polyfactory.factories.pydantic_factory import ModelFactory
+
+
+class OpenAIModel(ModelFactory[Model]):
+    """
+    A model factory for the OpenAI Model representation.
+    """
+
+    pass
+
+
+class OpenAICompletionChoice(ModelFactory[CompletionChoice]):
+    """
+    A model factory for the OpenAI Completion Choice representation.
+    """
+
+    finish_reason = "stop"
+
+
+class OpenAICompletion(ModelFactory[Completion]):
+    """
+    A model factory for the OpenAI Completion representation.
+    """
+
+    choices = functools.partial(OpenAICompletionChoice.batch, random.randint(3, 5))
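
These classes are thin polyfactory ModelFactory subclasses, so tests can generate fully populated OpenAI response objects on demand. A small usage sketch (assumed to run from the repository root, where the tests package is importable):

from tests.dummy.data import OpenAICompletion, OpenAIModel

# build() returns a single object with randomly generated field values;
# batch(n) returns a list of n such objects.
completion = OpenAICompletion.build()
models = OpenAIModel.batch(3)

# Every generated choice uses the fixed finish_reason declared on the factory.
assert all(choice.finish_reason == "stop" for choice in completion.choices)
assert len(models) == 3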

tests/dummy/services/__init__.py

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+from .requests import TestRequestGenerator
+
+__all__ = [
+    "TestRequestGenerator",
+]

tests/dummy/services/requests.py

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+from guidellm.core import TextGenerationRequest
+from guidellm.request import RequestGenerator
+
+
+class TestRequestGenerator(RequestGenerator):
+    """
+    This class represents the testing request generator.
+    Its only purpose is to be used in tests.
+    """
+
+    def create_item(self) -> TextGenerationRequest:
+        return TextGenerationRequest(prompt="Test prompt")
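
A short usage sketch for the generator; the mode="sync" argument is an assumption about the RequestGenerator base-class signature (that class is not part of this commit) and is meant to avoid any asynchronous background queue:

from tests.dummy.services import TestRequestGenerator

generator = TestRequestGenerator(mode="sync")
request = generator.create_item()
assert request.prompt == "Test prompt"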
