Remove Support for OpenAI < 1 #788

Merged: 20 commits, Jun 18, 2024
Changes from 16 commits
31 changes: 8 additions & 23 deletions guardrails/run/stream_runner.py
@@ -13,7 +13,6 @@
 from guardrails.prompt import Instructions, Prompt
 from guardrails.run.runner import Runner
 from guardrails.schema import Schema, StringSchema
-from guardrails.utils.openai_utils import OPENAI_VERSION
 from guardrails.utils.reask_utils import SkeletonReAsk


@@ -205,29 +204,15 @@ def get_chunk_text(self, chunk: Any, api: Union[PromptCallableBase, None]) -> str:
         """Get the text from a chunk."""
         chunk_text = ""
         if isinstance(api, OpenAICallable):
-            if OPENAI_VERSION.startswith("0"):
-                finished = chunk["choices"][0]["finish_reason"]
-                if "text" in chunk["choices"][0]:
-                    content = chunk["choices"][0]["text"]
-                    if not finished and content:
-                        chunk_text = content
-            else:
-                finished = chunk.choices[0].finish_reason
-                content = chunk.choices[0].text
-                if not finished and content:
-                    chunk_text = content
+            finished = chunk.choices[0].finish_reason
+            content = chunk.choices[0].text
+            if not finished and content:
+                chunk_text = content
         elif isinstance(api, OpenAIChatCallable):
-            if OPENAI_VERSION.startswith("0"):
-                finished = chunk["choices"][0]["finish_reason"]
-                if "content" in chunk["choices"][0]["delta"]:
-                    content = chunk["choices"][0]["delta"]["content"]
-                    if not finished and content:
-                        chunk_text = content
-            else:
-                finished = chunk.choices[0].finish_reason
-                content = chunk.choices[0].delta.content
-                if not finished and content:
-                    chunk_text = content
+            finished = chunk.choices[0].finish_reason
+            content = chunk.choices[0].delta.content
+            if not finished and content:
+                chunk_text = content
         elif isinstance(api, LiteLLMCallable):
             finished = chunk.choices[0].finish_reason
             content = chunk.choices[0].delta.content
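For context, a minimal sketch (not code from this PR) of why the deleted v0 branch is unreachable under openai>=1: streaming chunks are typed objects accessed by attribute, so the dict-style lookups in the removed branch no longer apply. The fake chunk below only imitates that shape; it is not the real ChatCompletionChunk type.

```python
from types import SimpleNamespace


def extract_chat_chunk_text(chunk) -> str:
    # Mirrors the kept code path: attribute access only, no dict-style lookups.
    chunk_text = ""
    finished = chunk.choices[0].finish_reason
    content = chunk.choices[0].delta.content
    if not finished and content:
        chunk_text = content
    return chunk_text


# Stand-in for an openai>=1 streaming chunk (attribute access, like the real object).
fake_chunk = SimpleNamespace(
    choices=[SimpleNamespace(finish_reason=None, delta=SimpleNamespace(content="Hello"))]
)
print(extract_chat_chunk_text(fake_chunk))  # -> Hello
```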
31 changes: 9 additions & 22 deletions guardrails/utils/openai_utils/__init__.py
@@ -1,29 +1,16 @@
 from openai.version import VERSION
+from .v1 import AsyncOpenAIClientV1 as AsyncOpenAIClient
+from .v1 import OpenAIClientV1 as OpenAIClient
+from .v1 import (
+    OpenAIServiceUnavailableError,
+    get_static_openai_acreate_func,
+    get_static_openai_chat_acreate_func,
+    get_static_openai_chat_create_func,
+    get_static_openai_create_func,
+)
 
 OPENAI_VERSION = VERSION
 
-if OPENAI_VERSION.startswith("0"):
-    from .v0 import AsyncOpenAIClientV0 as AsyncOpenAIClient
-    from .v0 import OpenAIClientV0 as OpenAIClient
-    from .v0 import (
-        OpenAIServiceUnavailableError,
-        get_static_openai_acreate_func,
-        get_static_openai_chat_acreate_func,
-        get_static_openai_chat_create_func,
-        get_static_openai_create_func,
-    )
-else:
-    from .v1 import AsyncOpenAIClientV1 as AsyncOpenAIClient
-    from .v1 import OpenAIClientV1 as OpenAIClient
-    from .v1 import (
-        OpenAIServiceUnavailableError,
-        get_static_openai_acreate_func,
-        get_static_openai_chat_acreate_func,
-        get_static_openai_chat_create_func,
-        get_static_openai_create_func,
-    )
-
-
 __all__ = [
     "OPENAI_VERSION",
     "AsyncOpenAIClient",
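With the conditional import gone, everything exported from guardrails.utils.openai_utils resolves to the v1 implementations unconditionally. A hypothetical usage sketch (names taken from the imports above; the assertion only illustrates the new expectation that openai>=1 is installed):

```python
from guardrails.utils.openai_utils import (
    OPENAI_VERSION,
    AsyncOpenAIClient,
    OpenAIClient,
)

# Illustrative expectation only: the package no longer carries a v0 code path,
# so an openai 0.x install has nothing to fall back to.
assert not OPENAI_VERSION.startswith("0")

# Both client aliases now point at the v1-backed classes.
print(OpenAIClient.__name__, AsyncOpenAIClient.__name__)  # OpenAIClientV1 AsyncOpenAIClientV1
```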
5 changes: 1 addition & 4 deletions tests/conftest.py
@@ -1,6 +1,3 @@
 import os
 
-from openai.version import VERSION as OPENAI_VERSION
-
-if OPENAI_VERSION.startswith("1"):
-    os.environ["OPENAI_API_KEY"] = "mocked"
+os.environ["OPENAI_API_KEY"] = "mocked"
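The conftest change simply sets the placeholder key unconditionally. A short sketch of why that suffices under openai>=1 (illustrative, not part of the PR): the v1 client reads OPENAI_API_KEY from the environment when it is constructed, and construction alone makes no network call.

```python
import os

os.environ["OPENAI_API_KEY"] = "mocked"

from openai import OpenAI

# Constructing the client does not contact the API, so a placeholder key is enough
# for tests that never send a real request.
client = OpenAI()
print(client.api_key)  # -> mocked
```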
9 changes: 0 additions & 9 deletions tests/integration_tests/applications/test_text2sql.py
@@ -1,11 +1,9 @@
 import json
 import os
 
-import openai
 import pytest
 
 from guardrails.applications.text2sql import Text2Sql
-from guardrails.utils.openai_utils import OPENAI_VERSION
 
 CURRENT_DIR_PARENT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 SCHEMA_PATH = os.path.join(CURRENT_DIR_PARENT, "test_assets/text2sql/schema.sql")
@@ -37,10 +35,3 @@ def test_text2sql_with_examples(conn_str: str, schema_path: str, examples: str,

     # This should not raise an exception.
     Text2Sql(conn_str, schema_file=schema_path, examples=examples)
-
-
-@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0")
-def test_text2sql_with_coro():
-    s = Text2Sql("sqlite://", llm_api=openai.Completion.acreate)
-    with pytest.raises(ValueError):
-        s("")