From bb8f8add2dcd0fa9992e4d7cb680af3d580b32e2 Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Sat, 25 May 2024 16:20:19 -0700 Subject: [PATCH 01/19] remove support for openai < 1 --- guardrails/run/stream_runner.py | 30 +-- guardrails/utils/openai_utils/__init__.py | 30 +-- tests/conftest.py | 3 +- .../test_embedding_openai.py | 16 +- tests/integration_tests/test_streaming.py | 96 +++----- tests/unit_tests/mock_embeddings.py | 32 ++- tests/unit_tests/mock_provenance_v1.py | 58 ++--- tests/unit_tests/test_llm_providers.py | 218 +++++++----------- tests/unit_tests/test_validators.py | 26 +-- 9 files changed, 192 insertions(+), 317 deletions(-) diff --git a/guardrails/run/stream_runner.py b/guardrails/run/stream_runner.py index 890c3fd9c..c872d7cf8 100644 --- a/guardrails/run/stream_runner.py +++ b/guardrails/run/stream_runner.py @@ -205,29 +205,15 @@ def get_chunk_text(self, chunk: Any, api: Union[PromptCallableBase, None]) -> st """Get the text from a chunk.""" chunk_text = "" if isinstance(api, OpenAICallable): - if OPENAI_VERSION.startswith("0"): - finished = chunk["choices"][0]["finish_reason"] - if "text" in chunk["choices"][0]: - content = chunk["choices"][0]["text"] - if not finished and content: - chunk_text = content - else: - finished = chunk.choices[0].finish_reason - content = chunk.choices[0].text - if not finished and content: - chunk_text = content + finished = chunk.choices[0].finish_reason + content = chunk.choices[0].text + if not finished and content: + chunk_text = content elif isinstance(api, OpenAIChatCallable): - if OPENAI_VERSION.startswith("0"): - finished = chunk["choices"][0]["finish_reason"] - if "content" in chunk["choices"][0]["delta"]: - content = chunk["choices"][0]["delta"]["content"] - if not finished and content: - chunk_text = content - else: - finished = chunk.choices[0].finish_reason - content = chunk.choices[0].delta.content - if not finished and content: - chunk_text = content + finished = chunk.choices[0].finish_reason + content = chunk.choices[0].delta.content + if not finished and content: + chunk_text = content elif isinstance(api, LiteLLMCallable): finished = chunk.choices[0].finish_reason content = chunk.choices[0].delta.content diff --git a/guardrails/utils/openai_utils/__init__.py b/guardrails/utils/openai_utils/__init__.py index 9dfe6e422..175dd6424 100644 --- a/guardrails/utils/openai_utils/__init__.py +++ b/guardrails/utils/openai_utils/__init__.py @@ -2,26 +2,16 @@ OPENAI_VERSION = VERSION -if OPENAI_VERSION.startswith("0"): - from .v0 import AsyncOpenAIClientV0 as AsyncOpenAIClient - from .v0 import OpenAIClientV0 as OpenAIClient - from .v0 import ( - OpenAIServiceUnavailableError, - get_static_openai_acreate_func, - get_static_openai_chat_acreate_func, - get_static_openai_chat_create_func, - get_static_openai_create_func, - ) -else: - from .v1 import AsyncOpenAIClientV1 as AsyncOpenAIClient - from .v1 import OpenAIClientV1 as OpenAIClient - from .v1 import ( - OpenAIServiceUnavailableError, - get_static_openai_acreate_func, - get_static_openai_chat_acreate_func, - get_static_openai_chat_create_func, - get_static_openai_create_func, - ) + +from .v1 import AsyncOpenAIClientV1 as AsyncOpenAIClient +from .v1 import OpenAIClientV1 as OpenAIClient +from .v1 import ( + OpenAIServiceUnavailableError, + get_static_openai_acreate_func, + get_static_openai_chat_acreate_func, + get_static_openai_chat_create_func, + get_static_openai_create_func, +) __all__ = [ diff --git a/tests/conftest.py 
b/tests/conftest.py index 0d298e4e8..514b024dd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,5 +2,4 @@ from openai.version import VERSION as OPENAI_VERSION -if OPENAI_VERSION.startswith("1"): - os.environ["OPENAI_API_KEY"] = "mocked" +os.environ["OPENAI_API_KEY"] = "mocked" diff --git a/tests/integration_tests/test_embedding_openai.py b/tests/integration_tests/test_embedding_openai.py index eebf7bfa5..c9dfcb211 100644 --- a/tests/integration_tests/test_embedding_openai.py +++ b/tests/integration_tests/test_embedding_openai.py @@ -53,10 +53,7 @@ def test_embedding_query(self): def test_embed_query(self, mocker): mock_create = None - if OPENAI_VERSION.startswith("0"): - mock_create = mocker.patch("openai.Embedding.create") - else: - mock_create = mocker.patch("openai.resources.Embeddings.create") + mock_create = mocker.patch("openai.resources.Embeddings.create") mock_create.return_value = MockOpenAIEmbedding() @@ -70,14 +67,9 @@ def test__get_embedding(self, mocker): mock_environ.return_value = "test_api_key" mock_create = None - if OPENAI_VERSION.startswith("0"): - mock_create = mocker.patch("openai.Embedding.create") - mock_create.return_value = MockResponse( - data=[{"embedding": [1.0, 2.0, 3.0]}] - ) - else: - mock_create = mocker.patch("openai.resources.Embeddings.create") - mock_create.return_value = MockResponse(data=[[1.0, 2.0, 3.0]]) + + mock_create = mocker.patch("openai.resources.Embeddings.create") + mock_create.return_value = MockResponse(data=[[1.0, 2.0, 3.0]]) instance = OpenAIEmbedding(api_key="test_api_key") result = instance._get_embedding(["test text"]) diff --git a/tests/integration_tests/test_streaming.py b/tests/integration_tests/test_streaming.py index 4551f8f4e..26c6c758d 100644 --- a/tests/integration_tests/test_streaming.py +++ b/tests/integration_tests/test_streaming.py @@ -61,22 +61,16 @@ def mock_openai_completion_create(): def gen(): for chunk in chunks: - if OPENAI_VERSION.startswith("0"): - yield { - "choices": [{"text": chunk, "finish_reason": None}], - "model": "OpenAI model name", - } - else: - yield MockOpenAIV1ChunkResponse( - choices=[ - Choice( - text=chunk, - delta=Delta(content=""), - finish_reason=None, - ) - ], - model="OpenAI model name", - ) + yield MockOpenAIV1ChunkResponse( + choices=[ + Choice( + text=chunk, + delta=Delta(content=""), + finish_reason=None, + ) + ], + model="OpenAI model name", + ) return gen() @@ -93,28 +87,17 @@ def mock_openai_chat_completion_create(): def gen(): for chunk in chunks: - if OPENAI_VERSION.startswith("0"): - yield { - "choices": [ - { - "index": 0, - "delta": {"content": chunk}, - "finish_reason": None, - } - ] - } - else: - yield MockOpenAIV1ChunkResponse( - choices=[ - Choice( - text="", - delta=Delta(content=chunk), - finish_reason=None, - ) - ], - model="OpenAI model name", - ) - + yield MockOpenAIV1ChunkResponse( + choices=[ + Choice( + text="", + delta=Delta(content=chunk), + finish_reason=None, + ) + ], + model="OpenAI model name", + ) + return gen() @@ -171,23 +154,17 @@ def test_streaming_with_openai_callable( Mocks openai.Completion.create. 
""" - if OPENAI_VERSION.startswith("0"): - mocker.patch( - "openai.Completion.create", return_value=mock_openai_completion_create() - ) - else: - mocker.patch( - "openai.resources.Completions.create", - return_value=mock_openai_completion_create(), - ) + + mocker.patch( + "openai.resources.Completions.create", + return_value=mock_openai_completion_create(), + ) # Create a guard object guard = gd.Guard.from_pydantic(output_class=op_class, prompt=PROMPT) method = ( - openai.Completion.create - if OPENAI_VERSION.startswith("0") - else openai.completions.create + openai.completions.create ) method.__name__ = "mock_openai_completion_create" @@ -227,24 +204,17 @@ def test_streaming_with_openai_chat_callable( Mocks openai.ChatCompletion.create. """ - if OPENAI_VERSION.startswith("0"): - mocker.patch( - "openai.ChatCompletion.create", - return_value=mock_openai_chat_completion_create(), - ) - else: - mocker.patch( - "openai.resources.chat.completions.Completions.create", - return_value=mock_openai_chat_completion_create(), - ) + + mocker.patch( + "openai.resources.chat.completions.Completions.create", + return_value=mock_openai_chat_completion_create(), + ) # Create a guard object guard = gd.Guard.from_pydantic(output_class=op_class, prompt=PROMPT) method = ( - openai.ChatCompletion.create - if OPENAI_VERSION.startswith("0") - else openai.chat.completions.create + openai.chat.completions.create ) method.__name__ = "mock_openai_chat_completion_create" diff --git a/tests/unit_tests/mock_embeddings.py b/tests/unit_tests/mock_embeddings.py index ee5a14aae..090501eff 100644 --- a/tests/unit_tests/mock_embeddings.py +++ b/tests/unit_tests/mock_embeddings.py @@ -22,24 +22,22 @@ def mock_create_embedding(*args, input, **kwargs): except KeyError: print(input) raise ValueError("Text not found in mocked embeddings") - if OPENAI_VERSION.startswith("0"): - return {"data": returns} - else: - from openai.types import CreateEmbeddingResponse, Embedding - from openai.types.create_embedding_response import Usage + + from openai.types import CreateEmbeddingResponse, Embedding + from openai.types.create_embedding_response import Usage - return CreateEmbeddingResponse( - data=[ - Embedding(embedding=r["embedding"], index=i, object="embedding") - for i, r in enumerate(returns) - ], - model="", - object="list", - usage=Usage( - prompt_tokens=10, - total_tokens=10, - ), - ) + return CreateEmbeddingResponse( + data=[ + Embedding(embedding=r["embedding"], index=i, object="embedding") + for i, r in enumerate(returns) + ], + model="", + object="list", + usage=Usage( + prompt_tokens=10, + total_tokens=10, + ), + ) MOCK_EMBEDDINGS = { diff --git a/tests/unit_tests/mock_provenance_v1.py b/tests/unit_tests/mock_provenance_v1.py index 323da4d45..d31ab578e 100644 --- a/tests/unit_tests/mock_provenance_v1.py +++ b/tests/unit_tests/mock_provenance_v1.py @@ -3,40 +3,32 @@ def mock_chat_completion(*args, **kwargs): """Mocks the OpenAI chat completion function for ProvenanceV1.""" - if OPENAI_VERSION.startswith("0"): - return { - "choices": [{"message": {"content": "Yes"}}], - "usage": { - "prompt_tokens": 10, - "completion_tokens": 20, - }, - } - else: - from openai.types import CompletionUsage - from openai.types.chat import ChatCompletion, ChatCompletionMessage - from openai.types.chat.chat_completion import Choice + + from openai.types import CompletionUsage + from openai.types.chat import ChatCompletion, ChatCompletionMessage + from openai.types.chat.chat_completion import Choice - return ChatCompletion( - id="", - choices=[ - 
Choice( - finish_reason="stop", - index=0, - message=ChatCompletionMessage( - content="Yes", - role="assistant", - ), - ) - ], - created=0, - model="", - object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, - completion_tokens=20, - total_tokens=30, - ), - ) + return ChatCompletion( + id="", + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + content="Yes", + role="assistant", + ), + ) + ], + created=0, + model="", + object="chat.completion", + usage=CompletionUsage( + prompt_tokens=10, + completion_tokens=20, + total_tokens=30, + ), + ) def mock_chromadb_query_function(**kwargs): diff --git a/tests/unit_tests/test_llm_providers.py b/tests/unit_tests/test_llm_providers.py index 76fa568c6..a7616e73c 100644 --- a/tests/unit_tests/test_llm_providers.py +++ b/tests/unit_tests/test_llm_providers.py @@ -87,44 +87,31 @@ async def test_async_openai_callable_does_not_retry_on_success(mocker): @pytest.fixture(scope="module") def openai_chat_mock(): - if OPENAI_VERSION.startswith("0"): - return { - "choices": [ - { - "message": {"content": "Mocked LLM output"}, - } - ], - "usage": { - "prompt_tokens": 10, - "completion_tokens": 20, - }, - } - else: - from openai.types import CompletionUsage - from openai.types.chat import ChatCompletion, ChatCompletionMessage - from openai.types.chat.chat_completion import Choice - - return ChatCompletion( - id="", - choices=[ - Choice( - finish_reason="stop", - index=0, - message=ChatCompletionMessage( - content="Mocked LLM output", - role="assistant", - ), + from openai.types import CompletionUsage + from openai.types.chat import ChatCompletion, ChatCompletionMessage + from openai.types.chat.chat_completion import Choice + + return ChatCompletion( + id="", + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + content="Mocked LLM output", + role="assistant", ), - ], - created=0, - model="", - object="chat.completion", - usage=CompletionUsage( - completion_tokens=20, - prompt_tokens=10, - total_tokens=30, ), - ) + ], + created=0, + model="", + object="chat.completion", + usage=CompletionUsage( + completion_tokens=20, + prompt_tokens=10, + total_tokens=30, + ), + ) @pytest.fixture(scope="module") @@ -147,61 +134,48 @@ def gen(): @pytest.fixture(scope="module") def openai_mock(): - if OPENAI_VERSION.startswith("0"): - return { - "choices": [ - { - "text": "Mocked LLM output", - } - ], - "usage": { - "prompt_tokens": 10, - "completion_tokens": 20, - }, - } - else: - - @dataclass - class MockCompletionUsage: - completion_tokens: int - prompt_tokens: int - total_tokens: int - - @dataclass - class MockCompletionChoice: - finish_reason: str - index: int - logprobs: Any - text: str - - @dataclass - class MockCompletion: - id: str - choices: List[MockCompletionChoice] - created: int - model: str - object: str - usage: MockCompletionUsage - - return MockCompletion( - id="", - choices=[ - MockCompletionChoice( - finish_reason="stop", - index=0, - logprobs=None, - text="Mocked LLM output", - ), - ], - created=0, - model="", - object="text_completion", - usage=MockCompletionUsage( - completion_tokens=20, - prompt_tokens=10, - total_tokens=30, + + @dataclass + class MockCompletionUsage: + completion_tokens: int + prompt_tokens: int + total_tokens: int + + @dataclass + class MockCompletionChoice: + finish_reason: str + index: int + logprobs: Any + text: str + + @dataclass + class MockCompletion: + id: str + choices: List[MockCompletionChoice] + created: int + model: str + object: str + 
usage: MockCompletionUsage + + return MockCompletion( + id="", + choices=[ + MockCompletionChoice( + finish_reason="stop", + index=0, + logprobs=None, + text="Mocked LLM output", ), - ) + ], + created=0, + model="", + object="text_completion", + usage=MockCompletionUsage( + completion_tokens=20, + prompt_tokens=10, + total_tokens=30, + ), + ) @pytest.fixture(scope="module") @@ -218,10 +192,8 @@ def gen(): def test_openai_callable(mocker, openai_mock): - if OPENAI_VERSION.startswith("0"): - mocker.patch("openai.Completion.create", return_value=openai_mock) - else: - mocker.patch("openai.resources.Completions.create", return_value=openai_mock) + + mocker.patch("openai.resources.Completions.create", return_value=openai_mock) from guardrails.llm_providers import OpenAICallable @@ -236,12 +208,10 @@ def test_openai_callable(mocker, openai_mock): def test_openai_stream_callable(mocker, openai_stream_mock): - if OPENAI_VERSION.startswith("0"): - mocker.patch("openai.Completion.create", return_value=openai_stream_mock) - else: - mocker.patch( - "openai.resources.Completions.create", return_value=openai_stream_mock - ) + + mocker.patch( + "openai.resources.Completions.create", return_value=openai_stream_mock + ) from guardrails.llm_providers import OpenAICallable @@ -276,13 +246,11 @@ async def test_async_openai_callable(mocker, openai_mock): def test_openai_chat_callable(mocker, openai_chat_mock): - if OPENAI_VERSION.startswith("0"): - mocker.patch("openai.ChatCompletion.create", return_value=openai_chat_mock) - else: - mocker.patch( - "openai.resources.chat.completions.Completions.create", - return_value=openai_chat_mock, - ) + + mocker.patch( + "openai.resources.chat.completions.Completions.create", + return_value=openai_chat_mock, + ) from guardrails.llm_providers import OpenAIChatCallable @@ -296,15 +264,11 @@ def test_openai_chat_callable(mocker, openai_chat_mock): def test_openai_chat_stream_callable(mocker, openai_chat_stream_mock): - if OPENAI_VERSION.startswith("0"): - mocker.patch( - "openai.ChatCompletion.create", return_value=openai_chat_stream_mock - ) - else: - mocker.patch( - "openai.resources.chat.completions.Completions.create", - return_value=openai_chat_stream_mock, - ) + + mocker.patch( + "openai.resources.chat.completions.Completions.create", + return_value=openai_chat_stream_mock, + ) from guardrails.llm_providers import OpenAIChatCallable openai_chat_callable = OpenAIChatCallable() @@ -338,13 +302,11 @@ async def test_async_openai_chat_callable(mocker, openai_chat_mock): def test_openai_chat_model_callable(mocker, openai_chat_mock): - if OPENAI_VERSION.startswith("0"): - mocker.patch("openai.ChatCompletion.create", return_value=openai_chat_mock) - else: - mocker.patch( - "openai.resources.chat.completions.Completions.create", - return_value=openai_chat_mock, - ) + + mocker.patch( + "openai.resources.chat.completions.Completions.create", + return_value=openai_chat_mock, + ) from guardrails.llm_providers import OpenAIChatCallable @@ -565,10 +527,7 @@ def test_get_llm_ask_openai_completion(): from guardrails.llm_providers import OpenAICallable completion_create = None - if OPENAI_VERSION.startswith("0"): - completion_create = openai.Completion.create - else: - completion_create = openai.completions.create + completion_create = openai.completions.create prompt_callable = get_llm_ask(completion_create) @@ -585,10 +544,7 @@ def test_get_llm_ask_openai_chat(): from guardrails.llm_providers import OpenAIChatCallable chat_completion_create = None - if OPENAI_VERSION.startswith("0"): 
- chat_completion_create = openai.ChatCompletion.create - else: - chat_completion_create = openai.chat.completions.create + chat_completion_create = openai.chat.completions.create prompt_callable = get_llm_ask(chat_completion_create) diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index 3f6f2c8c9..819b984c4 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -162,13 +162,10 @@ def test_summary_validators(mocker): pytest.importorskip("nltk", reason="nltk is not installed") pytest.importorskip("thefuzz", reason="thefuzz is not installed") - if OPENAI_VERSION.startswith("0"): - mocker.patch("openai.Embedding.create", new=mock_create_embedding) - else: - mocker.patch( - "openai.resources.embeddings.Embeddings.create", - new=mock_create_embedding, - ) + mocker.patch( + "openai.resources.embeddings.Embeddings.create", + new=mock_create_embedding, + ) mocker.patch("guardrails.embedding.OpenAIEmbedding.output_dim", new=2) @@ -381,13 +378,11 @@ def validate(value: Any) -> ValidationResult: def test_provenance_v1(mocker): """Test initialisation of ProvenanceV1.""" - if OPENAI_VERSION.startswith("0"): - mocker.patch("openai.ChatCompletion.create", new=mock_chat_completion) - else: - mocker.patch( - "openai.resources.chat.completions.Completions.create", - new=mock_chat_completion, - ) + + mocker.patch( + "openai.resources.chat.completions.Completions.create", + new=mock_chat_completion, + ) API_KEY = "" LLM_RESPONSE = "This is a sentence." @@ -419,9 +414,6 @@ def test_provenance_v1(mocker): # Test guard.parse() with 3 different ways of setting the OpenAI API key API key # 1. Setting the API key directly - if OPENAI_VERSION.startswith("0"): # not supported in v1 anymore - openai.api_key = API_KEY - output = string_guard.parse( llm_output=LLM_RESPONSE, metadata={"query_function": mock_chromadb_query_function}, From 460b7e905b9381fe6a44f17164162803398aa218 Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 27 May 2024 12:34:05 -0700 Subject: [PATCH 02/19] lint --- guardrails/run/stream_runner.py | 1 - guardrails/utils/openai_utils/__init__.py | 5 +---- tests/conftest.py | 1 - tests/integration_tests/test_streaming.py | 1 - tests/unit_tests/mock_embeddings.py | 1 - tests/unit_tests/mock_provenance_v1.py | 1 - tests/unit_tests/test_validators.py | 1 - 7 files changed, 1 insertion(+), 10 deletions(-) diff --git a/guardrails/run/stream_runner.py b/guardrails/run/stream_runner.py index c872d7cf8..ba8215530 100644 --- a/guardrails/run/stream_runner.py +++ b/guardrails/run/stream_runner.py @@ -13,7 +13,6 @@ from guardrails.prompt import Instructions, Prompt from guardrails.run.runner import Runner from guardrails.schema import Schema, StringSchema -from guardrails.utils.openai_utils import OPENAI_VERSION from guardrails.utils.reask_utils import SkeletonReAsk diff --git a/guardrails/utils/openai_utils/__init__.py b/guardrails/utils/openai_utils/__init__.py index 175dd6424..1b298ca7e 100644 --- a/guardrails/utils/openai_utils/__init__.py +++ b/guardrails/utils/openai_utils/__init__.py @@ -1,8 +1,4 @@ from openai.version import VERSION - -OPENAI_VERSION = VERSION - - from .v1 import AsyncOpenAIClientV1 as AsyncOpenAIClient from .v1 import OpenAIClientV1 as OpenAIClient from .v1 import ( @@ -13,6 +9,7 @@ get_static_openai_create_func, ) +OPENAI_VERSION = VERSION __all__ = [ "OPENAI_VERSION", diff --git a/tests/conftest.py b/tests/conftest.py index 514b024dd..55d7e7fcc 100644 --- 
a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,4 @@ import os -from openai.version import VERSION as OPENAI_VERSION os.environ["OPENAI_API_KEY"] = "mocked" diff --git a/tests/integration_tests/test_streaming.py b/tests/integration_tests/test_streaming.py index 26c6c758d..e8843c164 100644 --- a/tests/integration_tests/test_streaming.py +++ b/tests/integration_tests/test_streaming.py @@ -10,7 +10,6 @@ from pydantic import BaseModel, Field import guardrails as gd -from guardrails.utils.openai_utils import OPENAI_VERSION from guardrails.validator_base import OnFailAction from guardrails.validators import LowerCase diff --git a/tests/unit_tests/mock_embeddings.py b/tests/unit_tests/mock_embeddings.py index 090501eff..9bc952e0b 100644 --- a/tests/unit_tests/mock_embeddings.py +++ b/tests/unit_tests/mock_embeddings.py @@ -1,4 +1,3 @@ -from guardrails.utils.openai_utils import OPENAI_VERSION def mock_create_embedding(*args, input, **kwargs): diff --git a/tests/unit_tests/mock_provenance_v1.py b/tests/unit_tests/mock_provenance_v1.py index d31ab578e..c9a0b4627 100644 --- a/tests/unit_tests/mock_provenance_v1.py +++ b/tests/unit_tests/mock_provenance_v1.py @@ -1,4 +1,3 @@ -from guardrails.utils.openai_utils import OPENAI_VERSION def mock_chat_completion(*args, **kwargs): diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index 819b984c4..741a3c2b8 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -3,7 +3,6 @@ import os from typing import Any, Dict, List -import openai import pytest from pydantic import BaseModel, Field From 4a0bece47392f0bfa03dc25b632f0511939b4910 Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 27 May 2024 12:37:13 -0700 Subject: [PATCH 03/19] lint --- tests/integration_tests/test_embedding_openai.py | 2 +- tests/integration_tests/test_streaming.py | 14 +++++--------- tests/unit_tests/mock_embeddings.py | 8 +++----- tests/unit_tests/mock_provenance_v1.py | 4 +--- tests/unit_tests/test_llm_providers.py | 16 ++++------------ tests/unit_tests/test_validators.py | 2 +- 6 files changed, 15 insertions(+), 31 deletions(-) diff --git a/tests/integration_tests/test_embedding_openai.py b/tests/integration_tests/test_embedding_openai.py index c9dfcb211..f72b641e1 100644 --- a/tests/integration_tests/test_embedding_openai.py +++ b/tests/integration_tests/test_embedding_openai.py @@ -67,7 +67,7 @@ def test__get_embedding(self, mocker): mock_environ.return_value = "test_api_key" mock_create = None - + mock_create = mocker.patch("openai.resources.Embeddings.create") mock_create.return_value = MockResponse(data=[[1.0, 2.0, 3.0]]) diff --git a/tests/integration_tests/test_streaming.py b/tests/integration_tests/test_streaming.py index e8843c164..e9962e0ae 100644 --- a/tests/integration_tests/test_streaming.py +++ b/tests/integration_tests/test_streaming.py @@ -96,7 +96,7 @@ def gen(): ], model="OpenAI model name", ) - + return gen() @@ -153,7 +153,7 @@ def test_streaming_with_openai_callable( Mocks openai.Completion.create. 
""" - + mocker.patch( "openai.resources.Completions.create", return_value=mock_openai_completion_create(), @@ -162,9 +162,7 @@ def test_streaming_with_openai_callable( # Create a guard object guard = gd.Guard.from_pydantic(output_class=op_class, prompt=PROMPT) - method = ( - openai.completions.create - ) + method = openai.completions.create method.__name__ = "mock_openai_completion_create" @@ -203,7 +201,7 @@ def test_streaming_with_openai_chat_callable( Mocks openai.ChatCompletion.create. """ - + mocker.patch( "openai.resources.chat.completions.Completions.create", return_value=mock_openai_chat_completion_create(), @@ -212,9 +210,7 @@ def test_streaming_with_openai_chat_callable( # Create a guard object guard = gd.Guard.from_pydantic(output_class=op_class, prompt=PROMPT) - method = ( - openai.chat.completions.create - ) + method = openai.chat.completions.create method.__name__ = "mock_openai_chat_completion_create" diff --git a/tests/unit_tests/mock_embeddings.py b/tests/unit_tests/mock_embeddings.py index 9bc952e0b..7c3a9f820 100644 --- a/tests/unit_tests/mock_embeddings.py +++ b/tests/unit_tests/mock_embeddings.py @@ -1,5 +1,3 @@ - - def mock_create_embedding(*args, input, **kwargs): mocked_embeddings = { "It was a beautiful day. " "In the afternoon, we drank tea.": [0, 0.5], @@ -21,7 +19,7 @@ def mock_create_embedding(*args, input, **kwargs): except KeyError: print(input) raise ValueError("Text not found in mocked embeddings") - + from openai.types import CreateEmbeddingResponse, Embedding from openai.types.create_embedding_response import Usage @@ -33,8 +31,8 @@ def mock_create_embedding(*args, input, **kwargs): model="", object="list", usage=Usage( - prompt_tokens=10, - total_tokens=10, + prompt_tokens=10, + total_tokens=10, ), ) diff --git a/tests/unit_tests/mock_provenance_v1.py b/tests/unit_tests/mock_provenance_v1.py index c9a0b4627..9f0f40d20 100644 --- a/tests/unit_tests/mock_provenance_v1.py +++ b/tests/unit_tests/mock_provenance_v1.py @@ -1,8 +1,6 @@ - - def mock_chat_completion(*args, **kwargs): """Mocks the OpenAI chat completion function for ProvenanceV1.""" - + from openai.types import CompletionUsage from openai.types.chat import ChatCompletion, ChatCompletionMessage from openai.types.chat.chat_completion import Choice diff --git a/tests/unit_tests/test_llm_providers.py b/tests/unit_tests/test_llm_providers.py index a7616e73c..83002e858 100644 --- a/tests/unit_tests/test_llm_providers.py +++ b/tests/unit_tests/test_llm_providers.py @@ -134,7 +134,6 @@ def gen(): @pytest.fixture(scope="module") def openai_mock(): - @dataclass class MockCompletionUsage: completion_tokens: int @@ -171,9 +170,9 @@ class MockCompletion: model="", object="text_completion", usage=MockCompletionUsage( - completion_tokens=20, - prompt_tokens=10, - total_tokens=30, + completion_tokens=20, + prompt_tokens=10, + total_tokens=30, ), ) @@ -192,7 +191,6 @@ def gen(): def test_openai_callable(mocker, openai_mock): - mocker.patch("openai.resources.Completions.create", return_value=openai_mock) from guardrails.llm_providers import OpenAICallable @@ -208,10 +206,7 @@ def test_openai_callable(mocker, openai_mock): def test_openai_stream_callable(mocker, openai_stream_mock): - - mocker.patch( - "openai.resources.Completions.create", return_value=openai_stream_mock - ) + mocker.patch("openai.resources.Completions.create", return_value=openai_stream_mock) from guardrails.llm_providers import OpenAICallable @@ -246,7 +241,6 @@ async def test_async_openai_callable(mocker, openai_mock): def 
test_openai_chat_callable(mocker, openai_chat_mock): - mocker.patch( "openai.resources.chat.completions.Completions.create", return_value=openai_chat_mock, @@ -264,7 +258,6 @@ def test_openai_chat_callable(mocker, openai_chat_mock): def test_openai_chat_stream_callable(mocker, openai_chat_stream_mock): - mocker.patch( "openai.resources.chat.completions.Completions.create", return_value=openai_chat_stream_mock, @@ -302,7 +295,6 @@ async def test_async_openai_chat_callable(mocker, openai_chat_mock): def test_openai_chat_model_callable(mocker, openai_chat_mock): - mocker.patch( "openai.resources.chat.completions.Completions.create", return_value=openai_chat_mock, diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index 741a3c2b8..74188c647 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -377,7 +377,7 @@ def validate(value: Any) -> ValidationResult: def test_provenance_v1(mocker): """Test initialisation of ProvenanceV1.""" - + mocker.patch( "openai.resources.chat.completions.Completions.create", new=mock_chat_completion, From 4f1122b952e4af0d50631a3fb531caa4ae4117ba Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Tue, 28 May 2024 17:31:21 -0700 Subject: [PATCH 04/19] remove pytest skip mark --- guardrails/utils/pydantic_utils/v2.py | 2 +- .../applications/test_text2sql.py | 2 +- tests/integration_tests/test_async.py | 16 ++++++++-------- tests/integration_tests/test_run.py | 2 +- tests/unit_tests/test_async_guard.py | 2 +- tests/unit_tests/test_guard.py | 2 +- tests/unit_tests/test_llm_providers.py | 10 +++++----- tests/unit_tests/test_validators.py | 4 ++-- 8 files changed, 20 insertions(+), 20 deletions(-) diff --git a/guardrails/utils/pydantic_utils/v2.py b/guardrails/utils/pydantic_utils/v2.py index fb523f99f..b717f8a26 100644 --- a/guardrails/utils/pydantic_utils/v2.py +++ b/guardrails/utils/pydantic_utils/v2.py @@ -62,7 +62,7 @@ def add_validator( ) -> Callable: if kwargs: warnings.warn( - "The following kwargs are not supported by pydantic v2 " + "The following kwargs are not by pydantic v2 " "and will be ignored: " f"{kwargs}" ) diff --git a/tests/integration_tests/applications/test_text2sql.py b/tests/integration_tests/applications/test_text2sql.py index ff5d199c5..e5e27e3d9 100644 --- a/tests/integration_tests/applications/test_text2sql.py +++ b/tests/integration_tests/applications/test_text2sql.py @@ -39,7 +39,7 @@ def test_text2sql_with_examples(conn_str: str, schema_path: str, examples: str, Text2Sql(conn_str, schema_file=schema_path, examples=examples) -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") def test_text2sql_with_coro(): s = Text2Sql("sqlite://", llm_api=openai.Completion.acreate) with pytest.raises(ValueError): diff --git a/tests/integration_tests/test_async.py b/tests/integration_tests/test_async.py index 987055412..60649faca 100644 --- a/tests/integration_tests/test_async.py +++ b/tests/integration_tests/test_async.py @@ -17,7 +17,7 @@ @pytest.mark.asyncio @pytest.mark.parametrize("multiprocessing_validators", (True, False)) -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_reask(mocker, multiprocessing_validators: bool): """Test that the entity extraction 
works with re-asking.""" mocker.patch( @@ -76,7 +76,7 @@ async def test_entity_extraction_with_reask(mocker, multiprocessing_validators: @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_noop(mocker): mocker.patch( "guardrails.llm_providers.AsyncOpenAICallable", @@ -115,7 +115,7 @@ async def test_entity_extraction_with_noop(mocker): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_noop_pydantic(mocker): mocker.patch( "guardrails.llm_providers.AsyncOpenAICallable", @@ -151,7 +151,7 @@ async def test_entity_extraction_with_noop_pydantic(mocker): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_filter(mocker): """Test that the entity extraction works with re-asking.""" mocker.patch( @@ -186,7 +186,7 @@ async def test_entity_extraction_with_filter(mocker): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_fix(mocker): """Test that the entity extraction works with re-asking.""" mocker.patch( @@ -218,7 +218,7 @@ async def test_entity_extraction_with_fix(mocker): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_refrain(mocker): """Test that the entity extraction works with re-asking.""" mocker.patch( @@ -250,7 +250,7 @@ async def test_entity_extraction_with_refrain(mocker): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_rail_spec_output_parse(rail_spec, llm_output, validated_output): """Test that the rail_spec fixture is working.""" guard = gd.Guard.from_rail_string(rail_spec) @@ -288,7 +288,7 @@ def validated_string_output(): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_string_rail_spec_output_parse( string_rail_spec, string_llm_output, validated_string_output ): diff --git a/tests/integration_tests/test_run.py b/tests/integration_tests/test_run.py index 49496928c..3b236ed32 100644 --- a/tests/integration_tests/test_run.py +++ b/tests/integration_tests/test_run.py @@ -63,7 +63,7 @@ def runner_instance(is_sync: bool): os.environ.get("OPENAI_API_KEY") is None, reason="openai api key not set" ) @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_sync_async_call_equivalence(mocker): mocker.patch( "guardrails.llm_providers.AsyncOpenAICallable", diff --git a/tests/unit_tests/test_async_guard.py b/tests/unit_tests/test_async_guard.py 
index 0f4e33d6d..48109511a 100644 --- a/tests/unit_tests/test_async_guard.py +++ b/tests/unit_tests/test_async_guard.py @@ -90,7 +90,7 @@ def validate(self, value, metadata): ], ) @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_required_metadata(spec, metadata, error_message): guard = AsyncGuard.from_rail_string(spec) diff --git a/tests/unit_tests/test_guard.py b/tests/unit_tests/test_guard.py index 9e400c896..0ca8fb1e4 100644 --- a/tests/unit_tests/test_guard.py +++ b/tests/unit_tests/test_guard.py @@ -90,7 +90,7 @@ def validate(self, value, metadata): ], ) @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_required_metadata(spec, metadata, error_message): guard = Guard.from_rail_string(spec) diff --git a/tests/unit_tests/test_llm_providers.py b/tests/unit_tests/test_llm_providers.py index 83002e858..2b4af2c3f 100644 --- a/tests/unit_tests/test_llm_providers.py +++ b/tests/unit_tests/test_llm_providers.py @@ -21,7 +21,7 @@ from .mocks import MockAsyncOpenAILlm, MockOpenAILlm -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") def test_openai_callable_does_not_retry_on_non_retryable_errors(mocker): with pytest.raises(Exception) as e: llm = MockOpenAILlm() @@ -52,7 +52,7 @@ def test_openai_callable_does_not_retry_on_success(mocker): assert response.response_token_count is None -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") @pytest.mark.asyncio async def test_async_openai_callable_does_not_retry_on_non_retryable_errors(mocker): with pytest.raises(Exception) as e: @@ -225,7 +225,7 @@ def test_openai_stream_callable(mocker, openai_stream_mock): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") async def test_async_openai_callable(mocker, openai_mock): mocker.patch("openai.Completion.acreate", return_value=openai_mock) @@ -279,7 +279,7 @@ def test_openai_chat_stream_callable(mocker, openai_chat_stream_mock): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") async def test_async_openai_chat_callable(mocker, openai_chat_mock): mocker.patch("openai.ChatCompletion.acreate", return_value=openai_chat_mock) @@ -318,7 +318,7 @@ class MyModel(BaseModel): @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") async def test_async_openai_chat_model_callable(mocker, openai_chat_mock): mocker.patch("openai.ChatCompletion.acreate", return_value=openai_chat_mock) diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index 74188c647..46d103fde 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -786,7 +786,7 @@ def mock_llm_api(*args, **kwargs): @pytest.mark.asyncio -@pytest.mark.skipif(not 
OPENAI_VERSION.startswith("0"), reason="Not supported in v1") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") async def test_async_input_validation_fix(mocker): async def mock_llm_api(*args, **kwargs): return json.dumps({"name": "Fluffy"}) @@ -1068,7 +1068,7 @@ def test_input_validation_fail( ], ) @pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") async def test_input_validation_fail_async( on_fail, structured_prompt_error, From d5b3185681ffcf7946363d1da4a43621d9eed9fe Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Tue, 28 May 2024 17:32:39 -0700 Subject: [PATCH 05/19] run tests --- tests/unit_tests/test_validators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index 46d103fde..821135d66 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -786,7 +786,7 @@ def mock_llm_api(*args, **kwargs): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") async def test_async_input_validation_fix(mocker): async def mock_llm_api(*args, **kwargs): return json.dumps({"name": "Fluffy"}) From 5201775ff2b31cde5f275d291e24c9d03d700048 Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Tue, 28 May 2024 17:34:01 -0700 Subject: [PATCH 06/19] run tests --- tests/unit_tests/test_validators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index 821135d66..46d103fde 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -786,7 +786,7 @@ def mock_llm_api(*args, **kwargs): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") +#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") async def test_async_input_validation_fix(mocker): async def mock_llm_api(*args, **kwargs): return json.dumps({"name": "Fluffy"}) From cef2719152c575f336d02f2e33611a94416c23ec Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Tue, 28 May 2024 18:06:16 -0700 Subject: [PATCH 07/19] revert other changes --- .../applications/test_text2sql.py | 2 +- tests/integration_tests/test_async.py | 16 ++++++++-------- tests/integration_tests/test_run.py | 2 +- tests/unit_tests/test_async_guard.py | 2 +- tests/unit_tests/test_guard.py | 2 +- tests/unit_tests/test_llm_providers.py | 10 +++++----- tests/unit_tests/test_validators.py | 4 ++-- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/tests/integration_tests/applications/test_text2sql.py b/tests/integration_tests/applications/test_text2sql.py index e5e27e3d9..ff5d199c5 100644 --- a/tests/integration_tests/applications/test_text2sql.py +++ b/tests/integration_tests/applications/test_text2sql.py @@ -39,7 +39,7 @@ def test_text2sql_with_examples(conn_str: str, schema_path: str, examples: str, Text2Sql(conn_str, schema_file=schema_path, examples=examples) -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not 
OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") def test_text2sql_with_coro(): s = Text2Sql("sqlite://", llm_api=openai.Completion.acreate) with pytest.raises(ValueError): diff --git a/tests/integration_tests/test_async.py b/tests/integration_tests/test_async.py index 60649faca..987055412 100644 --- a/tests/integration_tests/test_async.py +++ b/tests/integration_tests/test_async.py @@ -17,7 +17,7 @@ @pytest.mark.asyncio @pytest.mark.parametrize("multiprocessing_validators", (True, False)) -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_reask(mocker, multiprocessing_validators: bool): """Test that the entity extraction works with re-asking.""" mocker.patch( @@ -76,7 +76,7 @@ async def test_entity_extraction_with_reask(mocker, multiprocessing_validators: @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_noop(mocker): mocker.patch( "guardrails.llm_providers.AsyncOpenAICallable", @@ -115,7 +115,7 @@ async def test_entity_extraction_with_noop(mocker): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_noop_pydantic(mocker): mocker.patch( "guardrails.llm_providers.AsyncOpenAICallable", @@ -151,7 +151,7 @@ async def test_entity_extraction_with_noop_pydantic(mocker): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_filter(mocker): """Test that the entity extraction works with re-asking.""" mocker.patch( @@ -186,7 +186,7 @@ async def test_entity_extraction_with_filter(mocker): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_fix(mocker): """Test that the entity extraction works with re-asking.""" mocker.patch( @@ -218,7 +218,7 @@ async def test_entity_extraction_with_fix(mocker): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_entity_extraction_with_refrain(mocker): """Test that the entity extraction works with re-asking.""" mocker.patch( @@ -250,7 +250,7 @@ async def test_entity_extraction_with_refrain(mocker): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_rail_spec_output_parse(rail_spec, llm_output, validated_output): """Test that the rail_spec fixture is working.""" guard = gd.Guard.from_rail_string(rail_spec) @@ -288,7 +288,7 @@ def validated_string_output(): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def 
test_string_rail_spec_output_parse( string_rail_spec, string_llm_output, validated_string_output ): diff --git a/tests/integration_tests/test_run.py b/tests/integration_tests/test_run.py index 3b236ed32..49496928c 100644 --- a/tests/integration_tests/test_run.py +++ b/tests/integration_tests/test_run.py @@ -63,7 +63,7 @@ def runner_instance(is_sync: bool): os.environ.get("OPENAI_API_KEY") is None, reason="openai api key not set" ) @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_sync_async_call_equivalence(mocker): mocker.patch( "guardrails.llm_providers.AsyncOpenAICallable", diff --git a/tests/unit_tests/test_async_guard.py b/tests/unit_tests/test_async_guard.py index 48109511a..0f4e33d6d 100644 --- a/tests/unit_tests/test_async_guard.py +++ b/tests/unit_tests/test_async_guard.py @@ -90,7 +90,7 @@ def validate(self, value, metadata): ], ) @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_required_metadata(spec, metadata, error_message): guard = AsyncGuard.from_rail_string(spec) diff --git a/tests/unit_tests/test_guard.py b/tests/unit_tests/test_guard.py index 0ca8fb1e4..9e400c896 100644 --- a/tests/unit_tests/test_guard.py +++ b/tests/unit_tests/test_guard.py @@ -90,7 +90,7 @@ def validate(self, value, metadata): ], ) @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") async def test_required_metadata(spec, metadata, error_message): guard = Guard.from_rail_string(spec) diff --git a/tests/unit_tests/test_llm_providers.py b/tests/unit_tests/test_llm_providers.py index 2b4af2c3f..83002e858 100644 --- a/tests/unit_tests/test_llm_providers.py +++ b/tests/unit_tests/test_llm_providers.py @@ -21,7 +21,7 @@ from .mocks import MockAsyncOpenAILlm, MockOpenAILlm -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") def test_openai_callable_does_not_retry_on_non_retryable_errors(mocker): with pytest.raises(Exception) as e: llm = MockOpenAILlm() @@ -52,7 +52,7 @@ def test_openai_callable_does_not_retry_on_success(mocker): assert response.response_token_count is None -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") @pytest.mark.asyncio async def test_async_openai_callable_does_not_retry_on_non_retryable_errors(mocker): with pytest.raises(Exception) as e: @@ -225,7 +225,7 @@ def test_openai_stream_callable(mocker, openai_stream_mock): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") async def test_async_openai_callable(mocker, openai_mock): mocker.patch("openai.Completion.acreate", return_value=openai_mock) @@ -279,7 +279,7 @@ def test_openai_chat_stream_callable(mocker, openai_chat_stream_mock): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") async def 
test_async_openai_chat_callable(mocker, openai_chat_mock): mocker.patch("openai.ChatCompletion.acreate", return_value=openai_chat_mock) @@ -318,7 +318,7 @@ class MyModel(BaseModel): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="OpenAI v0 only") async def test_async_openai_chat_model_callable(mocker, openai_chat_mock): mocker.patch("openai.ChatCompletion.acreate", return_value=openai_chat_mock) diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index 46d103fde..74188c647 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -786,7 +786,7 @@ def mock_llm_api(*args, **kwargs): @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") async def test_async_input_validation_fix(mocker): async def mock_llm_api(*args, **kwargs): return json.dumps({"name": "Fluffy"}) @@ -1068,7 +1068,7 @@ def test_input_validation_fail( ], ) @pytest.mark.asyncio -#@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") +@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") async def test_input_validation_fail_async( on_fail, structured_prompt_error, From 8956a806f2ae3b22a999acec5a4fc9e8a07b040b Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Wed, 29 May 2024 00:38:19 -0700 Subject: [PATCH 08/19] minor changes --- guardrails/run/stream_runner.py | 1 + guardrails/utils/pydantic_utils/v2.py | 2 +- tests/conftest.py | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/guardrails/run/stream_runner.py b/guardrails/run/stream_runner.py index ba8215530..c872d7cf8 100644 --- a/guardrails/run/stream_runner.py +++ b/guardrails/run/stream_runner.py @@ -13,6 +13,7 @@ from guardrails.prompt import Instructions, Prompt from guardrails.run.runner import Runner from guardrails.schema import Schema, StringSchema +from guardrails.utils.openai_utils import OPENAI_VERSION from guardrails.utils.reask_utils import SkeletonReAsk diff --git a/guardrails/utils/pydantic_utils/v2.py b/guardrails/utils/pydantic_utils/v2.py index b717f8a26..fb523f99f 100644 --- a/guardrails/utils/pydantic_utils/v2.py +++ b/guardrails/utils/pydantic_utils/v2.py @@ -62,7 +62,7 @@ def add_validator( ) -> Callable: if kwargs: warnings.warn( - "The following kwargs are not by pydantic v2 " + "The following kwargs are not supported by pydantic v2 " "and will be ignored: " f"{kwargs}" ) diff --git a/tests/conftest.py b/tests/conftest.py index 55d7e7fcc..2fcf23c65 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,4 +1,3 @@ import os - os.environ["OPENAI_API_KEY"] = "mocked" From a305dee7486f9791883a9de49f2f08f9dbf8db98 Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Wed, 29 May 2024 01:45:10 -0700 Subject: [PATCH 09/19] fix lint --- guardrails/run/stream_runner.py | 1 - 1 file changed, 1 deletion(-) diff --git a/guardrails/run/stream_runner.py b/guardrails/run/stream_runner.py index c872d7cf8..ba8215530 100644 --- a/guardrails/run/stream_runner.py +++ b/guardrails/run/stream_runner.py @@ -13,7 +13,6 @@ from guardrails.prompt import Instructions, Prompt from guardrails.run.runner import Runner from guardrails.schema import Schema, StringSchema -from 
guardrails.utils.openai_utils import OPENAI_VERSION from guardrails.utils.reask_utils import SkeletonReAsk From 0ed8563ceb3cc934ce17b7848f349d526552cecc Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 3 Jun 2024 16:38:11 -0700 Subject: [PATCH 10/19] remove pytests for openai < 0 --- .../applications/test_text2sql.py | 9 - tests/integration_tests/test_async.py | 269 ------------------ tests/unit_tests/test_async_guard.py | 88 ------ tests/unit_tests/test_guard.py | 88 ------ tests/unit_tests/test_llm_providers.py | 113 -------- tests/unit_tests/test_validators.py | 200 ------------- 6 files changed, 767 deletions(-) diff --git a/tests/integration_tests/applications/test_text2sql.py b/tests/integration_tests/applications/test_text2sql.py index ff5d199c5..625db70e6 100644 --- a/tests/integration_tests/applications/test_text2sql.py +++ b/tests/integration_tests/applications/test_text2sql.py @@ -1,11 +1,9 @@ import json import os -import openai import pytest from guardrails.applications.text2sql import Text2Sql -from guardrails.utils.openai_utils import OPENAI_VERSION CURRENT_DIR_PARENT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SCHEMA_PATH = os.path.join(CURRENT_DIR_PARENT, "test_assets/text2sql/schema.sql") @@ -37,10 +35,3 @@ def test_text2sql_with_examples(conn_str: str, schema_path: str, examples: str, # This should not raise an exception. Text2Sql(conn_str, schema_file=schema_path, examples=examples) - - -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -def test_text2sql_with_coro(): - s = Text2Sql("sqlite://", llm_api=openai.Completion.acreate) - with pytest.raises(ValueError): - s("") diff --git a/tests/integration_tests/test_async.py b/tests/integration_tests/test_async.py index 987055412..199c5a771 100644 --- a/tests/integration_tests/test_async.py +++ b/tests/integration_tests/test_async.py @@ -1,265 +1,12 @@ -from unittest.mock import patch -import openai import pytest -import guardrails as gd -from guardrails.schema import JsonSchema -from guardrails.utils.openai_utils import OPENAI_VERSION from tests.integration_tests.test_assets.fixtures import ( # noqa fixture_llm_output, fixture_rail_spec, fixture_validated_output, ) -from .mock_llm_outputs import MockAsyncOpenAICallable, entity_extraction - - -@pytest.mark.asyncio -@pytest.mark.parametrize("multiprocessing_validators", (True, False)) -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_entity_extraction_with_reask(mocker, multiprocessing_validators: bool): - """Test that the entity extraction works with re-asking.""" - mocker.patch( - "guardrails.llm_providers.AsyncOpenAICallable", - new=MockAsyncOpenAICallable, - ) - mocker.patch( - "guardrails.validators.Validator.run_in_separate_process", - new=multiprocessing_validators, - ) - - content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf") - guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_REASK) - - with patch.object( - JsonSchema, "preprocess_prompt", wraps=guard.output_schema.preprocess_prompt - ) as mock_preprocess_prompt: - final_output = await guard( - llm_api=openai.Completion.acreate, - prompt_params={"document": content[:6000]}, - num_reasks=1, - ) - - # Check that the preprocess_prompt method was called. - mock_preprocess_prompt.assert_called() - - # Assertions are made on the guard state object. 
- assert final_output.validation_passed is True - assert final_output.validated_output == entity_extraction.VALIDATED_OUTPUT_REASK_2 - - guard_history = guard.history - call = guard_history.first - - # Check that the guard was only called once and - # has the correct number of re-asks. - assert guard_history.length == 1 - assert call.iterations.length == 2 - - # For orginal prompt and output - first = call.iterations.first - assert first.inputs.prompt == gd.Prompt(entity_extraction.COMPILED_PROMPT) - # Same as above - assert call.compiled_prompt == entity_extraction.COMPILED_PROMPT - assert first.prompt_tokens_consumed == 123 - assert first.completion_tokens_consumed == 1234 - assert first.raw_output == entity_extraction.LLM_OUTPUT - assert first.validation_response == entity_extraction.VALIDATED_OUTPUT_REASK_1 - - # For re-asked prompt and output - final = call.iterations.last - assert final.inputs.prompt == gd.Prompt(entity_extraction.COMPILED_PROMPT_REASK) - # Same as above - assert call.reask_prompts.last == entity_extraction.COMPILED_PROMPT_REASK - assert final.raw_output == entity_extraction.LLM_OUTPUT_REASK - assert call.guarded_output == entity_extraction.VALIDATED_OUTPUT_REASK_2 - - -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_entity_extraction_with_noop(mocker): - mocker.patch( - "guardrails.llm_providers.AsyncOpenAICallable", - new=MockAsyncOpenAICallable, - ) - content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf") - guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_NOOP) - final_output = await guard( - llm_api=openai.Completion.acreate, - prompt_params={"document": content[:6000]}, - num_reasks=1, - ) - - # Assertions are made on the guard state object. - - # Old assertion which is wrong - # This should not pass validation and therefore will not have a validated output - # assert final_output.validated_output == entity_extraction.VALIDATED_OUTPUT_NOOP - - assert final_output.validation_passed is False - assert final_output.validated_output is not None - assert final_output.validated_output["fees"] - assert final_output.validated_output["interest_rates"] - - call = guard.history.first - - # Check that the guard was called once - # and did not have to reask - assert guard.history.length == 1 - assert call.iterations.length == 1 - - # For orginal prompt and output - assert call.compiled_prompt == entity_extraction.COMPILED_PROMPT - assert call.raw_outputs.last == entity_extraction.LLM_OUTPUT - assert call.validation_response == entity_extraction.VALIDATED_OUTPUT_NOOP - - -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_entity_extraction_with_noop_pydantic(mocker): - mocker.patch( - "guardrails.llm_providers.AsyncOpenAICallable", - new=MockAsyncOpenAICallable, - ) - content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf") - guard = gd.Guard.from_pydantic( - entity_extraction.PYDANTIC_RAIL_WITH_NOOP, entity_extraction.PYDANTIC_PROMPT - ) - final_output = await guard( - llm_api=openai.Completion.acreate, - prompt_params={"document": content[:6000]}, - num_reasks=1, - ) - - # Assertions are made on the guard state object. 
- assert final_output.validation_passed is False - assert final_output.validated_output is not None - assert final_output.validated_output["fees"] - assert final_output.validated_output["interest_rates"] - - call = guard.history.first - - # Check that the guard was called once - # and did not have toreask - assert guard.history.length == 1 - assert call.iterations.length == 1 - - # For orginal prompt and output - assert call.compiled_prompt == entity_extraction.COMPILED_PROMPT - assert call.raw_outputs.last == entity_extraction.LLM_OUTPUT - assert call.validation_response == entity_extraction.VALIDATED_OUTPUT_NOOP - - -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_entity_extraction_with_filter(mocker): - """Test that the entity extraction works with re-asking.""" - mocker.patch( - "guardrails.llm_providers.AsyncOpenAICallable", - new=MockAsyncOpenAICallable, - ) - - content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf") - guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_FILTER) - final_output = await guard( - llm_api=openai.Completion.acreate, - prompt_params={"document": content[:6000]}, - num_reasks=1, - ) - - # Assertions are made on the guard state object. - assert final_output.validation_passed is False - assert final_output.validated_output is None - - call = guard.history.first - - # Check that the guard state object has the correct number of re-asks. - assert guard.history.length == 1 - assert call.iterations.length == 1 - - # For orginal prompt and output - assert call.compiled_prompt == entity_extraction.COMPILED_PROMPT - assert call.raw_outputs.last == entity_extraction.LLM_OUTPUT - assert call.validation_response == entity_extraction.VALIDATED_OUTPUT_FILTER - assert call.guarded_output is None - assert call.status == "fail" - - -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_entity_extraction_with_fix(mocker): - """Test that the entity extraction works with re-asking.""" - mocker.patch( - "guardrails.llm_providers.AsyncOpenAICallable", - new=MockAsyncOpenAICallable, - ) - - content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf") - guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_FIX) - final_output = await guard( - llm_api=openai.Completion.acreate, - prompt_params={"document": content[:6000]}, - num_reasks=1, - ) - - # Assertions are made on the guard state object. - assert final_output.validation_passed is True - assert final_output.validated_output == entity_extraction.VALIDATED_OUTPUT_FIX - - call = guard.history.first - - # Check that the guard state object has the correct number of re-asks. 
- assert guard.history.length == 1 - - # For orginal prompt and output - assert call.compiled_prompt == entity_extraction.COMPILED_PROMPT - assert call.raw_outputs.last == entity_extraction.LLM_OUTPUT - assert call.guarded_output == entity_extraction.VALIDATED_OUTPUT_FIX - - -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_entity_extraction_with_refrain(mocker): - """Test that the entity extraction works with re-asking.""" - mocker.patch( - "guardrails.llm_providers.AsyncOpenAICallable", - new=MockAsyncOpenAICallable, - ) - - content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf") - guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_REFRAIN) - final_output = await guard( - llm_api=openai.Completion.acreate, - prompt_params={"document": content[:6000]}, - num_reasks=1, - ) - # Assertions are made on the guard state object. - - assert final_output.validation_passed is False - assert final_output.validated_output == entity_extraction.VALIDATED_OUTPUT_REFRAIN - - call = guard.history.first - - # Check that the guard state object has the correct number of re-asks. - assert guard.history.length == 1 - - # For orginal prompt and output - assert call.compiled_prompt == entity_extraction.COMPILED_PROMPT - assert call.raw_outputs.last == entity_extraction.LLM_OUTPUT - assert call.guarded_output == entity_extraction.VALIDATED_OUTPUT_REFRAIN - - -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_rail_spec_output_parse(rail_spec, llm_output, validated_output): - """Test that the rail_spec fixture is working.""" - guard = gd.Guard.from_rail_string(rail_spec) - output = await guard.parse( - llm_output, - llm_api=openai.Completion.acreate, - ) - assert output.validated_output == validated_output - @pytest.fixture def string_rail_spec(): @@ -276,7 +23,6 @@ def string_rail_spec(): """ - @pytest.fixture def string_llm_output(): return "string output yes" @@ -285,18 +31,3 @@ def string_llm_output(): @pytest.fixture def validated_string_output(): return "string output" - - -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_string_rail_spec_output_parse( - string_rail_spec, string_llm_output, validated_string_output -): - """Test that the string_rail_spec fixture is working.""" - guard = gd.Guard.from_rail_string(string_rail_spec) - output = await guard.parse( - string_llm_output, - llm_api=openai.Completion.acreate, - num_reasks=0, - ) - assert output.validated_output == validated_string_output diff --git a/tests/unit_tests/test_async_guard.py b/tests/unit_tests/test_async_guard.py index 0f4e33d6d..6e35f4464 100644 --- a/tests/unit_tests/test_async_guard.py +++ b/tests/unit_tests/test_async_guard.py @@ -34,94 +34,6 @@ class RequiringValidator2(Validator): def validate(self, value, metadata): return PassResult() - -@pytest.mark.parametrize( - "spec,metadata,error_message", - [ - ( - """ - - - - - - """, - {"required_key": "a"}, - "Missing required metadata keys: required_key", - ), - ( - """ - - - - - - - - - - - """, - {"required_key": "a", "required_key2": "b"}, - "Missing required metadata keys: required_key, required_key2", - ), - ( - """ - - - - - - - - - - - - - - - - -""", - {"required_key": "a"}, - "Missing required metadata keys: required_key", - ), - ], -) -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), 
reason="Only for OpenAI v0") -async def test_required_metadata(spec, metadata, error_message): - guard = AsyncGuard.from_rail_string(spec) - - missing_keys = verify_metadata_requirements({}, guard.output_schema.root_datatype) - assert set(missing_keys) == set(metadata) - - not_missing_keys = verify_metadata_requirements( - metadata, guard.output_schema.root_datatype - ) - assert not_missing_keys == [] - - # test sync guard - with pytest.raises(ValueError) as excinfo: - guard.parse("{}") - assert str(excinfo.value) == error_message - - response = guard.parse("{}", metadata=metadata, num_reasks=0) - assert response.error is None - - # test async guard - with pytest.raises(ValueError) as excinfo: - guard.parse("{}") - await guard.parse("{}", llm_api=openai.ChatCompletion.acreate, num_reasks=0) - assert str(excinfo.value) == error_message - - response = await guard.parse( - "{}", metadata=metadata, llm_api=openai.ChatCompletion.acreate, num_reasks=0 - ) - assert response.error is None - - rail = Rail.from_string_validators([], "empty railspec") empty_rail_string = """ - - - - - """, - {"required_key": "a"}, - "Missing required metadata keys: required_key", - ), - ( - """ - - - - - - - - - - - """, - {"required_key": "a", "required_key2": "b"}, - "Missing required metadata keys: required_key, required_key2", - ), - ( - """ - - - - - - - - - - - - - - - - -""", - {"required_key": "a"}, - "Missing required metadata keys: required_key", - ), - ], -) -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Only for OpenAI v0") -async def test_required_metadata(spec, metadata, error_message): - guard = Guard.from_rail_string(spec) - - missing_keys = verify_metadata_requirements({}, guard.output_schema.root_datatype) - assert set(missing_keys) == set(metadata) - - not_missing_keys = verify_metadata_requirements( - metadata, guard.output_schema.root_datatype - ) - assert not_missing_keys == [] - - # test sync guard - with pytest.raises(ValueError) as excinfo: - guard.parse("{}") - assert str(excinfo.value) == error_message - - response = guard.parse("{}", metadata=metadata, num_reasks=0) - assert response.error is None - - # test async guard - with pytest.raises(ValueError) as excinfo: - guard.parse("{}") - await guard.parse("{}", llm_api=openai.ChatCompletion.acreate, num_reasks=0) - assert str(excinfo.value) == error_message - - response = await guard.parse( - "{}", metadata=metadata, llm_api=openai.ChatCompletion.acreate, num_reasks=0 - ) - assert response.error is None - - rail = Rail.from_string_validators([], "empty railspec") empty_rail_string = """ - -This is not two words - - - - -""" - ) - await guard( - mock_llm_api, - ) - assert guard.history.first.iterations.first.outputs.validation_response == "This is" - - # rail instructions validation - guard = Guard.from_rail_string( - """ - - -This is not two words - - -This also is not two words - - - - -""" - ) - await guard( - mock_llm_api, - ) - assert ( - guard.history.first.iterations.first.outputs.validation_response == "This also" - ) - - @pytest.mark.parametrize( "on_fail," "structured_prompt_error," @@ -1067,110 +971,6 @@ def test_input_validation_fail( ), ], ) -@pytest.mark.asyncio -@pytest.mark.skipif(not OPENAI_VERSION.startswith("0"), reason="Not supported in v1") -async def test_input_validation_fail_async( - on_fail, - structured_prompt_error, - structured_instructions_error, - structured_message_history_error, - unstructured_prompt_error, - unstructured_instructions_error, -): - # with_prompt_validation 
- guard = Guard.from_pydantic(output_class=Pet).with_prompt_validation( - validators=[TwoWords(on_fail=on_fail)] - ) - with pytest.raises(ValidationError) as excinfo: - await guard( - get_static_openai_acreate_func(), - prompt="What kind of pet should I get?", - ) - assert str(excinfo.value) == structured_prompt_error - assert isinstance(guard.history.last.exception, ValidationError) - assert guard.history.last.exception == excinfo.value - - # with_instructions_validation - guard = Guard.from_pydantic(output_class=Pet).with_instructions_validation( - validators=[TwoWords(on_fail=on_fail)] - ) - with pytest.raises(ValidationError) as excinfo: - await guard( - get_static_openai_acreate_func(), - prompt="What kind of pet should I get and what should I name it?", - instructions="What kind of pet should I get?", - ) - assert str(excinfo.value) == structured_instructions_error - assert isinstance(guard.history.last.exception, ValidationError) - assert guard.history.last.exception == excinfo.value - - # with_msg_history_validation - guard = Guard.from_pydantic(output_class=Pet).with_msg_history_validation( - validators=[TwoWords(on_fail=on_fail)] - ) - with pytest.raises(ValidationError) as excinfo: - await guard( - get_static_openai_acreate_func(), - msg_history=[ - { - "role": "user", - "content": "What kind of pet should I get?", - } - ], - ) - assert str(excinfo.value) == structured_message_history_error - assert isinstance(guard.history.last.exception, ValidationError) - assert guard.history.last.exception == excinfo.value - - # rail prompt validation - guard = Guard.from_rail_string( - f""" - - -This is not two words - - - - -""" - ) - with pytest.raises(ValidationError) as excinfo: - await guard( - get_static_openai_acreate_func(), - ) - assert str(excinfo.value) == unstructured_prompt_error - assert isinstance(guard.history.last.exception, ValidationError) - assert guard.history.last.exception == excinfo.value - - # rail instructions validation - guard = Guard.from_rail_string( - f""" - - -This is not two words - - -This also is not two words - - - - -""" - ) - with pytest.raises(ValidationError) as excinfo: - await guard( - get_static_openai_acreate_func(), - ) - assert str(excinfo.value) == unstructured_instructions_error - assert isinstance(guard.history.last.exception, ValidationError) - assert guard.history.last.exception == excinfo.value - def test_input_validation_mismatch_raise(): # prompt validation, msg_history argument From 4eaecbfcad0b82da3468bc8fb728c399002be17c Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 3 Jun 2024 16:38:49 -0700 Subject: [PATCH 11/19] run tests --- tests/integration_tests/test_python_rail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration_tests/test_python_rail.py b/tests/integration_tests/test_python_rail.py index 6a88d1f00..5674fa6df 100644 --- a/tests/integration_tests/test_python_rail.py +++ b/tests/integration_tests/test_python_rail.py @@ -210,7 +210,7 @@ class Director(BaseModel): python_rail.LLM_OUTPUT_3_SUCCEED_GUARDRAILS_AND_PYDANTIC ) - + @pytest.mark.skipif(not PYDANTIC_VERSION.startswith("1"), reason="Pydantic 1.x only") def test_python_rail_add_validator(mocker): from pydantic import root_validator, validator From 9ca5089ef682d5b9e574f5477a5943ec2daa8eef Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 3 Jun 2024 16:40:10 -0700 Subject: [PATCH 12/19] lint --- tests/integration_tests/test_async.py 
| 2 +- tests/integration_tests/test_python_rail.py | 2 +- tests/unit_tests/test_async_guard.py | 4 +--- tests/unit_tests/test_guard.py | 4 +--- tests/unit_tests/test_llm_providers.py | 6 +++++- tests/unit_tests/test_validators.py | 4 +--- 6 files changed, 10 insertions(+), 12 deletions(-) diff --git a/tests/integration_tests/test_async.py b/tests/integration_tests/test_async.py index 199c5a771..97929cb10 100644 --- a/tests/integration_tests/test_async.py +++ b/tests/integration_tests/test_async.py @@ -1,4 +1,3 @@ - import pytest from tests.integration_tests.test_assets.fixtures import ( # noqa @@ -23,6 +22,7 @@ def string_rail_spec(): """ + @pytest.fixture def string_llm_output(): return "string output yes" diff --git a/tests/integration_tests/test_python_rail.py b/tests/integration_tests/test_python_rail.py index 5674fa6df..6a88d1f00 100644 --- a/tests/integration_tests/test_python_rail.py +++ b/tests/integration_tests/test_python_rail.py @@ -210,7 +210,7 @@ class Director(BaseModel): python_rail.LLM_OUTPUT_3_SUCCEED_GUARDRAILS_AND_PYDANTIC ) - + @pytest.mark.skipif(not PYDANTIC_VERSION.startswith("1"), reason="Pydantic 1.x only") def test_python_rail_add_validator(mocker): from pydantic import root_validator, validator diff --git a/tests/unit_tests/test_async_guard.py b/tests/unit_tests/test_async_guard.py index 6e35f4464..e23ecf7e3 100644 --- a/tests/unit_tests/test_async_guard.py +++ b/tests/unit_tests/test_async_guard.py @@ -1,11 +1,8 @@ -import openai import pytest from pydantic import BaseModel from guardrails import AsyncGuard, Rail, Validator -from guardrails.datatypes import verify_metadata_requirements from guardrails.utils import args, kwargs, on_fail -from guardrails.utils.openai_utils import OPENAI_VERSION from guardrails.validator_base import OnFailAction from guardrails.validators import ( # ReadingTime, EndsWith, @@ -34,6 +31,7 @@ class RequiringValidator2(Validator): def validate(self, value, metadata): return PassResult() + rail = Rail.from_string_validators([], "empty railspec") empty_rail_string = """ Date: Mon, 3 Jun 2024 20:10:37 -0700 Subject: [PATCH 13/19] fix test --- tests/unit_tests/test_validators.py | 46 ++--------------------------- 1 file changed, 2 insertions(+), 44 deletions(-) diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index e6f2c4386..c968ce1b1 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -11,6 +11,8 @@ from guardrails.errors import ValidationError from guardrails.schema import StringSchema from guardrails.utils.openai_utils import ( + OPENAI_VERSION, + get_static_openai_acreate_func, get_static_openai_create_func, ) from guardrails.utils.reask_utils import FieldReAsk @@ -782,7 +784,6 @@ def mock_llm_api(*args, **kwargs): guard.history.first.iterations.first.outputs.validation_response == "This also" ) - @pytest.mark.parametrize( "on_fail," "structured_prompt_error," @@ -927,49 +928,6 @@ def test_input_validation_fail( assert isinstance(guard.history.last.exception, ValidationError) assert guard.history.last.exception == excinfo.value - -@pytest.mark.parametrize( - "on_fail," - "structured_prompt_error," - "structured_instructions_error," - "structured_message_history_error," - "unstructured_prompt_error," - "unstructured_instructions_error", - [ - ( - OnFailAction.REASK, - "Prompt validation failed: incorrect_value='What kind of pet should I get?\\n\\nJson Output:\\n\\n' fail_results=[FailResult(outcome='fail', metadata=None, error_message='must be exactly two 
words', fix_value='What kind')] path=None", # noqa - "Instructions validation failed: incorrect_value='What kind of pet should I get?' fail_results=[FailResult(outcome='fail', metadata=None, error_message='must be exactly two words', fix_value='What kind')] path=None", # noqa - "Message history validation failed: incorrect_value='What kind of pet should I get?' fail_results=[FailResult(outcome='fail', metadata=None, error_message='must be exactly two words', fix_value='What kind')] path=None", # noqa - "Prompt validation failed: incorrect_value='\\nThis is not two words\\n\\n\\nString Output:\\n\\n' fail_results=[FailResult(outcome='fail', metadata=None, error_message='must be exactly two words', fix_value='This is')] path=None", # noqa - "Instructions validation failed: incorrect_value='\\nThis also is not two words\\n' fail_results=[FailResult(outcome='fail', metadata=None, error_message='must be exactly two words', fix_value='This also')] path=None", # noqa - ), - ( - OnFailAction.FILTER, - "Prompt validation failed", - "Instructions validation failed", - "Message history validation failed", - "Prompt validation failed", - "Instructions validation failed", - ), - ( - OnFailAction.REFRAIN, - "Prompt validation failed", - "Instructions validation failed", - "Message history validation failed", - "Prompt validation failed", - "Instructions validation failed", - ), - ( - OnFailAction.EXCEPTION, - "Validation failed for field with errors: must be exactly two words", - "Validation failed for field with errors: must be exactly two words", - "Validation failed for field with errors: must be exactly two words", - "Validation failed for field with errors: must be exactly two words", - "Validation failed for field with errors: must be exactly two words", - ), - ], -) def test_input_validation_mismatch_raise(): # prompt validation, msg_history argument guard = Guard.from_pydantic(output_class=Pet).with_prompt_validation( From c42f9319ad8c7ef14930fe6d5922960cd68435c8 Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 3 Jun 2024 20:17:32 -0700 Subject: [PATCH 14/19] fix lint --- tests/unit_tests/test_validators.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit_tests/test_validators.py b/tests/unit_tests/test_validators.py index c968ce1b1..4898f02c7 100644 --- a/tests/unit_tests/test_validators.py +++ b/tests/unit_tests/test_validators.py @@ -11,8 +11,6 @@ from guardrails.errors import ValidationError from guardrails.schema import StringSchema from guardrails.utils.openai_utils import ( - OPENAI_VERSION, - get_static_openai_acreate_func, get_static_openai_create_func, ) from guardrails.utils.reask_utils import FieldReAsk @@ -784,6 +782,7 @@ def mock_llm_api(*args, **kwargs): guard.history.first.iterations.first.outputs.validation_response == "This also" ) + @pytest.mark.parametrize( "on_fail," "structured_prompt_error," @@ -928,6 +927,7 @@ def test_input_validation_fail( assert isinstance(guard.history.last.exception, ValidationError) assert guard.history.last.exception == excinfo.value + def test_input_validation_mismatch_raise(): # prompt validation, msg_history argument guard = Guard.from_pydantic(output_class=Pet).with_prompt_validation( From 5338f5a69613231f8ca41f44db1da30edf037708 Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:12:36 -0700 Subject: [PATCH 15/19] fix tests --- tests/unit_tests/test_llm_providers.py | 33 
+++++++++++++++++++------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/tests/unit_tests/test_llm_providers.py b/tests/unit_tests/test_llm_providers.py index 952afc3fe..21b293ab3 100644 --- a/tests/unit_tests/test_llm_providers.py +++ b/tests/unit_tests/test_llm_providers.py @@ -15,6 +15,7 @@ chat_prompt, get_llm_ask, ) +from guardrails.utils.openai_utils import OPENAI_VERSION from guardrails.utils.safe_get import safe_get_with_brackets from .mocks import MockAsyncOpenAILlm, MockOpenAILlm @@ -33,7 +34,6 @@ def test_openai_callable_does_not_retry_on_success(mocker): assert response.prompt_token_count is None assert response.response_token_count is None - @pytest.mark.asyncio async def test_async_openai_callable_does_not_retry_on_success(mocker): llm = MockAsyncOpenAILlm() @@ -140,7 +140,6 @@ class MockCompletion: ), ) - @pytest.fixture(scope="module") def openai_stream_mock(): def gen(): @@ -155,6 +154,7 @@ def gen(): def test_openai_callable(mocker, openai_mock): + mocker.patch("openai.resources.Completions.create", return_value=openai_mock) from guardrails.llm_providers import OpenAICallable @@ -170,7 +170,9 @@ def test_openai_callable(mocker, openai_mock): def test_openai_stream_callable(mocker, openai_stream_mock): - mocker.patch("openai.resources.Completions.create", return_value=openai_stream_mock) + mocker.patch( + "openai.resources.Completions.create", return_value=openai_stream_mock + ) from guardrails.llm_providers import OpenAICallable @@ -187,13 +189,11 @@ def test_openai_stream_callable(mocker, openai_stream_mock): assert actual_op == f"{i}," i += 1 - def test_openai_chat_callable(mocker, openai_chat_mock): mocker.patch( "openai.resources.chat.completions.Completions.create", return_value=openai_chat_mock, ) - from guardrails.llm_providers import OpenAIChatCallable openai_chat_callable = OpenAIChatCallable() @@ -225,7 +225,6 @@ def test_openai_chat_stream_callable(mocker, openai_chat_stream_mock): assert actual_op == f"{i}," i += 1 - def test_openai_chat_model_callable(mocker, openai_chat_mock): mocker.patch( "openai.resources.chat.completions.Completions.create", @@ -248,6 +247,24 @@ class MyModel(BaseModel): assert response.prompt_token_count == 10 assert response.response_token_count == 20 +@pytest.mark.skipif( + not importlib.util.find_spec("manifest"), + reason="manifest-ml is not installed", +) +def test_manifest_callable(): + client = MagicMock() + client.run.return_value = "Hello world!" + + from guardrails.llm_providers import ManifestCallable + + manifest_callable = ManifestCallable() + response = manifest_callable(text="Hello", client=client) + + assert isinstance(response, LLMResponse) is True + assert response.output == "Hello world!" 
+ assert response.prompt_token_count is None + assert response.response_token_count is None + @pytest.mark.skipif( not importlib.util.find_spec("manifest"), @@ -411,7 +428,6 @@ def test_get_llm_ask_openai_completion(): completion_create = None completion_create = openai.completions.create - prompt_callable = get_llm_ask(completion_create) assert isinstance(prompt_callable, OpenAICallable) @@ -426,7 +442,6 @@ def test_get_llm_ask_openai_chat(): from guardrails.llm_providers import OpenAIChatCallable - chat_completion_create = None chat_completion_create = openai.chat.completions.create prompt_callable = get_llm_ask(chat_completion_create) @@ -578,4 +593,4 @@ def test_get_llm_ask_litellm(): def test_chat_prompt(): # raises when neither msg_history or prompt are provided with pytest.raises(PromptCallableException): - chat_prompt(None) + chat_prompt(None) \ No newline at end of file From 1e0ccccf4e5e15bf587febe7e01c433b1fb05c0b Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:16:20 -0700 Subject: [PATCH 16/19] lint --- tests/unit_tests/test_llm_providers.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/unit_tests/test_llm_providers.py b/tests/unit_tests/test_llm_providers.py index 21b293ab3..6bd2e022e 100644 --- a/tests/unit_tests/test_llm_providers.py +++ b/tests/unit_tests/test_llm_providers.py @@ -15,7 +15,6 @@ chat_prompt, get_llm_ask, ) -from guardrails.utils.openai_utils import OPENAI_VERSION from guardrails.utils.safe_get import safe_get_with_brackets from .mocks import MockAsyncOpenAILlm, MockOpenAILlm @@ -34,6 +33,7 @@ def test_openai_callable_does_not_retry_on_success(mocker): assert response.prompt_token_count is None assert response.response_token_count is None + @pytest.mark.asyncio async def test_async_openai_callable_does_not_retry_on_success(mocker): llm = MockAsyncOpenAILlm() @@ -140,6 +140,7 @@ class MockCompletion: ), ) + @pytest.fixture(scope="module") def openai_stream_mock(): def gen(): @@ -154,7 +155,6 @@ def gen(): def test_openai_callable(mocker, openai_mock): - mocker.patch("openai.resources.Completions.create", return_value=openai_mock) from guardrails.llm_providers import OpenAICallable @@ -170,9 +170,7 @@ def test_openai_callable(mocker, openai_mock): def test_openai_stream_callable(mocker, openai_stream_mock): - mocker.patch( - "openai.resources.Completions.create", return_value=openai_stream_mock - ) + mocker.patch("openai.resources.Completions.create", return_value=openai_stream_mock) from guardrails.llm_providers import OpenAICallable @@ -189,6 +187,7 @@ def test_openai_stream_callable(mocker, openai_stream_mock): assert actual_op == f"{i}," i += 1 + def test_openai_chat_callable(mocker, openai_chat_mock): mocker.patch( "openai.resources.chat.completions.Completions.create", @@ -225,6 +224,7 @@ def test_openai_chat_stream_callable(mocker, openai_chat_stream_mock): assert actual_op == f"{i}," i += 1 + def test_openai_chat_model_callable(mocker, openai_chat_mock): mocker.patch( "openai.resources.chat.completions.Completions.create", @@ -247,6 +247,7 @@ class MyModel(BaseModel): assert response.prompt_token_count == 10 assert response.response_token_count == 20 + @pytest.mark.skipif( not importlib.util.find_spec("manifest"), reason="manifest-ml is not installed", @@ -593,4 +594,4 @@ def test_get_llm_ask_litellm(): def test_chat_prompt(): # raises when neither msg_history or prompt are provided with pytest.raises(PromptCallableException): - chat_prompt(None) 
\ No newline at end of file + chat_prompt(None) From 56d37f5b62dee6705ce4245dccb4df42fb5dec2c Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Wed, 5 Jun 2024 02:08:20 -0700 Subject: [PATCH 17/19] additional changes --- guardrails/run/stream_runner.py | 7 +- guardrails/utils/openai_utils/v0.py | 318 ---------------------------- 2 files changed, 1 insertion(+), 324 deletions(-) delete mode 100644 guardrails/utils/openai_utils/v0.py diff --git a/guardrails/run/stream_runner.py b/guardrails/run/stream_runner.py index ba8215530..692d531ae 100644 --- a/guardrails/run/stream_runner.py +++ b/guardrails/run/stream_runner.py @@ -203,16 +203,11 @@ def step( def get_chunk_text(self, chunk: Any, api: Union[PromptCallableBase, None]) -> str: """Get the text from a chunk.""" chunk_text = "" - if isinstance(api, OpenAICallable): + if isinstance(api, OpenAICallable) or isinstance(api, OpenAIChatCallable): finished = chunk.choices[0].finish_reason content = chunk.choices[0].text if not finished and content: chunk_text = content - elif isinstance(api, OpenAIChatCallable): - finished = chunk.choices[0].finish_reason - content = chunk.choices[0].delta.content - if not finished and content: - chunk_text = content elif isinstance(api, LiteLLMCallable): finished = chunk.choices[0].finish_reason content = chunk.choices[0].delta.content diff --git a/guardrails/utils/openai_utils/v0.py b/guardrails/utils/openai_utils/v0.py deleted file mode 100644 index 6b4afd0fb..000000000 --- a/guardrails/utils/openai_utils/v0.py +++ /dev/null @@ -1,318 +0,0 @@ -from typing import Any, AsyncIterable, Dict, Iterable, List, cast -from warnings import warn - -import openai -import openai.error -from tenacity import retry, retry_if_exception_type, wait_exponential_jitter - -from guardrails.utils.llm_response import LLMResponse -from guardrails.utils.openai_utils.base import ( - BaseAsyncOpenAIClient, - BaseSyncOpenAIClient, -) -from guardrails.utils.openai_utils.streaming_utils import ( - num_tokens_from_messages, - num_tokens_from_string, -) - - -def raise_v0_deprecation_warning(): - """Raise a warning about support deprecation for OpenAI v0.x.""" - warn( - """Support for OpenAI v0.x is deprecated and will be removed in - Guardrails 0.5.x. 
Please upgrade to the latest OpenAI v1.x to - continue receiving future updates and support.""", - FutureWarning, - ) - - -def get_static_openai_create_func(): - raise_v0_deprecation_warning() - return openai.Completion.create - - -def get_static_openai_chat_create_func(): - raise_v0_deprecation_warning() - return openai.ChatCompletion.create - - -def get_static_openai_acreate_func(): - raise_v0_deprecation_warning() - return openai.Completion.acreate - - -def get_static_openai_chat_acreate_func(): - raise_v0_deprecation_warning() - return openai.ChatCompletion.acreate - - -OpenAIServiceUnavailableError = openai.error.ServiceUnavailableError - -OPENAI_RETRYABLE_ERRORS = [ - openai.error.APIConnectionError, - openai.error.APIError, - openai.error.TryAgain, - openai.error.Timeout, - openai.error.RateLimitError, - openai.error.ServiceUnavailableError, -] -RETRYABLE_ERRORS = tuple(OPENAI_RETRYABLE_ERRORS) - - -class OpenAIClientV0(BaseSyncOpenAIClient): - def create_embedding( - self, - model: str, - input: List[str], - ): - response = openai.Embedding.create( - api_key=self.api_key, - model=model, - input=input, - api_base=self.api_base, - ) - return [r["embedding"] for r in response["data"]] # type: ignore - - @retry( - wait=wait_exponential_jitter(max=60), - retry=retry_if_exception_type(RETRYABLE_ERRORS), - ) - def create_completion( - self, engine: str, prompt: str, *args, **kwargs - ) -> LLMResponse: - response = openai.Completion.create( - api_key=self.api_key, engine=engine, prompt=prompt, *args, **kwargs - ) - return self.construct_nonchat_response( - stream=kwargs.get("stream", False), - openai_response=response, - ) - - def construct_nonchat_response( - self, - stream: bool, - openai_response: Any, - ) -> LLMResponse: - """Construct an LLMResponse from an OpenAI response. - - Splits execution based on whether the `stream` parameter is set - in the kwargs. - """ - if stream: - # If stream is defined and set to True, - # openai returns a generator - openai_response = cast(Iterable[Dict[str, Any]], openai_response) - - # Simply return the generator wrapped in an LLMResponse - return LLMResponse(output="", stream_output=openai_response) - - # If stream is not defined or is set to False, - # return default behavior - openai_response = cast(Dict[str, Any], openai_response) - return LLMResponse( - output=openai_response["choices"][0]["text"], # type: ignore - prompt_token_count=openai_response["usage"][ # type: ignore - "prompt_tokens" - ], - response_token_count=openai_response["usage"][ # type: ignore - "completion_tokens" - ], - ) - - @retry( - wait=wait_exponential_jitter(max=60), - retry=retry_if_exception_type(RETRYABLE_ERRORS), - ) - def create_chat_completion( - self, model: str, messages: List[Any], *args, **kwargs - ) -> LLMResponse: - response = openai.ChatCompletion.create( - api_key=self.api_key, model=model, messages=messages, *args, **kwargs - ) - - return self.construct_chat_response( - stream=kwargs.get("stream", False), - openai_response=response, - ) - - def construct_chat_response( - self, - stream: bool, - openai_response: Any, - ) -> LLMResponse: - """Construct an LLMResponse from an OpenAI response. - - Splits execution based on whether the `stream` parameter is set - in the kwargs. 
- """ - if stream: - # If stream is defined and set to True, - # openai returns a generator object - openai_response = cast(Iterable[Dict[str, Any]], openai_response) - - # Simply return the generator wrapped in an LLMResponse - return LLMResponse(output="", stream_output=openai_response) - - # If stream is not defined or is set to False, - # extract string from response - openai_response = cast(Dict[str, Any], openai_response) - if "function_call" in openai_response["choices"][0]["message"]: # type: ignore - output = openai_response["choices"][0]["message"][ # type: ignore - "function_call" - ]["arguments"] - else: - output = openai_response["choices"][0]["message"]["content"] # type: ignore - - return LLMResponse( - output=output, - prompt_token_count=openai_response["usage"][ # type: ignore - "prompt_tokens" - ], - response_token_count=openai_response["usage"][ # type: ignore - "completion_tokens" - ], - ) - - -class AsyncOpenAIClientV0(BaseAsyncOpenAIClient): - @retry( - wait=wait_exponential_jitter(max=60), - retry=retry_if_exception_type(RETRYABLE_ERRORS), - ) - async def create_completion( - self, engine: str, prompt: str, *args, **kwargs - ) -> LLMResponse: - response = await openai.Completion.acreate( - api_key=self.api_key, engine=engine, prompt=prompt, *args, **kwargs - ) - return await self.construct_nonchat_response( - stream=kwargs.get("stream", False), - openai_response=response, - prompt=prompt, - engine=engine, - ) - - async def construct_nonchat_response( - self, - stream: bool, - openai_response: Any, - prompt: str, - engine: str, - ) -> LLMResponse: - if stream: - # If stream is defined and set to True, - # openai returns a generator object - complete_output = "" - openai_response = cast(AsyncIterable[Dict[str, Any]], openai_response) - async for response in openai_response: - complete_output += response["choices"][0]["text"] - - # Also, it no longer returns usage information - # So manually count the tokens using tiktoken - prompt_token_count = num_tokens_from_string( - text=prompt, - model_name=engine, - ) - response_token_count = num_tokens_from_string( - text=complete_output, model_name=engine - ) - - # Return the LLMResponse - return LLMResponse( - output=complete_output, - prompt_token_count=prompt_token_count, - response_token_count=response_token_count, - ) - - # If stream is not defined or is set to False, - # extract string from response - openai_response = cast(Dict[str, Any], openai_response) - return LLMResponse( - output=openai_response["choices"][0]["text"], # type: ignore - prompt_token_count=openai_response["usage"][ # type: ignore - "prompt_tokens" - ], - response_token_count=openai_response["usage"][ # type: ignore - "completion_tokens" - ], - ) - - @retry( - wait=wait_exponential_jitter(max=60), - retry=retry_if_exception_type(RETRYABLE_ERRORS), - ) - async def create_chat_completion( - self, model: str, messages: List[Any], *args, **kwargs - ) -> LLMResponse: - response = await openai.ChatCompletion.acreate( - api_key=self.api_key, model=model, messages=messages, *args, **kwargs - ) - - return await self.construct_chat_response( - stream=kwargs.get("stream", False), - openai_response=response, - prompt=messages, - model=model, - ) - - async def construct_chat_response( - self, - stream: bool, - openai_response: Any, - prompt: List[Any], - model: str, - ) -> LLMResponse: - """Construct an LLMResponse from an OpenAI response. - - Splits execution based on whether the `stream` parameter is set - in the kwargs. 
- """ - if stream: - # If stream is defined and set to True, - # openai returns a generator object - collected_messages = [] - openai_response = cast(AsyncIterable[Dict[str, Any]], openai_response) - async for chunk in openai_response: - chunk_message = chunk["choices"][0]["delta"] - collected_messages.append(chunk_message) # save the message - - complete_output = "".join( - [msg.get("content", "") for msg in collected_messages] - ) - - # Also, it no longer returns usage information - # So manually count the tokens using tiktoken - prompt_token_count = num_tokens_from_messages( - messages=prompt, - model=model, - ) - response_token_count = num_tokens_from_string( - text=complete_output, model_name=model - ) - - # Return the LLMResponse - return LLMResponse( - output=complete_output, - prompt_token_count=prompt_token_count, - response_token_count=response_token_count, - ) - - # If stream is not defined or is set to False, - # Extract string from response - openai_response = cast(Dict[str, Any], openai_response) - if "function_call" in openai_response["choices"][0]["message"]: # type: ignore - output = openai_response["choices"][0]["message"][ # type: ignore - "function_call" - ]["arguments"] - else: - output = openai_response["choices"][0]["message"]["content"] # type: ignore - - return LLMResponse( - output=output, - prompt_token_count=openai_response["usage"][ # type: ignore - "prompt_tokens" - ], - response_token_count=openai_response["usage"][ # type: ignore - "completion_tokens" - ], - ) From 883366f9d9d5131d377d130d20ad9d3f0a02b1ab Mon Sep 17 00:00:00 2001 From: Aarav Navani <38411399+oofmeister27@users.noreply.github.com> Date: Wed, 5 Jun 2024 02:16:55 -0700 Subject: [PATCH 18/19] fix if logic --- guardrails/run/stream_runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/guardrails/run/stream_runner.py b/guardrails/run/stream_runner.py index 692d531ae..e932b4b76 100644 --- a/guardrails/run/stream_runner.py +++ b/guardrails/run/stream_runner.py @@ -203,12 +203,12 @@ def step( def get_chunk_text(self, chunk: Any, api: Union[PromptCallableBase, None]) -> str: """Get the text from a chunk.""" chunk_text = "" - if isinstance(api, OpenAICallable) or isinstance(api, OpenAIChatCallable): + if isinstance(api, OpenAICallable): finished = chunk.choices[0].finish_reason content = chunk.choices[0].text if not finished and content: chunk_text = content - elif isinstance(api, LiteLLMCallable): + elif isinstance(api, OpenAIChatCallable) or isinstance(api, LiteLLMCallable): finished = chunk.choices[0].finish_reason content = chunk.choices[0].delta.content if not finished and content: From f572ddc54f6a4c872e75c913f718a915cc1b1446 Mon Sep 17 00:00:00 2001 From: Caleb Courier Date: Mon, 17 Jun 2024 14:44:20 -0500 Subject: [PATCH 19/19] remove duplication --- guardrails/run/stream_runner.py | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/guardrails/run/stream_runner.py b/guardrails/run/stream_runner.py index f55bc7078..71de8ba2d 100644 --- a/guardrails/run/stream_runner.py +++ b/guardrails/run/stream_runner.py @@ -271,21 +271,11 @@ def step( def is_last_chunk(self, chunk: Any, api: Union[PromptCallableBase, None]) -> bool: """Detect if chunk is final chunk.""" - if isinstance(api, OpenAICallable): - finished = chunk.choices[0].finish_reason - return finished is not None - elif isinstance(api, OpenAIChatCallable): + try: finished = chunk.choices[0].finish_reason return finished is not None - elif isinstance(api, LiteLLMCallable): - finished = 
chunk.choices[0].finish_reason - return finished is not None - else: - try: - finished = chunk.choices[0].finish_reason - return finished is not None - except (AttributeError, TypeError): - return False + except (AttributeError, TypeError): + return False def get_chunk_text(self, chunk: Any, api: Union[PromptCallableBase, None]) -> str: """Get the text from a chunk."""
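
For reference, a minimal standalone sketch (not part of the patch series) of the chunk handling that the last two commits converge on: OpenAI v1-style streaming chunks expose choices[0].finish_reason, completion chunks carry their text in choices[0].text, and chat/LiteLLM chunks carry it in choices[0].delta.content. The SimpleNamespace mocks below are illustrative assumptions standing in for the real OpenAI response objects, not the library's types.

# Illustrative sketch only -- mirrors the simplified StreamRunner chunk handling;
# the SimpleNamespace "chunks" are stand-ins for OpenAI v1 streaming responses.
from types import SimpleNamespace


def mock_chat_chunk(content, finish_reason=None):
    # Shape assumed from the v1 chat streaming mocks used in the tests above.
    delta = SimpleNamespace(content=content)
    choice = SimpleNamespace(delta=delta, text=None, finish_reason=finish_reason)
    return SimpleNamespace(choices=[choice])


def is_last_chunk(chunk):
    # Same try/except pattern as the final commit: a chunk without a readable
    # finish_reason is treated as "not last".
    try:
        return chunk.choices[0].finish_reason is not None
    except (AttributeError, TypeError):
        return False


def get_chat_chunk_text(chunk):
    # Chat and LiteLLM chunks put text on delta.content; completion chunks use .text.
    finished = chunk.choices[0].finish_reason
    content = chunk.choices[0].delta.content
    return content if (not finished and content) else ""


if __name__ == "__main__":
    stream = [
        mock_chat_chunk("Hello "),
        mock_chat_chunk("world"),
        mock_chat_chunk(None, finish_reason="stop"),
    ]
    text = "".join(get_chat_chunk_text(c) for c in stream if not is_last_chunk(c))
    print(text)  # -> "Hello world"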