
add handling for validation errors to return 400 and message #56


Merged · 5 commits · Jul 11, 2024
33 changes: 13 additions & 20 deletions guardrails_api/blueprints/guards.py
@@ -191,26 +191,19 @@ def openai_v1_chat_completions(guard_name: str):
         pass

     if not stream:
-        try:
-            validation_outcome: ValidationOutcome = guard(
-                # todo make this come from the guard struct?
-                # currently we dont support .configure
-                num_reasks=0,
-                **payload,
-            )
-            llm_response = guard.history[-1].iterations[-1].outputs.llm_response_info
-            result = outcome_to_chat_completion(
-                validation_outcome=validation_outcome,
-                llm_response=llm_response,
-                has_tool_gd_tool_call=has_tool_gd_tool_call,
-            )
-            return result
-        except Exception as e:
-            raise HttpError(
-                status=400,
-                message="BadRequest",
-                cause=(str(e)),
-            )
+        validation_outcome: ValidationOutcome = guard(
+            # todo make this come from the guard struct?
+            # currently we dont support .configure
+            num_reasks=0,
+            **payload,
+        )
+        llm_response = guard.history[-1].iterations[-1].outputs.llm_response_info
+        result = outcome_to_chat_completion(
+            validation_outcome=validation_outcome,
+            llm_response=llm_response,
+            has_tool_gd_tool_call=has_tool_gd_tool_call,
+        )
+        return result

     else:
         # need to return validated chunks that look identical to openai's
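The net effect of this hunk: openai_v1_chat_completions no longer swallows every exception and re-raises it as a generic HttpError(status=400, message="BadRequest"). Errors now propagate out of the route so they can be handled centrally by the handle_error decorator (next file), letting validation failures surface their own message. A minimal sketch of the before/after control flow; old_flow, new_flow, and their arguments are illustrative stand-ins, not code from this PR:

from guardrails_api.classes.http_error import HttpError


def old_flow(guard, payload):
    try:
        return guard(num_reasks=0, **payload)
    except Exception as e:
        # Before: any failure, validation-related or not, collapsed
        # into an opaque 400 with the fixed message "BadRequest".
        raise HttpError(status=400, message="BadRequest", cause=str(e))


def new_flow(guard, payload):
    # After: exceptions propagate unchanged; the handle_error decorator
    # on the route maps ValidationError to a 400 carrying the
    # validator's own message.
    return guard(num_reasks=0, **payload)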
5 changes: 5 additions & 0 deletions guardrails_api/utils/handle_error.py
@@ -3,13 +3,18 @@
 from werkzeug.exceptions import HTTPException
 from guardrails_api.classes.http_error import HttpError
 from guardrails_api.utils.logger import logger
+from guardrails.errors import ValidationError


 def handle_error(fn):
     @wraps(fn)
     def decorator(*args, **kwargs):
         try:
             return fn(*args, **kwargs)
+        except ValidationError as validation_error:
+            logger.error(validation_error)
+            traceback.print_exception(type(validation_error), validation_error, validation_error.__traceback__)
+            return str(validation_error), 400
         except HttpError as http_error:
             logger.error(http_error)
             traceback.print_exception(http_error)
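For context, handle_error wraps the blueprint's route functions, so the new branch converts any ValidationError raised by a guard into the tuple (str(error), 400), which Flask renders as a 400 response whose body is the validator's message. A minimal sketch of that wiring; the Flask app and route below are illustrative assumptions, not code from this PR:

from flask import Flask
from guardrails.errors import ValidationError
from guardrails_api.utils.handle_error import handle_error

app = Flask(__name__)


@app.route("/guards/<guard_name>/validate", methods=["POST"])
@handle_error
def validate(guard_name: str):
    # A failing validator would raise ValidationError from guard(...);
    # handle_error catches it and returns (message, 400).
    raise ValidationError("Test guard validation error")

Centralizing the mapping in the decorator keeps individual routes free of error-handling boilerplate and makes the 400 contract consistent across endpoints.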
66 changes: 66 additions & 0 deletions tests/blueprints/test_guards.py
@@ -11,6 +11,7 @@
 from guardrails.classes.generic import Stack
 from guardrails.classes.history import Call, Iteration
 from guardrails_api.app import register_config
+from guardrails.errors import ValidationError

 # TODO: Should we mock this somehow?
 # Right now it's just empty, but it technically does a file read
@@ -548,6 +549,71 @@ def test_validate__call(mocker):

     del os.environ["PGHOST"]

+def test_validate__call_throws_validation_error(mocker):
+    os.environ["PGHOST"] = "localhost"
+
+    mock___call__ = mocker.patch.object(MockGuardStruct, "__call__")
+    mock___call__.side_effect = ValidationError("Test guard validation error")
+
+    mock_guard = MockGuardStruct()
+    mock_from_dict = mocker.patch("guardrails_api.blueprints.guards.Guard.from_dict")
+    mock_from_dict.return_value = mock_guard
+
+    # mock_tracer = MockTracer()
+    mock_request = MockRequest(
+        "POST",
+        json={
+            "llmApi": "openai.Completion.create",
+            "promptParams": {"p1": "bar"},
+            "args": [1, 2, 3],
+            "some_kwarg": "foo",
+            "prompt": "Hello world!",
+        },
+        headers={"x-openai-api-key": "mock-key"},
+    )
+
+    mocker.patch("flask.Blueprint", new=MockBlueprint)
+    mocker.patch("guardrails_api.blueprints.guards.request", mock_request)
+    mock_get_guard = mocker.patch(
+        "guardrails_api.blueprints.guards.guard_client.get_guard",
+        return_value=mock_guard,
+    )
+    mocker.patch(
+        "guardrails_api.blueprints.guards.get_llm_callable",
+        return_value="openai.Completion.create",
+    )
+
+    mocker.patch("guardrails_api.blueprints.guards.CacheClient.set")
+
+    mock_status = mocker.patch(
+        "guardrails.classes.history.call.Call.status", new_callable=PropertyMock
+    )
+    mock_status.return_value = "fail"
+    mock_guard.history = Stack(Call())
+    from guardrails_api.blueprints.guards import validate
+
+    response = validate("My%20Guard's%20Name")
+
+    mock_get_guard.assert_called_once_with("My Guard's Name")
+
+    assert mock___call__.call_count == 1
+
+    mock___call__.assert_called_once_with(
+        1,
+        2,
+        3,
+        llm_api="openai.Completion.create",
+        prompt_params={"p1": "bar"},
+        num_reasks=None,
+        some_kwarg="foo",
+        api_key="mock-key",
+        prompt="Hello world!",
+    )
+
+    assert response == ('Test guard validation error', 400)
+
+    del os.environ["PGHOST"]
+
 def test_openai_v1_chat_completions__call(mocker):
     from guardrails_api.blueprints.guards import openai_v1_chat_completions
     os.environ["PGHOST"] = "localhost"
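A hypothetical client-side view of the new contract; the host, port, guard name, and payload here are assumptions for illustration:

import requests

resp = requests.post(
    "http://localhost:8000/guards/my-guard/validate",
    json={"llmApi": "openai.Completion.create", "prompt": "Hello world!"},
)
if resp.status_code == 400:
    # The body is the validator's own message, e.g.
    # "Test guard validation error", not a generic "BadRequest".
    print(resp.text)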