fix telem problem from api and add e2e server test gh action #1109

Merged · 7 commits merged on Oct 2, 2024

Changes from 4 commits
3 changes: 3 additions & 0 deletions .dockerignore
@@ -0,0 +1,3 @@
__pycache__
*.pyc
*.pyo
63 changes: 63 additions & 0 deletions .github/workflows/server_ci.yml
@@ -0,0 +1,63 @@
name: Server CI

on:
push:
branches:
- main
pull_request:
branches:
- main
Collaborator:
let's exclude this I think. Runs too often, too many tests. Good to have on push to main though
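If that suggestion is taken, the trimmed trigger block might look like this sketch (illustrative only, not the committed workflow):

```yaml
# Sketch of the suggested trigger: keep push-to-main and manual runs,
# drop the pull_request trigger so the suite doesn't run on every PR.
on:
  push:
    branches:
      - main
  workflow_dispatch:
```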

workflow_dispatch:

jobs:
build-test:
runs-on: ubuntu-latest
steps:
- name: Check out head
uses: actions/checkout@v3
with:
persist-credentials: false

- name: Set up QEMU
uses: docker/setup-qemu-action@master
with:
platforms: linux/amd64

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@master
with:
platforms: linux/amd64

- name: Build Docker image
uses: docker/build-push-action@v6
with:
context: .
file: server_ci/Dockerfile
platforms: linux/amd64
push: false
tags: guardrails:${{ github.sha }}
load: true
build-args: |
GUARDRAILS_TOKEN=${{ secrets.GUARDRAILS_API_KEY }}

- name: Start Docker container
run: |
docker run -d --name guardrails-container -p 8000:8000 -e OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }} guardrails:${{ github.sha }}

- name: Wait for Docker container to be ready
run: |
for i in {1..30}; do
if docker exec guardrails-container curl -s http://localhost:8000/; then
echo "Server is up!"
break
fi
echo "Waiting for server..."
sleep 5
done

- name: Run Pytest
run: |
pip install pytest openai guardrails-ai
pytest server_ci/tests
docker stop guardrails-container
docker rm guardrails-container
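For anyone reproducing this job locally, the steps above roughly reduce to the following sketch (the `guardrails:local` tag and the placeholder token/key are illustrative):

```bash
# Approximate local equivalent of the CI job above (placeholders illustrative).
docker build -f server_ci/Dockerfile \
  --build-arg GUARDRAILS_TOKEN="<your-guardrails-token>" \
  -t guardrails:local .
docker run -d --name guardrails-container -p 8000:8000 \
  -e OPENAI_API_KEY="<your-openai-key>" guardrails:local

# Wait for the server to come up, then run the e2e tests against it.
pip install pytest openai guardrails-ai
pytest server_ci/tests

docker stop guardrails-container && docker rm guardrails-container
```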
14 changes: 8 additions & 6 deletions guardrails/cli/create.py
@@ -22,7 +22,7 @@ def create_command(
default="",
help="A comma-separated list of validator hub URIs.",
),
name: Optional[str] = typer.Option(
guard_name: Optional[str] = typer.Option(
default=None, help="The name of the guard to define in the file."
),
local_models: Optional[bool] = typer.Option(
@@ -78,13 +78,15 @@ def create_command(
local_models,
dry_run,
)
if name is None and validators:
name = "Guard"
if guard_name is None and validators:
guard_name = "Guard"
if len(installed_validators) > 0:
name = installed_validators[0] + "Guard"
guard_name = installed_validators[0] + "Guard"

console.print(f"No name provided for guard. Defaulting to {name}")
new_config_file = generate_config_file(installed_validators, name)
console.print(
    f"No guard name provided. Defaulting to {guard_name}"
)
new_config_file = generate_config_file(installed_validators, guard_name)

if dry_run:
console.print(f"Not actually saving output to [bold]{filepath}[/bold]")
2 changes: 2 additions & 0 deletions guardrails/cli/telemetry.py
@@ -19,4 +19,6 @@ def trace_if_enabled(command_name: str):
("machine", platform.machine()),
("processor", platform.processor()),
],
False,  # is_parent
False,  # has_parent
)
9 changes: 8 additions & 1 deletion guardrails/utils/hub_telemetry_utils.py
@@ -104,7 +104,14 @@ def extract_current_context(self):
context = self._prop.extract(carrier=self._carrier)
return context

def create_new_span(self, span_name: str, attributes: list):
def create_new_span(
self,
span_name: str,
attributes: list,
# TODO: deprecate these in 0.6.0
is_parent: bool,
has_parent: bool,
):
"""Creates a new span within the tracer with the given name and
attributes.

56 changes: 56 additions & 0 deletions server_ci/Dockerfile
@@ -0,0 +1,56 @@
FROM python:3.11-slim

ARG GUARDRAILS_TOKEN

Check warning on line 3 in server_ci/Dockerfile (GitHub Actions / build-test): Sensitive data should not be used in the ARG or ENV commands. SecretsUsedInArgOrEnv: Do not use ARG or ENV instructions for sensitive data (ARG "GUARDRAILS_TOKEN"). More info: https://docs.docker.com/go/dockerfile/rule/secrets-used-in-arg-or-env/
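One way to address this warning, as a sketch outside the scope of this PR, is a BuildKit secret mount so the token never lands in an image layer (the `guardrails_token` secret id is illustrative):

```dockerfile
# Hypothetical alternative to passing GUARDRAILS_TOKEN as a build ARG:
# mount it as a BuildKit secret that exists only during this RUN step.
# Build with: docker build --secret id=guardrails_token,env=GUARDRAILS_TOKEN ...
RUN --mount=type=secret,id=guardrails_token \
    guardrails configure --enable-metrics --enable-remote-inferencing \
    --token "$(cat /run/secrets/guardrails_token)"
```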
ARG GUARDRAILS_TEMPLATE="guard-template.json"

# Set environment variables to avoid writing .pyc files and to unbuffer Python output
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV LOGLEVEL="DEBUG"
ENV GUARDRAILS_LOG_LEVEL="DEBUG"
ENV APP_ENVIRONMENT="production"
ENV GUARDRAILS_TEMPLATE=$GUARDRAILS_TEMPLATE

WORKDIR /app

# Install Git and necessary dependencies
RUN apt-get update && \
apt-get install -y make git curl gcc jq pipx && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

RUN pipx install poetry

# Ensure poetry is available in the PATH
ENV PATH="/root/.local/bin:$PATH"

# Copy the entrypoint script
COPY /server_ci/entry.sh /app/entry.sh
COPY ../ /app/guardrails

# Install guardrails, the guardrails API, and gunicorn
# openai optional. only used for integration testing
RUN pip install "gunicorn" "guardrails-api"

WORKDIR /app/guardrails

RUN poetry install

RUN pip install ./

RUN guardrails configure --enable-metrics --enable-remote-inferencing --token $GUARDRAILS_TOKEN

# bring in base template
COPY /server_ci/$GUARDRAILS_TEMPLATE /app/$GUARDRAILS_TEMPLATE

# Install Hub Deps and create config.py
RUN guardrails create --template /app/$GUARDRAILS_TEMPLATE


RUN cp -r /usr/local/lib/python3.11/site-packages/guardrails/hub/* /app/guardrails/guardrails/hub

# Expose port 8000 for the application
EXPOSE 8000

# Command to start the Gunicorn server with specified settings
CMD ["/bin/bash", "/app/entry.sh"]
14 changes: 14 additions & 0 deletions server_ci/config.py
@@ -0,0 +1,14 @@
import json
import os
from guardrails import Guard

try:
file_path = os.path.join(os.getcwd(), "guard-template.json")
with open(file_path, "r") as fin:
guards = json.load(fin)["guards"] or []
except json.JSONDecodeError:
    print("Error parsing guards from JSON")
    raise SystemExit(1)

# instantiate guards
guard0 = Guard.from_dict(guards[0])
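A slightly more defensive variant of this loader, as a sketch assuming the same guard-template.json layout, would also surface a missing file or an empty guards list:

```python
import json
import os
import sys

from guardrails import Guard

# Sketch: fail fast with a clear message if the template is missing,
# unparsable, or defines no guards.
file_path = os.path.join(os.getcwd(), "guard-template.json")
try:
    with open(file_path, "r") as fin:
        guards = json.load(fin).get("guards") or []
except FileNotFoundError:
    sys.exit(f"Guard template not found at {file_path}")
except json.JSONDecodeError:
    sys.exit("Error parsing guards from JSON")

if not guards:
    sys.exit("No guards defined in template")

guard0 = Guard.from_dict(guards[0])
```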
10 changes: 10 additions & 0 deletions server_ci/entry.sh
@@ -0,0 +1,10 @@
gunicorn \
--workers 3 \
--threads 2 \
--bind 0.0.0.0:8000 \
--worker-class gthread \
--timeout 30 \
--keep-alive 20 \
--preload \
--graceful-timeout 60 \
"guardrails_api.app:create_app()"
19 changes: 19 additions & 0 deletions server_ci/guard-template.json
@@ -0,0 +1,19 @@
{
"name": "server-ci",
"description": "basic guard template for oss server ci",
"template_version": "0.0.1",
"namespace": "guardrails",
"guards": [
{
"id": "test-guard",
"name": "test-guard",
"validators": [
{
"id": "guardrails/two_words",
"on": "$",
"onFail": "fix"
}
]
}
]
}
98 changes: 98 additions & 0 deletions server_ci/tests/test_server.py
@@ -0,0 +1,98 @@
import openai
import os
import pytest
from guardrails import Guard, settings

# OpenAI compatible Guardrails API Guard
openai.base_url = "http://127.0.0.1:8000/guards/test-guard/openai/v1/"

openai.api_key = os.getenv("OPENAI_API_KEY") or "some key"


@pytest.mark.parametrize(
"mock_llm_output, validation_output, validation_passed, error",
[
(
"France is wonderful in the spring",
"France is",
True,
False,
),
],
)
def test_guard_validation(
    mock_llm_output, validation_output, validation_passed, error
):
settings.use_server = True
guard = Guard(name="test-guard")
if error:
with pytest.raises(Exception):
validation_outcome = guard.validate(mock_llm_output)
else:
validation_outcome = guard.validate(mock_llm_output)
assert validation_outcome.validation_passed == validation_passed
assert validation_outcome.validated_output == validation_output


@pytest.mark.parametrize(
"message_content, output, validation_passed, error",
[
(
"Tell me about Oranges in 5 words",
"Citrus fruit",
True,
False,
),
],
)
def test_server_guard_llm_integration(
message_content, output, validation_passed, error
):
settings.use_server = True
guard = Guard(name="test-guard")
messages = [{"role": "user", "content": message_content}]
if error:
with pytest.raises(Exception):
validation_outcome = guard(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0,
)
else:
validation_outcome = guard(
model="gpt-4o-mini",
messages=messages,
temperature=0.0,
)
assert output in validation_outcome.validated_output
assert validation_outcome.validation_passed is validation_passed


@pytest.mark.parametrize(
"message_content, output, validation_passed, error",
[
(
"Write 5 words of prose.",
"Whispers of",
True,
False,
),
],
)
def test_server_openai_llm_integration(
message_content, output, validation_passed, error
):
messages = [{"role": "user", "content": message_content}]
if error:
with pytest.raises(Exception):
completion = openai.chat.completions.create(
model="gpt-4o-mini",
messages=messages,
temperature=0.0,
)
else:
completion = openai.chat.completions.create(
model="gpt-4o-mini",
messages=messages,
temperature=0.0,
)
assert output in completion.choices[0].message.content
assert completion.guardrails["validation_passed"] is validation_passed