diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..008af3582
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,3 @@
+__pycache__
+*.pyc
+*.pyo
diff --git a/.github/workflows/server_ci.yml b/.github/workflows/server_ci.yml
new file mode 100644
index 000000000..1fb1d9242
--- /dev/null
+++ b/.github/workflows/server_ci.yml
@@ -0,0 +1,60 @@
+name: Server CI
+
+on:
+  push:
+    branches:
+      - main
+  workflow_dispatch:
+
+jobs:
+  build-test-server:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out head
+        uses: actions/checkout@v3
+        with:
+          persist-credentials: false
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@master
+        with:
+          platforms: linux/amd64
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@master
+        with:
+          platforms: linux/amd64
+
+      - name: Build Docker image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: server_ci/Dockerfile
+          platforms: linux/amd64
+          push: false
+          tags: guardrails:${{ github.sha }}
+          load: true
+          build-args: |
+            GUARDRAILS_TOKEN=${{ secrets.GUARDRAILS_API_KEY }}
+
+      - name: Start Docker container
+        run: |
+          docker run -d --name guardrails-container -p 8000:8000 -e OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }} guardrails:${{ github.sha }}
+
+      - name: Wait for Docker container to be ready
+        run: |
+          for i in {1..30}; do
+            if docker exec guardrails-container curl -s http://localhost:8000/; then
+              echo "Server is up!"
+              break
+            fi
+            echo "Waiting for server..."
+            sleep 5
+          done
+
+      - name: Run Pytest
+        run: |
+          pip install pytest openai guardrails-ai
+          pytest server_ci/tests
+          docker stop guardrails-container
+          docker rm guardrails-container
\ No newline at end of file
diff --git a/docs/getting_started/guardrails_server.md b/docs/getting_started/guardrails_server.md
index 077b1dbc6..e150a88d8 100644
--- a/docs/getting_started/guardrails_server.md
+++ b/docs/getting_started/guardrails_server.md
@@ -35,7 +35,7 @@ We'll use the `create` command on the guardrails CLI to do this. We'll specify t
 
 
 ```bash
-guardrails create --validators hub://guardrails/gibberish_text --name gibberish_guard
+guardrails create --validators hub://guardrails/gibberish_text --guard-name gibberish_guard
 ```
 
 :::note
diff --git a/docs/how_to_guides/hosting_with_docker.md b/docs/how_to_guides/hosting_with_docker.md
index acd8d159d..d25b7205b 100644
--- a/docs/how_to_guides/hosting_with_docker.md
+++ b/docs/how_to_guides/hosting_with_docker.md
@@ -28,7 +28,7 @@ First, we need to install any validators we want to use from the hub. In this ex
 
 
 ```
-guardrails create --validators=hub://guardrails/regex_match --name=title-case
+guardrails create --validators=hub://guardrails/regex_match --guard-name=title-case
 ```
 
 
diff --git a/guardrails/cli/create.py b/guardrails/cli/create.py
index 26ea830ca..4668236ba 100644
--- a/guardrails/cli/create.py
+++ b/guardrails/cli/create.py
@@ -22,7 +22,7 @@ def create_command(
         default="",
         help="A comma-separated list of validator hub URIs.",
     ),
-    name: Optional[str] = typer.Option(
+    guard_name: Optional[str] = typer.Option(
        default=None, help="The name of the guard to define in the file."
     ),
     local_models: Optional[bool] = typer.Option(
@@ -78,13 +78,15 @@
         local_models,
         dry_run,
     )
-    if name is None and validators:
-        name = "Guard"
+    if guard_name is None and validators:
+        guard_name = "Guard"
         if len(installed_validators) > 0:
-            name = installed_validators[0] + "Guard"
+            guard_name = installed_validators[0] + "Guard"
 
-        console.print(f"No name provided for guard. Defaulting to {name}")
-    new_config_file = generate_config_file(installed_validators, name)
+        console.print(
+            f"No guard name provided. Defaulting to {guard_name}"
+        )
+    new_config_file = generate_config_file(installed_validators, guard_name)
 
     if dry_run:
         console.print(f"Not actually saving output to [bold]{filepath}[/bold]")
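Note on the `--name` → `--guard-name` rename above: when the flag is omitted but validators are requested, `create_command` derives a name from the first installed validator. A rough standalone sketch of that fallback for reference; the helper name and validator strings are illustrative, not part of the diff:

```python
# Illustrative rendition of the --guard-name fallback in guardrails/cli/create.py.
from typing import List, Optional


def resolve_guard_name(
    guard_name: Optional[str], installed_validators: List[str]
) -> str:
    # The real code only falls back when validators were requested on the CLI.
    if guard_name is None:
        guard_name = "Guard"
        if installed_validators:
            # First installed validator wins, e.g. "RegexMatch" -> "RegexMatchGuard"
            guard_name = installed_validators[0] + "Guard"
    return guard_name


assert resolve_guard_name(None, ["RegexMatch"]) == "RegexMatchGuard"
assert resolve_guard_name("title-case", ["RegexMatch"]) == "title-case"
```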
Defaulting to {name}") - new_config_file = generate_config_file(installed_validators, name) + console.print( + "No guard name provided for guard. Defaulting to {guard_name}" + ) + new_config_file = generate_config_file(installed_validators, guard_name) if dry_run: console.print(f"Not actually saving output to [bold]{filepath}[/bold]") diff --git a/guardrails/cli/telemetry.py b/guardrails/cli/telemetry.py index 1d66adeaa..91460f30f 100644 --- a/guardrails/cli/telemetry.py +++ b/guardrails/cli/telemetry.py @@ -19,4 +19,6 @@ def trace_if_enabled(command_name: str): ("machine", platform.machine()), ("processor", platform.processor()), ], + False, + False, ) diff --git a/guardrails/utils/hub_telemetry_utils.py b/guardrails/utils/hub_telemetry_utils.py index d14dcaf6d..aaaa710d7 100644 --- a/guardrails/utils/hub_telemetry_utils.py +++ b/guardrails/utils/hub_telemetry_utils.py @@ -104,7 +104,14 @@ def extract_current_context(self): context = self._prop.extract(carrier=self._carrier) return context - def create_new_span(self, span_name: str, attributes: list): + def create_new_span( + self, + span_name: str, + attributes: list, + # todo deprecate these in 060 + is_parent: bool, # + has_parent: bool, # no-qa + ): """Creates a new span within the tracer with the given name and attributes. diff --git a/server_ci/Dockerfile b/server_ci/Dockerfile new file mode 100644 index 000000000..477069317 --- /dev/null +++ b/server_ci/Dockerfile @@ -0,0 +1,56 @@ +FROM python:3.11-slim + +ARG GUARDRAILS_TOKEN +ARG GUARDRAILS_TEMPLATE="guard-template.json" + +# Set environment variables to avoid writing .pyc files and to unbuffer Python output +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV LOGLEVEL="DEBUG" +ENV GUARDRAILS_LOG_LEVEL="DEBUG" +ENV APP_ENVIRONMENT="production" +ENV GUARDRAILS_TEMPLATE=$GUARDRAILS_TEMPLATE + +WORKDIR /app + +# Install Git and necessary dependencies +RUN apt-get update && \ + apt-get install -y make git curl gcc jq pipx && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +RUN pipx install poetry + +# Ensure poetry is available in the PATH +ENV PATH="/root/.local/bin:$PATH" + +# Copy the entrypoint script +COPY /server_ci/entry.sh /app/entry.sh +COPY ../ /app/guardrails + +# Install guardrails, the guardrails API, and gunicorn +# openai optional. 
diff --git a/server_ci/Dockerfile b/server_ci/Dockerfile
new file mode 100644
index 000000000..477069317
--- /dev/null
+++ b/server_ci/Dockerfile
@@ -0,0 +1,56 @@
+FROM python:3.11-slim
+
+ARG GUARDRAILS_TOKEN
+ARG GUARDRAILS_TEMPLATE="guard-template.json"
+
+# Set environment variables to avoid writing .pyc files and to unbuffer Python output
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+ENV LOGLEVEL="DEBUG"
+ENV GUARDRAILS_LOG_LEVEL="DEBUG"
+ENV APP_ENVIRONMENT="production"
+ENV GUARDRAILS_TEMPLATE=$GUARDRAILS_TEMPLATE
+
+WORKDIR /app
+
+# Install Git and necessary dependencies
+RUN apt-get update && \
+    apt-get install -y make git curl gcc jq pipx && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN pipx install poetry
+
+# Ensure poetry is available in the PATH
+ENV PATH="/root/.local/bin:$PATH"
+
+# Copy the entrypoint script
+COPY /server_ci/entry.sh /app/entry.sh
+COPY ../ /app/guardrails
+
+# Install guardrails, the guardrails API, and gunicorn
+# openai is optional; it is only used for integration testing
+RUN pip install "gunicorn" "guardrails-api"
+
+WORKDIR /app/guardrails
+
+RUN poetry install
+
+RUN pip install ./
+
+RUN guardrails configure --enable-metrics --enable-remote-inferencing --token $GUARDRAILS_TOKEN
+
+# bring in base template
+COPY /server_ci/$GUARDRAILS_TEMPLATE /app/$GUARDRAILS_TEMPLATE
+
+# Install Hub Deps and create config.py
+RUN guardrails create --template /app/$GUARDRAILS_TEMPLATE
+
+
+RUN cp -r /usr/local/lib/python3.11/site-packages/guardrails/hub/* /app/guardrails/guardrails/hub
+
+# Expose port 8000 for the application
+EXPOSE 8000
+
+# Command to start the Gunicorn server with specified settings
+CMD ["/bin/bash", "/app/entry.sh"]
\ No newline at end of file
diff --git a/server_ci/config.py b/server_ci/config.py
new file mode 100644
index 000000000..b84d637ea
--- /dev/null
+++ b/server_ci/config.py
@@ -0,0 +1,14 @@
+import json
+import os
+from guardrails import Guard
+
+try:
+    file_path = os.path.join(os.getcwd(), "guard-template.json")
+    with open(file_path, "r") as fin:
+        guards = json.load(fin)["guards"] or []
+except json.JSONDecodeError:
+    print("Error parsing guards from JSON")
+    raise SystemExit(1)
+
+# instantiate guards
+guard0 = Guard.from_dict(guards[0])
diff --git a/server_ci/entry.sh b/server_ci/entry.sh
new file mode 100644
index 000000000..6a9b61b9e
--- /dev/null
+++ b/server_ci/entry.sh
@@ -0,0 +1,10 @@
+gunicorn \
+    --workers 3 \
+    --threads 2 \
+    --bind 0.0.0.0:8000 \
+    --worker-class gthread \
+    --timeout 30 \
+    --keep-alive 20 \
+    --preload \
+    --graceful-timeout 60 \
+    "guardrails_api.app:create_app()"
\ No newline at end of file
diff --git a/server_ci/guard-template.json b/server_ci/guard-template.json
new file mode 100644
index 000000000..1edc95735
--- /dev/null
+++ b/server_ci/guard-template.json
@@ -0,0 +1,19 @@
+{
+    "name": "server-ci",
+    "description": "basic guard template for oss server ci",
+    "template_version": "0.0.1",
+    "namespace": "guardrails",
+    "guards": [
+        {
+            "id": "test-guard",
+            "name": "test-guard",
+            "validators": [
+                {
+                    "id": "guardrails/two_words",
+                    "on": "$",
+                    "onFail": "fix"
+                }
+            ]
+        }
+    ]
+}
\ No newline at end of file
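Before the tests, it may help to see what the template's single guard does. A sketch of exercising it locally, loading the template the same way `server_ci/config.py` does; it assumes `guardrails/two_words` is installed from the hub, and with `onFail: "fix"` the validated output should truncate to two words, which is the behavior the tests below rely on:

```python
# Load the first guard from the CI template and validate locally.
import json

from guardrails import Guard

with open("server_ci/guard-template.json", "r") as fin:
    guards = json.load(fin)["guards"]

guard = Guard.from_dict(guards[0])
outcome = guard.validate("France is wonderful in the spring")
print(outcome.validated_output)  # expected: "France is" (two_words fix)
```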
diff --git a/server_ci/tests/test_server.py b/server_ci/tests/test_server.py
new file mode 100644
index 000000000..9d51fde0f
--- /dev/null
+++ b/server_ci/tests/test_server.py
@@ -0,0 +1,98 @@
+import openai
+import os
+import pytest
+from guardrails import Guard, settings
+
+# OpenAI compatible Guardrails API Guard
+openai.base_url = "http://127.0.0.1:8000/guards/test-guard/openai/v1/"
+
+openai.api_key = os.getenv("OPENAI_API_KEY") or "some key"
+
+
+@pytest.mark.parametrize(
+    "mock_llm_output, validation_output, validation_passed, error",
+    [
+        (
+            "France is wonderful in the spring",
+            "France is",
+            True,
+            False,
+        ),
+    ],
+)
+def test_guard_validation(mock_llm_output, validation_output, validation_passed, error):
+    settings.use_server = True
+    guard = Guard(name="test-guard")
+    if error:
+        with pytest.raises(Exception):
+            validation_outcome = guard.validate(mock_llm_output)
+    else:
+        validation_outcome = guard.validate(mock_llm_output)
+        assert validation_outcome.validation_passed == validation_passed
+        assert validation_outcome.validated_output == validation_output
+
+
+@pytest.mark.parametrize(
+    "message_content, output, validation_passed, error",
+    [
+        (
+            "Tell me about Oranges in 5 words",
+            "Citrus fruit",
+            True,
+            False,
+        ),
+    ],
+)
+def test_server_guard_llm_integration(
+    message_content, output, validation_passed, error
+):
+    settings.use_server = True
+    guard = Guard(name="test-guard")
+    messages = [{"role": "user", "content": message_content}]
+    if error:
+        with pytest.raises(Exception):
+            validation_outcome = guard(
+                model="gpt-3.5-turbo",
+                messages=messages,
+                temperature=0.0,
+            )
+    else:
+        validation_outcome = guard(
+            model="gpt-4o-mini",
+            messages=messages,
+            temperature=0.0,
+        )
+        assert output in validation_outcome.validated_output
+        assert validation_outcome.validation_passed is validation_passed
+
+
+@pytest.mark.parametrize(
+    "message_content, output, validation_passed, error",
+    [
+        (
+            "Write 5 words of prose.",
+            "Whispers of",
+            True,
+            False,
+        ),
+    ],
+)
+def test_server_openai_llm_integration(
+    message_content, output, validation_passed, error
+):
+    messages = [{"role": "user", "content": message_content}]
+    if error:
+        with pytest.raises(Exception):
+            completion = openai.chat.completions.create(
+                model="gpt-4o-mini",
+                messages=messages,
+                temperature=0.0,
+            )
+    else:
+        completion = openai.chat.completions.create(
+            model="gpt-4o-mini",
+            messages=messages,
+            temperature=0.0,
+        )
+        assert output in completion.choices[0].message.content
+        assert completion.guardrails["validation_passed"] is validation_passed
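When running these tests outside of CI, the container needs to be up before pytest starts; something like this Python equivalent of the workflow's shell wait-loop could gate the run. The URL and retry budget mirror the workflow step above, but the script itself is a sketch:

```python
# Rough Python equivalent of the "Wait for Docker container to be ready" step.
import time
import urllib.request


def wait_for_server(url: str = "http://127.0.0.1:8000/", attempts: int = 30) -> bool:
    for _ in range(attempts):
        try:
            with urllib.request.urlopen(url, timeout=5):
                return True  # server answered; it's up
        except OSError:
            time.sleep(5)  # not ready yet; retry
    return False


if __name__ == "__main__":
    if not wait_for_server():
        raise SystemExit("Server never became ready")
```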