diff --git a/.cursor/rules/global.mdc b/.cursor/rules/global.mdc new file mode 100644 index 00000000..456a8f4c --- /dev/null +++ b/.cursor/rules/global.mdc @@ -0,0 +1,99 @@ +--- +description: +globs: *.py,**/*.py +alwaysApply: false +--- +# Global Cursor Rules for Python Development + +## Code Style and Formatting +- Use double quotes for strings, as specified in configuration for `ruff`. +- Indent with 4 spaces; avoid tabs. +- Adhere to a maximum line length of 88 characters, per `ruff`'s default. +- Employ type hints from the `typing` module for all function parameters, return types, and variable declarations where applicable. +- Provide docstrings for all modules, classes, and functions in Google style format. +- Prefer f-strings for string interpolation, except when no variables are involved, to maintain clarity. +- Organize import statements at the file's top, grouped as follows with a blank line between groups: + 1. Standard library imports (e.g., `import os`) + 2. Third-party imports (e.g., `import numpy`) + 3. Local application/library imports (e.g., `from myapp.utils import helper`) +- Use absolute imports over relative imports for consistency and clarity. +- Prohibit wildcard imports (`from module import *`) to prevent namespace pollution. + +## Dependencies +- Manage dependencies using the `uv` dependency manager, with lock files stored in `uv.lock`. +- Document all dependencies in `pyproject.toml`. +- Regularly update dependencies to address security vulnerabilities and maintain compatibility. + +## Naming Conventions +- Use `snake_case` for variables, functions, and methods. +- Use `PascalCase` for class and type names. +- Use `UPPER_SNAKE_CASE` for constants. +- Prefix private attributes and methods with a single underscore (e.g., `_private_method`). +- Choose descriptive, self-explanatory names that reflect the purpose or behavior of variables, functions, or classes. +- Avoid ambiguous abbreviations unless widely understood (e.g., `id` is acceptable; `tmp` is not). + +## Code Organization +- Ensure each module has a single, well-defined responsibility (e.g., `api/chat.py` for chat endpoints, `services/ai/workflows/agents/evaluator.py` for evaluation logic). +- Keep modules focused on a single responsibility. Split large modules into smaller, more manageable ones when they become difficult to navigate or maintain. +- Limit directory nesting to three levels where possible (e.g., `services/ai/workflows` is acceptable; avoid deeper nesting like `services/ai/workflows/agents/handlers` unless justified). +- Consolidate utility modules (e.g., `utils.py`) into domain-specific modules or packages (e.g., move `services/ai/workflows/utils/model_factory.py` to `services/ai/workflows/models/factory.py`) to avoid generic naming. +- Organize the `tools` directory by domain, creating subdirectories like `tools/blockchain` for DAO-related utilities and `tools/integrations` for external services (e.g., Twitter, LunarCrush). +- Place test files in a `tests` directory with a mirrored structure (e.g., `tests/services/ai/workflows/test_evaluator.py` for `services/ai/workflows/agents/evaluator.py`) or alongside modules if small (e.g., `services/ai/workflows/agents/test_evaluator.py`). +- Include a `README.md` or module-level docstring in each major directory (e.g., `services/ai`, `services/integrations`) to document purpose, usage, and key components. +- Structure files with imports at the top, followed by constants, classes, and then functions, maintaining a logical flow.
+- Group related functionality within packages (e.g., keep all webhook handlers in `services/integrations/webhooks`) and avoid duplicating functionality across directories (e.g., `lib/utils.py` vs. `services/ai/workflows/utils`). + +## Error Handling +- Catch specific exception types (e.g., `ValueError`, `KeyError`) rather than the generic `Exception`, except in cases where catching all exceptions is necessary, such as in top-level error handlers. +- Handle exceptions at the appropriate abstraction level, avoiding overly broad try-except blocks. +- Include descriptive error messages in raised exceptions to aid debugging. +- Use context managers (`with` statements) for resource management (e.g., file operations, database connections). + +## Logging +- Utilize the project's standard logging setup via `configure_logger`. +- Apply appropriate log levels: + - `DEBUG`: Detailed information for debugging. + - `INFO`: Confirmation of normal operation. + - `WARNING`: Indications of potential issues. + - `ERROR`: Errors that impact functionality. + - `CRITICAL`: Severe errors that may cause termination. +- Include sufficient context in log messages (e.g., relevant variables or state) to facilitate troubleshooting. + +## Performance +- Use list, dictionary, or set comprehensions for concise and efficient data transformations when appropriate. +- Employ generators or lazy evaluation for processing large datasets to optimize memory usage. +- Evaluate algorithms for time and space complexity, favoring efficient solutions for critical paths. +- Profile code to identify and address performance bottlenecks. +- Use `httpx` for any web api calls. + +## Security +- Avoid hardcoding sensitive information (e.g., API keys, passwords); use environment variables or secret management tools. +- Validate and sanitize all user inputs to prevent injection attacks or invalid data. +- Adhere to the principle of least privilege when accessing resources or assigning permissions. +- Use secure protocols for network operations (e.g., `https`) and appropriate libraries for security-sensitive operations (e.g., `bcrypt` or `argon2` for password hashing). + +## Testing +- Write unit tests for all public functions, methods, and classes using a framework like `pytest`. +- Achieve at least 80% test coverage for critical modules; aim for 100% in security-sensitive code. +- Write tests that cover happy paths, edge cases, and error conditions to ensure robustness. +- Include integration tests for interactions between modules or external systems. +- Mock external dependencies in tests to ensure isolation and reproducibility. +- Run tests automatically in CI/CD pipelines to catch regressions early. + +## Documentation +- Maintain a `README.md` with project setup, usage, and contribution guidelines. +- Document public APIs and interfaces in docstrings, including parameters, return values, and exceptions. +- Keep inline documentation (e.g., comments, docstrings) up-to-date with code changes. +- Use type hints as a form of self-documentation to improve code clarity and IDE support. + +## Maintenance +- Remove unused imports, variables, or functions using tools like `ruff`. +- Refactor duplicated code into reusable functions, classes, or modules to reduce technical debt. +- Regularly review and update dependencies to mitigate vulnerabilities and ensure compatibility. +- Use static analysis tools (e.g., `ruff`, `ty`) to enforce code quality and catch potential issues. 
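As an illustration, here is a minimal sketch that pulls several of these conventions together: typed signatures, a Google-style docstring, grouped imports, a specific exception, a context manager, and the project's `configure_logger` from `lib.logger`. The module and function names are hypothetical and shown only to indicate the expected shape of new code.

```python
"""Example profile loader (illustrative sketch only)."""

import json
from pathlib import Path
from typing import Any

from lib.logger import configure_logger

logger = configure_logger(__name__)

MAX_PROFILE_BYTES = 1_048_576  # constants use UPPER_SNAKE_CASE


def load_profile(path: Path) -> dict[str, Any]:
    """Load a profile document from disk.

    Args:
        path: Location of the JSON profile file.

    Returns:
        The parsed profile data.

    Raises:
        ValueError: If the file is too large or does not contain valid JSON.
    """
    if path.stat().st_size > MAX_PROFILE_BYTES:
        raise ValueError(f"Profile file {path} exceeds {MAX_PROFILE_BYTES} bytes")
    try:
        # Context manager ensures the file handle is closed on all paths.
        with path.open(encoding="utf-8") as handle:
            return json.load(handle)
    except json.JSONDecodeError as exc:  # catch the specific exception, not Exception
        logger.error(f"Invalid JSON in profile file {path}: {exc}")
        raise ValueError(f"Profile file {path} is not valid JSON") from exc
```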
+ +## Tooling +- Format and lint code with `ruff` to ensure consistency and catch potential errors. +- Type-check code with `ty` to validate type hints. +- Manage dependencies and virtual environments with `uv` for reproducibility. +- Integrate tools into CI/CD pipelines to enforce compliance with these rules. \ No newline at end of file diff --git a/.cursor/rules/python.mdc b/.cursor/rules/python.mdc deleted file mode 100644 index bfae29a1..00000000 --- a/.cursor/rules/python.mdc +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Python Code Style and Formatting Guidelines -globs: *.py,**/*.py -alwaysApply: true ---- -# Code Style and Formatting -- Use double quotes for strings as specified in ruff.toml. -- Use 4 spaces for indentation, not tabs. -- Maximum line length should be 88 characters (Black default). -- Use type hints from the typing library for all function parameters and return types. -- Include docstrings for all modules, classes, and functions following Google style format. -- Use f-strings for string formatting instead of % or .format() when possible. But not when there are no variables. -- Import statements should be grouped in the following order with a blank line between each group: - 1. Standard library imports - 2. Related third-party imports - 3. Local application/library specific imports -- Use absolute imports rather than relative imports. -- Avoid wildcard imports (from module import *). - -# Naming Conventions -- Use snake_case for variables, functions, and methods. -- Use PascalCase for class names. -- Use UPPER_CASE for constants. -- Prefix private attributes and methods with a single underscore (_). -- Use descriptive names that clearly indicate the purpose of the variable, function, or class. - -# Code Organization -- Each module should have a clear, single responsibility. -- Keep functions and methods short and focused on a single task. -- Use comments sparingly and only when necessary to explain complex logic. -- Group related functionality together within modules and classes. -- Place imports at the top of the file, followed by constants, then classes, and finally functions. - -# Error Handling -- Use specific exception types rather than catching Exception. -- Handle exceptions at the appropriate level of abstraction. -- Include meaningful error messages in exceptions. -- Use context managers (with statements) for resource management. - -# Testing -- Write unit tests for all new functionality. -- Test both normal and edge cases. -- Use descriptive test names that explain what is being tested. -- Keep tests independent of each other. - -# Logging -- Use the project's standard logging configuration via configure_logger. -- Include appropriate log levels (DEBUG, INFO, WARNING, ERROR, CRITICAL). -- Provide context in log messages to aid debugging. - -# Performance -- Prefer list/dict/set comprehensions over loops when appropriate. -- Use generators for large data sets to minimize memory usage. -- Consider the time and space complexity of algorithms. - -# Documentation -- Keep docstrings up to date with code changes. -- Include examples in docstrings for complex functions. -- Document any non-obvious behavior or edge cases. - -# Security -- Never hardcode sensitive information like API keys or passwords. -- Validate and sanitize all user inputs. -- Follow the principle of least privilege when accessing resources. - -# Maintenance -- Remove any unused imports, variables, or functions. -- Refactor duplicated code into reusable functions or classes. 
-- Keep dependencies up to date and documented in requirements.txt. \ No newline at end of file diff --git a/.dockerignore b/.dockerignore index 843ced96..5cbe1ccc 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,66 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.pyd +*.pdb +*.egg-info/ +.eggs/ +*.egg +*.log + +# Node/Bun/JS/TS +node_modules/ +bun.lockb +npm-debug.log +yarn-error.log +.pnpm-debug.log +agent-tools-ts/node_modules/ +agent-tools-ts/.next/ +agent-tools-ts/dist/ +agent-tools-ts/.turbo/ +agent-tools-ts/.bun/ + +# General +.DS_Store .env -agent-tools-ts/.env +.env.* +*.swp +*.swo +*.bak +*.tmp +*.orig +*.old + +# Git .git -agent-tools-ts/.git \ No newline at end of file +.gitmodules +.gitignore + +# Docker +Dockerfile +.dockerignore + +# VSCode/Editor +.vscode/ +.idea/ +*.code-workspace + +# Test/Cache/Build Artifacts +.pytest_cache/ +.ruff_cache/ +*.coverage +coverage.* +htmlcov/ +dist/ +build/ +*.spec + +# Documentation +*.md +docs/ + +# Misc +*.sqlite3 +*.db +*.pid \ No newline at end of file diff --git a/.env.example b/.env.example index a7f72765..47604cc0 100644 --- a/.env.example +++ b/.env.example @@ -3,127 +3,120 @@ # ============================================================================= # Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) LOG_LEVEL=INFO - -# Backend type (supabase, cloudflare) -AIBTC_BACKEND="supabase" - -# Network configuration (mainnet, testnet) -NETWORK="testnet" - -# WebSocket settings -WEBSOCKETS_MAX_LINE_LENGTH=31928 +# Network Configuration +NETWORK=testnet # ============================================================================= # Database Configuration (Supabase) # ============================================================================= -AIBTC_SUPABASE_URL="https://hellowowld.supabase.co" -AIBTC_SUPABASE_SERVICE_KEY="your-service-key" -AIBTC_SUPABASE_BUCKET_NAME="your-bucket-name" -# Database connection details -AIBTC_SUPABASE_USER="user" -AIBTC_SUPABASE_PASSWORD="user" -AIBTC_SUPABASE_HOST="localhost" -AIBTC_SUPABASE_PORT="5432" -AIBTC_SUPABASE_DBNAME="postgres" +AIBTC_BACKEND=supabase +AIBTC_SUPABASE_USER=your_supabase_user +AIBTC_SUPABASE_PASSWORD=your_supabase_password +AIBTC_SUPABASE_HOST=your_supabase_host +AIBTC_SUPABASE_PORT=5432 +AIBTC_SUPABASE_DBNAME=your_database_name +AIBTC_SUPABASE_URL=https://your-project.supabase.co +AIBTC_SUPABASE_SERVICE_KEY=your_supabase_service_key +AIBTC_SUPABASE_BUCKET_NAME=your_bucket_name # ============================================================================= -# External API Endpoints & Keys +# Backend Wallet Configuration # ============================================================================= -# Webhook Configuration -AIBTC_WEBHOOK_URL="https://core-staging.aibtc.dev/webhooks/chainhook" -AIBTC_WEBHOOK_AUTH_TOKEN="Bearer your-webhook-auth-token" +AIBTC_BACKEND_WALLET_SEED_PHRASE=your_wallet_seed_phrase -# Platform APIs -AIBTC_PLATFORM_API_URL="https://api.platform.hiro.so" -AIBTC_HIRO_API_URL=https://api.hiro.so -HIRO_API_KEY="your-hiro-api-key" -AIBTC_ALEX_BASE_URL=https://api.alexgo.io/ -AIBTC_VELAR_BASE_URL="https://gateway.velar.network/" +# ============================================================================= +# Twitter Configuration +# ============================================================================= +AIBTC_TWITTER_ENABLED=false +AIBTC_TWITTER_INTERVAL_SECONDS=120 +AIBTC_TWITTER_CONSUMER_KEY=your_twitter_consumer_key +AIBTC_TWITTER_CONSUMER_SECRET=your_twitter_consumer_secret +AIBTC_TWITTER_CLIENT_ID=your_twitter_client_id 
+AIBTC_TWITTER_CLIENT_SECRET=your_twitter_client_secret +AIBTC_TWITTER_ACCESS_TOKEN=your_twitter_access_token +AIBTC_TWITTER_ACCESS_SECRET=your_twitter_access_secret +AIBTC_TWITTER_USERNAME=your_twitter_username +AIBTC_TWITTER_AUTOMATED_USER_ID=your_automated_user_id +AIBTC_TWITTER_WHITELISTED=user1,user2,user3 -# AI Services -OPENAI_MODEL_NAME="gpt-4.1" -OPENAI_API_KEY="sk-proj-your-api-key-here" -# For local model deployment -# OPENAI_API_BASE="http://localhost:5000" +# ============================================================================= +# Telegram Configuration +# ============================================================================= +AIBTC_TELEGRAM_BOT_TOKEN=your_telegram_bot_token +AIBTC_TELEGRAM_BOT_ENABLED=false -# Market Data APIs -SERPER_API_KEY="your-serper-api-key" -AIBTC_CMC_API_KEY='cmc-api-key' -AIBTC_LUNARCRUSH_API_KEY="lunarcrush-api-key" -AIBTC_LUNARCRUSH_BASE_URL="https://lunarcrush.com/api/v2" +# ============================================================================= +# Discord Configuration +# ============================================================================= +AIBTC_DISCORD_WEBHOOK_URL_PASSED=https://discord.com/api/webhooks/your_passed_webhook +AIBTC_DISCORD_WEBHOOK_URL_FAILED=https://discord.com/api/webhooks/your_failed_webhook # ============================================================================= -# Task Scheduling Configuration +# Job Scheduler Configuration (NEW NAMING - matches job types exactly) # ============================================================================= -# Schedule Sync + +# General Scheduler Settings AIBTC_SCHEDULE_SYNC_ENABLED=false AIBTC_SCHEDULE_SYNC_INTERVAL_SECONDS=60 -# DAO Processing Pipeline -# Step 1: Process DAO deployments -AIBTC_DAO_RUNNER_ENABLED=false -AIBTC_DAO_RUNNER_INTERVAL_SECONDS=30 +# Agent Account Deployer Job +AIBTC_AGENT_ACCOUNT_DEPLOYER_ENABLED=false +AIBTC_AGENT_ACCOUNT_DEPLOYER_INTERVAL_SECONDS=60 -# Step 2: Generate tweets for completed DAOs -AIBTC_DAO_TWEET_RUNNER_ENABLED=false -AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS=30 +# Chain State Monitor Job +AIBTC_CHAIN_STATE_MONITOR_ENABLED=true +AIBTC_CHAIN_STATE_MONITOR_INTERVAL_SECONDS=300 -# Step 3: Post generated tweets -AIBTC_TWEET_RUNNER_ENABLED=false -AIBTC_TWEET_RUNNER_INTERVAL_SECONDS=30 +# DAO Deployment Job +AIBTC_DAO_DEPLOYMENT_ENABLED=false +AIBTC_DAO_DEPLOYMENT_INTERVAL_SECONDS=60 -# Step 4: Process DAO proposal votes -AIBTC_DAO_PROPOSAL_VOTE_RUNNER_ENABLED=false -AIBTC_DAO_PROPOSAL_VOTE_RUNNER_INTERVAL_SECONDS=60 +# DAO Deployment Tweet Job +AIBTC_DAO_DEPLOYMENT_TWEET_ENABLED=false +AIBTC_DAO_DEPLOYMENT_TWEET_INTERVAL_SECONDS=60 -# Step 5: Process DAO proposal conclusions -AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_ENABLED=false -AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_INTERVAL_SECONDS=60 -AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID="your-wallet-id" +# DAO Proposal Conclude Job +AIBTC_DAO_PROPOSAL_CONCLUDE_ENABLED=false +AIBTC_DAO_PROPOSAL_CONCLUDE_INTERVAL_SECONDS=60 -# Step 6: -AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_ENABLED=false -AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_INTERVAL_SECONDS=60 +# DAO Proposal Embedder Job +AIBTC_DAO_PROPOSAL_EMBEDDER_ENABLED=false +AIBTC_DAO_PROPOSAL_EMBEDDER_INTERVAL_SECONDS=300 -# Step 6: Process agent account deployments -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED=false -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS=60 -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID="your-wallet-id" +# DAO Proposal Evaluation Job +AIBTC_DAO_PROPOSAL_EVALUATION_ENABLED=false 
+AIBTC_DAO_PROPOSAL_EVALUATION_INTERVAL_SECONDS=60 -# ============================================================================= -# Social Media Integration -# ============================================================================= -# Twitter Configuration -AIBTC_TWITTER_ENABLED=false -AIBTC_TWITTER_INTERVAL_SECONDS=120 +# DAO Proposal Vote Job +AIBTC_DAO_PROPOSAL_VOTE_ENABLED=false +AIBTC_DAO_PROPOSAL_VOTE_INTERVAL_SECONDS=60 + +# Discord Job +AIBTC_DISCORD_ENABLED=false +AIBTC_DISCORD_INTERVAL_SECONDS=30 + +# Tweet Job +AIBTC_TWEET_ENABLED=false +AIBTC_TWEET_INTERVAL_SECONDS=30 -# Twitter API Credentials -AIBTC_TWITTER_CONSUMER_KEY="your-twitter-consumer-key" -AIBTC_TWITTER_CONSUMER_SECRET="your-twitter-consumer-secret" -AIBTC_TWITTER_ACCESS_TOKEN="your-twitter-access-token" -AIBTC_TWITTER_ACCESS_SECRET="your-twitter-access-secret" -AIBTC_TWITTER_CLIENT_ID="your-twitter-client-id" -AIBTC_TWITTER_CLIENT_SECRET="your-twitter-client-secret" - -# Twitter User Configuration -AIBTC_TWITTER_AUTOMATED_USER_ID="your-twitter-automated-user-id" -AIBTC_TWITTER_PROFILE_ID="your-twitter-profile-id" -AIBTC_TWITTER_AGENT_ID="your-twitter-agent-id" -# Comma-separated list of whitelisted Twitter user IDs -AIBTC_TWITTER_WHITELISTED="your-twitter-whitelisted" -AIBTC_TWITTER_WHITELIST_ENABLED=false -# Telegram Configuration -AIBTC_TELEGRAM_BOT_TOKEN="your-telegram-bot-token" -AIBTC_TELEGRAM_BOT_ENABLED=false # ============================================================================= -# Additional Tools & Services +# API Configuration # ============================================================================= -AIBTC_FAKTORY_API_KEY="your-faktory-api-key" +AIBTC_BASEURL=https://app-staging.aibtc.dev +AIBTC_ALEX_BASE_URL=https://api.alexgo.io/ +AIBTC_HIRO_API_URL=https://api.hiro.so +AIBTC_PLATFORM_API_URL=https://api.platform.hiro.so +AIBTC_VELAR_BASE_URL=https://gateway.velar.network/ +AIBTC_LUNARCRUSH_BASE_URL=https://lunarcrush.com/api/v2 + +# API Keys +HIRO_API_KEY=your_hiro_api_key +AIBTC_LUNARCRUSH_API_KEY=your_lunarcrush_api_key +AIBTC_CMC_API_KEY=your_coinmarketcap_api_key +OPENAI_API_KEY=your_openai_api_key -# Bitflow Configuration -BITFLOW_API_HOST=https://bitflowapihost.hiro.so -BITFLOW_API_KEY="your-bitflow-api-key" -BITFLOW_STACKS_API_HOST=https://api.hiro.so/ -BITFLOW_READONLY_CALL_API_HOST=https://readonly-call-api.hiro.so +# Webhook Configuration +AIBTC_WEBHOOK_URL=https://your-webhook-url.com +AIBTC_WEBHOOK_AUTH_TOKEN=Bearer your_webhook_auth_token \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 7c106d6a..387a5e12 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -8,7 +8,7 @@ updates: - package-ecosystem: "pip" # See documentation for possible values directory: "/" # Location of package manifests schedule: - interval: "weekly" + interval: "daily" target-branch: "staging" groups: dev-dependencies: @@ -16,4 +16,6 @@ updates: - "*" ignore: - dependency-name: "pgvector" - versions: [">=0.3.1"] \ No newline at end of file + versions: [">=0.3.1, <=0.4.0"] + - dependency-name: "starlette" + versions: ["<=0.46.0"] \ No newline at end of file diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 480fc614..37c0e06e 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -1,20 +1,19 @@ -name: Docker Image CI +name: Docker Image Build Test on: push: - branches: [ "main", "staging" ] + branches: ["main", "staging"] pull_request: - branches: [ "main", 
"staging" ] + branches: ["main", "staging"] jobs: - - build: - + docker-image: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - with: - submodules: true - - name: Build the Docker image - run: docker build . --file Dockerfile --tag my-image-name:$(date +%s) + - uses: actions/checkout@v4 + with: + submodules: true + + - name: Build the Docker image + run: docker build . --file Dockerfile --tag my-image-name:$(date +%s) diff --git a/.github/workflows/ruff-checks.yml b/.github/workflows/ruff-checks.yml new file mode 100644 index 00000000..f60cf341 --- /dev/null +++ b/.github/workflows/ruff-checks.yml @@ -0,0 +1,35 @@ +name: Ruff Code Quality Checks + +on: + push: + branches: ["main", "staging"] + pull_request: + branches: ["main", "staging"] + +jobs: + ruff-check: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + submodules: false + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install uv + uses: astral-sh/setup-uv@v4 + with: + version: "latest" + + - name: Install dependencies + run: uv sync + + - name: Run ruff format check + run: uvx ruff format --check . + + - name: Run ruff lint check + run: uvx ruff check . diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..8999f660 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,10 @@ +repos: +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.12.0 + hooks: + # Run the linter. + - id: ruff-check + args: [ --fix ] + # Run the formatter. + - id: ruff-format \ No newline at end of file diff --git a/.python-version b/.python-version new file mode 100644 index 00000000..24ee5b1b --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.13 diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 4ad8bd30..00000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "codium.codeCompletion.enable": true -} diff --git a/Dockerfile b/Dockerfile index 32bce5c3..d86ec87a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,14 +1,53 @@ -FROM public.ecr.aws/docker/library/python:3.13 +FROM oven/bun:latest AS bun + +# First stage: build the application with uv +FROM public.ecr.aws/docker/library/python:3.13 AS builder + +# Enable bytecode compilation and set link mode +ENV UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy + +# Disable Python downloads to use the system interpreter across both images +ENV UV_PYTHON_DOWNLOADS=0 WORKDIR /usr/src/app -COPY requirements.txt ./ -RUN pip install --no-cache-dir -r requirements.txt +# Install uv +RUN pip install uv + +# Copy dependency files +COPY pyproject.toml uv.lock ./ + +# Install dependencies using the lockfile +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --frozen --no-install-project + +# Copy the rest of the code +COPY . /usr/src/app -COPY . . 
+# Sync again to install the project and all dependencies +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --frozen + +# Second stage: final image without uv +FROM public.ecr.aws/docker/library/python:3.13 + +# Install libmagic1 for mime type detection +RUN apt-get update && apt-get install -y libmagic1 + +# Copy the application from the builder +COPY --from=builder /usr/src/app /usr/src/app +COPY --from=bun /usr/local/bin/bun /usr/local/bin/bun +COPY --from=builder /usr/src/app/agent-tools-ts/package.json /usr/src/app/agent-tools-ts/bun.lock ./ + +# Install JS/TS dependencies +WORKDIR /usr/src/app/agent-tools-ts +RUN bun install --frozen-lockfile + +# Return to app directory +WORKDIR /usr/src/app -RUN curl -fsSL https://bun.sh/install | bash -RUN cp /root/.bun/bin/bun /usr/local/bin/bun -RUN cd /usr/src/app/agent-tools-ts/ && bun install +# Place executables in the environment at the front of the path +ENV PATH="/usr/src/app/.venv/bin:$PATH" -CMD [ "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000" ] \ No newline at end of file +# Run using uvicorn +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..95866bdd --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 aibtcdev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index fdf9c62e..c00a7286 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,39 @@ # aibtcdev-backend -## Overview +[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) -aibtcdev-backend is a sophisticated FastAPI-based backend service that powers AI-driven interactions with Bitcoin and Stacks blockchain technologies. The service provides: +> A sophisticated FastAPI-based backend service that powers AI-driven interactions with Bitcoin and Stacks blockchain technologies. -1. Real-time chat functionality with AI agents via WebSocket -2. Automated DAO management and monitoring -3. Social media integration (Twitter, Telegram, Discord) -4. Blockchain interaction capabilities (Stacks, Bitcoin) -5. Market data analysis and reporting -6. 
Document processing and vector search capabilities +aibtcdev-backend provides real-time chat functionality with AI agents, automated DAO management, social media integration, blockchain interaction capabilities, market data analysis, and document processing with vector search. -The system is designed to be modular, scalable, and easily configurable through environment variables. +**⚠️ Disclaimer**: aibtc.dev is not liable for any lost, locked, or mistakenly sent funds. This is alpha software—use at your own risk. Any STX sent to you is owned by you, the trader, and may be redeemed, including profits or losses, at the end of the aibtc.dev Champions Sprint (~5 days). By participating, you accept that aibtc.dev is not responsible for any product use, costs, taxes incurred from trading STX or any other digital asset, or any other liability. -## Disclaimer +## Table of Contents -aibtc.dev is not liable for any lost, locked, or mistakenly sent funds. This is alpha software—use at your own risk. Any STX sent to you is owned by you, the trader, and may be redeemed, including profits or losses, at the end of the aibtc.dev Champions Sprint (~5 days). By participating, you accept that aibtc.dev is not responsible for any product use, costs, taxes incurred from trading STX or any other digital asset, or any other liability. +- [Background](#background) +- [Install](#install) +- [Usage](#usage) +- [Maintainers](#maintainers) +- [Contributing](#contributing) +- [License](#license) -## Prerequisites +## Background + +aibtcdev-backend was created to bridge AI capabilities with blockchain technologies, specifically Bitcoin and Stacks. The system is designed to be modular, scalable, and easily configurable through environment variables. + +### Key Features + +- **AI Chat System**: Real-time WebSocket-based chat with AI agent integration, context-aware conversations, and vector search capabilities +- **DAO Management**: Automated DAO deployment monitoring, proposal creation and tracking, vote processing, and automated conclusion handling +- **Social Media Integration**: Twitter automation with automatic threading for tweets longer than 280 characters, Telegram bot integration, and Discord notifications +- **Blockchain Integration**: Stacks blockchain interaction, Bitcoin network monitoring, and multiple API integrations (Hiro, Alex, Velar, Platform API) +- **Market Analysis**: LunarCrush integration, CoinMarketCap data processing, and automated reporting +- **Background Processing**: Scheduled task management, event-driven processing, and multi-threaded task execution + + +## Install + +### Prerequisites - Python 3.13 - [Bun](https://bun.sh/) (for TypeScript tools) @@ -25,116 +41,28 @@ aibtc.dev is not liable for any lost, locked, or mistakenly sent funds. 
This is - Conda (recommended for development) or Docker - Node.js and npm (for agent tools) -## Project Structure - -``` -aibtcdev-backend/ -├── api/ # FastAPI endpoint definitions -│ ├── chat.py # WebSocket chat endpoints -│ ├── tools.py # Tool endpoints -│ ├── webhooks.py # Webhook handlers -│ └── dependencies.py # API dependencies -├── services/ # Core business logic -│ ├── workflows/ # Workflow implementations -│ ├── runner/ # Background task runners -│ ├── webhooks/ # Webhook processors -│ ├── discord/ # Discord integration -│ ├── chat.py # Chat service -│ ├── daos.py # DAO operations -│ ├── schedule.py # Task scheduling -│ ├── startup.py # App lifecycle management -│ ├── twitter.py # Twitter integration -│ ├── bot.py # Telegram bot -│ └── websocket.py # WebSocket management -├── backend/ # Database and storage -├── tools/ # AI agent tools -├── lib/ # Shared utilities -├── tests/ # Test suite -├── docs/ # Documentation -├── examples/ # Usage examples -└── agent-tools-ts/ # TypeScript-based agent tools -``` - -## Key Features - -### 1. AI Chat System -- Real-time WebSocket-based chat -- AI agent integration with OpenAI -- Context-aware conversations -- Document-based knowledge integration -- Vector search capabilities - -### 2. DAO Management -- Automated DAO deployment monitoring -- Proposal creation and tracking -- Vote processing -- Automated conclusion handling -- Tweet generation for DAO events - -### 3. Social Media Integration -- Twitter automation and monitoring -- Telegram bot integration -- Discord notifications -- Automated content generation -- Social engagement tracking - -### 4. Blockchain Integration -- Stacks blockchain interaction -- Bitcoin network monitoring -- Multiple API integrations: - - Hiro - - Alex - - Velar - - Platform API - -### 5. Market Analysis -- LunarCrush integration -- CoinMarketCap data processing -- Market trend analysis -- Automated reporting - -### 6. Background Processing -- Scheduled task management -- Event-driven processing -- Multi-threaded task execution -- Failure recovery and retry logic - -## Installation - -### 1. Clone the Repository +### Development Setup ```bash +# Clone the repository git clone [repository-url] cd aibtcdev-backend git submodule init git submodule update --remote -``` -### 2. Environment Setup - -1. Copy the example environment file: -```bash +# Copy environment file cp .env.example .env -``` +# Configure your environment variables by following the Configuration Guide -2. Configure your environment variables by following the [Configuration Guide](CONFIG.md) +# Install UV (modern Python package manager) +curl -LsSf https://astral.sh/uv/install.sh | sh +# Or on macOS: brew install uv -### 3. Development Setup (Conda Recommended) +# Create virtual environment and install dependencies +uv sync -```bash -# Install Miniconda -brew install miniconda - -# Initialize conda -conda init "$(basename "${SHELL}")" -# Restart your terminal - -# Create and activate environment -conda create --name aibackend python=3.12 -conda activate aibackend - -# Install dependencies -pip install -r requirements.txt +# Activate the virtual environment +source .venv/bin/activate # Set up TypeScript tools cd agent-tools-ts/ @@ -142,104 +70,77 @@ bun install cd .. ``` -### 4. Docker Setup +### Docker Setup ```bash docker build -t aibtcdev-backend . 
docker run -p 8000:8000 --env-file .env aibtcdev-backend ``` -## API Documentation +## Usage -### WebSocket Endpoints (`/chat`) -- `/chat/ws`: Real-time chat communication - - Supports message history - - AI agent integration - - Context management - - Document processing - -### Tool Endpoints (`/tools`) -- `/tools/available`: Available tool listing -- `/tools/execute`: Tool execution endpoint -- Custom tool integration support - -### Webhook Endpoints (`/webhooks`) -- `/webhooks/chainhook`: Blockchain event processing -- `/webhooks/github`: GitHub integration -- `/webhooks/discord`: Discord notifications +### Running the Development Server -### Bot Endpoints (`/bot`) -- `/bot/telegram`: Telegram bot integration -- User verification and management -- Command processing +```bash +uvicorn main:app --host 0.0.0.0 --port 8000 --reload +``` -## Development +The server will be available at `http://localhost:8000` with API documentation at `/docs`. -### Running the Development Server +### Testing ```bash -uvicorn main:app --host 0.0.0.0 --port 8000 --reload +pytest tests/ ``` ### Code Style The project uses ruff for code formatting and linting. Configuration is in `ruff.toml`. -### Testing - ```bash -pytest tests/ +ruff check . +ruff format . ``` -### Documentation -API documentation is available at `/docs` when running the server. -## Contributing +### Troubleshooting -1. Fork the repository -2. Create a feature branch -3. Make your changes -4. Run tests -5. Submit a pull request +**OpenAI Rate Limits** +- Check limits at https://platform.openai.com/settings/organization/limits +- TPM (Tokens Per Minute) limits: Tier 1: 200,000 TPM, Tier 2: 2,000,000 TPM -Guidelines: -- Follow the Python code style guide -- Add tests for new features -- Update documentation as needed -- Keep pull requests focused and atomic +**WebSocket Connection Issues** +- Check network connectivity and authentication tokens +- Verify server logs for details + +**Database Connection Issues** +- Verify Supabase credentials and network access +- Check connection string format -## Troubleshooting +## Maintainers -### Common Issues +[@aibtcdev](https://github.com/aibtcdev) -1. OpenAI Rate Limits - - Check limits at https://platform.openai.com/settings/organization/limits - - TPM (Tokens Per Minute) limits: - - Tier 1: 200,000 TPM - - Tier 2: 2,000,000 TPM +## Contributing -2. WebSocket Connection Issues - - Check network connectivity - - Verify authentication tokens - - Check server logs for details +PRs accepted. -3. Database Connection Issues - - Verify Supabase credentials - - Check network access to database - - Verify connection string format +### Guidelines -## Support +- Follow the Python code style guide +- Add tests for new features +- Update documentation as needed +- Keep pull requests focused and atomic -For support: -1. Check the documentation -2. Search existing issues -3. Create a new issue with: - - Clear description - - Steps to reproduce - - Expected vs actual behavior - - Environment details +### Development Process + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Run tests +5. 
Submit a pull request ## License -[License Information] +[MIT](LICENSE) aibtcdev diff --git a/agent-tools-ts b/agent-tools-ts index ea13e086..f30bca84 160000 --- a/agent-tools-ts +++ b/agent-tools-ts @@ -1 +1 @@ -Subproject commit ea13e0864d5755b29fd81990fc39b1b8a57b8ca4 +Subproject commit f30bca846e6ba841ab8b375dc1be6ce1b0424b06 diff --git a/api/chat.py b/api/chat.py index 517ea4aa..70ff4a87 100644 --- a/api/chat.py +++ b/api/chat.py @@ -8,13 +8,13 @@ from backend.factory import backend from backend.models import UUID, JobCreate, Profile from lib.logger import configure_logger -from services.chat import ( +from services.core.chat_service import ( get_job_history, get_thread_history, process_chat_message, - running_jobs, ) -from services.websocket import websocket_manager +from services.processing.streaming_service import running_jobs +from services.communication.websocket_service import websocket_manager # Configure logger logger = configure_logger(__name__) @@ -373,7 +373,9 @@ async def websocket_endpoint( if connection_accepted: try: # Ensure all jobs for this session are marked as disconnected - from services.chat import mark_jobs_disconnected_for_session + from services.processing.streaming_service import ( + mark_jobs_disconnected_for_session, + ) await asyncio.wait_for( mark_jobs_disconnected_for_session(session_id), timeout=2.0 diff --git a/api/tools.py b/api/tools.py index 0cf77ff3..bc354a86 100644 --- a/api/tools.py +++ b/api/tools.py @@ -1,10 +1,53 @@ -from typing import List, Optional +from typing import Any, Dict, List, Optional -from fastapi import APIRouter, HTTPException, Query, Request +from fastapi import APIRouter, Depends, HTTPException, Query, Request +from pydantic import BaseModel, Field # Added import for Pydantic models from starlette.responses import JSONResponse +import httpx # Added import for async HTTP requests +from api.dependencies import ( + verify_profile_from_token, # Added verify_profile_from_token + verify_profile, +) +from backend.factory import backend # Added backend factory +from backend.models import ( # Added Profile, AgentFilter, Wallet + UUID, + AgentFilter, + ContractStatus, + Profile, + Proposal, + ProposalCreate, + ProposalType, + Wallet, + WalletFilter, +) +from config import config # Added config import from lib.logger import configure_logger from lib.tools import Tool, get_available_tools +from services.ai.workflows.agents.proposal_metadata import ( + ProposalMetadataAgent, +) + +# Import the proposal recommendation agent and metadata agent +from services.ai.workflows.agents.proposal_recommendation import ( + ProposalRecommendationAgent, +) +from services.ai.workflows.comprehensive_evaluation import ( + evaluate_proposal_comprehensive, +) +from services.ai.workflows.agents.evaluator import ( + DEFAULT_SYSTEM_PROMPT, + DEFAULT_USER_PROMPT_TEMPLATE, +) +from tools.dao_ext_action_proposals import ( + ProposeActionSendMessageTool, # Added ProposeActionSendMessageTool + VetoActionProposalTool, # Added VetoActionProposalTool +) +from tools.faktory import ( + FaktoryExecuteBuyTool, # Added import for Faktory tool + FaktoryGetSbtcTool, # Added import for Faktory sBTC faucet tool +) +from tools.wallet import WalletFundMyWalletFaucet # Added import for wallet faucet tool # Configure logger logger = configure_logger(__name__) @@ -16,6 +59,244 @@ available_tools = get_available_tools() +async def _create_proposal_from_tool_result( + tool_result: dict, + payload: "ProposeSendMessageRequest", + enhanced_message: str, + title: str, + summary: str, + 
tags: List[str], + profile: "Profile", + wallet: "Wallet", +) -> Optional["Proposal"]: + """Create a proposal record from successful tool execution result. + + Args: + tool_result: The result from ProposeActionSendMessageTool execution + payload: The original request payload + enhanced_message: The enhanced message with title and tags + title: The generated title for the proposal + summary: The generated summary for the proposal + tags: The generated tags for the proposal + profile: The user's profile + wallet: The agent's wallet + + Returns: + The created proposal or None if creation failed + """ + import re + + try: + output = tool_result.get("output", "") + if not output: + logger.warning("No output in tool result") + return None + + # Extract transaction ID from the output + tx_id_match = re.search( + r"Transaction broadcasted successfully: (0x[a-fA-F0-9]+)", output + ) + if not tx_id_match: + logger.warning("Could not extract transaction ID from tool output") + return None + + tx_id = tx_id_match.group(1) + + # Use the voting contract from the original payload since it's no longer in the output + voting_contract = payload.action_proposals_voting_extension + + # Find the DAO based on the voting contract or token contract + # First try to find by the voting contract in extensions + extensions = backend.list_extensions() + dao_id = None + + for extension in extensions: + if extension.contract_principal == voting_contract: + dao_id = extension.dao_id + break + + # If not found in extensions, try to find by token contract + if not dao_id: + tokens = backend.list_tokens() + for token in tokens: + if token.contract_principal == payload.dao_token_contract_address: + dao_id = token.dao_id + break + + if not dao_id: + logger.warning( + f"Could not find DAO for contracts: {voting_contract}, {payload.dao_token_contract_address}" + ) + return None + + # Get the appropriate wallet address based on network configuration + creator_address = ( + wallet.mainnet_address + if config.network.network == "mainnet" + else wallet.testnet_address + ) + + # Create the proposal record + proposal_content = ProposalCreate( + dao_id=dao_id, + title=title if title else "Action Proposal", + content=enhanced_message, + summary=summary, + tags=tags, + status=ContractStatus.DRAFT, # Since transaction was successful + contract_principal=voting_contract, + tx_id=tx_id, + type=ProposalType.ACTION, + # Additional fields that might be available + creator=creator_address or "Unknown", + memo=payload.memo, + ) + + proposal = backend.create_proposal(proposal_content) + logger.info(f"Created proposal record {proposal.id} for transaction {tx_id}") + return proposal + + except Exception as e: + logger.error(f"Error creating proposal from tool result: {str(e)}") + return None + + +class FaktoryBuyTokenRequest(BaseModel): + """Request body for executing a Faktory buy order.""" + + # agent_id: UUID = Field(..., description="The ID of the agent performing the action") # Removed agent_id + btc_amount: str = Field( + ..., + description="Amount of BTC to spend on the purchase in standard units (e.g. 
0.0004 = 0.0004 BTC or 40000 sats)", + ) + dao_token_dex_contract_address: str = Field( + ..., description="Contract principal where the DAO token is listed" + ) + slippage: Optional[str] = Field( + default="15", + description="Slippage tolerance in basis points (default: 15, which is 0.15%)", + ) + + +class ProposeSendMessageRequest(BaseModel): + """Request body for proposing a DAO action to send a message.""" + + action_proposals_voting_extension: str = Field( + ..., + description="Contract principal where the DAO creates action proposals for voting by DAO members.", + ) + action_proposal_contract_to_execute: str = Field( + ..., + description="Contract principal of the action proposal that executes sending a message.", + ) + dao_token_contract_address: str = Field( + ..., + description="Contract principal of the token used by the DAO for voting.", + ) + message: str = Field( + ..., + description="Message to be sent through the DAO proposal system.", + ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal.", + ) + + +class VetoActionProposalRequest(BaseModel): + """Request body for vetoing a DAO action proposal.""" + + dao_action_proposal_voting_contract: str = Field( + ..., + description="Contract principal where the DAO creates action proposals for voting by DAO members.", + ) + proposal_id: str = Field( + ..., + description="ID of the proposal to veto.", + ) + + +class FundWalletFaucetRequest(BaseModel): + """Request body for funding wallet with testnet STX tokens.""" + + pass # No parameters needed as the tool uses wallet_id from initialization + + +class FundSbtcFaucetRequest(BaseModel): + """Request body for requesting testnet sBTC from Faktory faucet.""" + + pass # No parameters needed as the tool uses wallet_id from initialization + + +class ProposalRecommendationRequest(BaseModel): + """Request body for getting a proposal recommendation.""" + + dao_id: UUID = Field( + ..., + description="The ID of the DAO to generate a proposal recommendation for.", + ) + focus_area: Optional[str] = Field( + default="general improvement", + description="Specific area of focus for the recommendation (e.g., 'community growth', 'technical development', 'partnerships')", + ) + specific_needs: Optional[str] = Field( + default="", + description="Any specific needs or requirements to consider in the recommendation", + ) + model_name: Optional[str] = Field( + default="gpt-4.1", + description="LLM model to use for generation (e.g., 'gpt-4.1', 'gpt-4o', 'gpt-3.5-turbo')", + ) + temperature: Optional[float] = Field( + default=0.1, + description="Temperature for LLM generation (0.0-2.0). 
Lower = more focused, Higher = more creative", + ge=0.0, + le=2.0, + ) + + +class ComprehensiveEvaluationRequest(BaseModel): + """Request body for comprehensive proposal evaluation.""" + + proposal_id: str = Field( + ..., + description="Unique identifier for the proposal being evaluated.", + ) + proposal_content: Optional[str] = Field( + None, + description="Optional proposal content to override the default proposal content.", + ) + dao_id: Optional[UUID] = Field( + None, + description="Optional DAO ID for context.", + ) + custom_system_prompt: Optional[str] = Field( + None, + description="Optional custom system prompt to override the default evaluation prompt.", + ) + custom_user_prompt: Optional[str] = Field( + None, + description="Optional custom user prompt to override the default evaluation instructions.", + ) + config: Optional[Dict[str, Any]] = Field( + default_factory=dict, + description="Optional configuration for the evaluation agent.", + ) + + +class DefaultPromptsResponse(BaseModel): + """Response body for default evaluation prompts.""" + + system_prompt: str = Field( + ..., + description="The default system prompt used for comprehensive evaluation.", + ) + user_prompt_template: str = Field( + ..., + description="The default user prompt template used for comprehensive evaluation.", + ) + + @router.get("/available", response_model=List[Tool]) async def get_tools( request: Request, @@ -131,3 +412,739 @@ async def search_tools( except Exception as e: logger.error(f"Failed to search tools with query '{query}'", exc_info=e) raise HTTPException(status_code=500, detail=f"Failed to search tools: {str(e)}") + + +@router.post("/faktory/execute_buy") +async def execute_faktory_buy( + request: Request, + payload: FaktoryBuyTokenRequest, + profile: Profile = Depends(verify_profile_from_token), # Added auth dependency +) -> JSONResponse: + """Execute a buy order on Faktory DEX. + + This endpoint allows an authenticated user's agent to execute a buy order + for a specified token using BTC on the Faktory DEX. + + Args: + request: The FastAPI request object. + payload: The request body containing btc_amount, + dao_token_dex_contract_address, and optional slippage. + profile: The authenticated user's profile. + + Returns: + JSONResponse: The result of the buy order execution. + + Raises: + HTTPException: If there's an error executing the buy order, or if the + agent for the profile is not found. + """ + try: + logger.info( + f"Faktory execute buy request received from {request.client.host if request.client else 'unknown'} for profile {profile.id}" + ) + + # Get agent_id from profile_id + agents = backend.list_agents(AgentFilter(profile_id=profile.id)) + if not agents: + logger.error(f"No agent found for profile ID: {profile.id}") + raise HTTPException( + status_code=404, + detail=f"No agent found for profile ID: {profile.id}", + ) + + agent = agents[0] # Assuming the first agent is the one to use + agent_id = agent.id + + # get wallet id from agent + wallets = backend.list_wallets(WalletFilter(agent_id=agent_id)) + if not wallets: + logger.error(f"No wallet found for agent ID: {agent_id}") + raise HTTPException( + status_code=404, + detail=f"No wallet found for agent ID: {agent_id}", + ) + + wallet = wallets[0] # Get the first wallet for this agent + + logger.info( + f"Using agent {agent_id} for profile {profile.id} to execute Faktory buy." 
+ ) + + tool = FaktoryExecuteBuyTool(wallet_id=wallet.id) # Use fetched agent_id + result = await tool._arun( + btc_amount=payload.btc_amount, + dao_token_dex_contract_address=payload.dao_token_dex_contract_address, + slippage=payload.slippage, + ) + + logger.debug( + f"Faktory execute buy result for agent {agent_id} (profile {profile.id}): {result}" + ) + return JSONResponse(content=result) + + except HTTPException as he: + # Re-raise HTTPExceptions directly + raise he + except Exception as e: + logger.error( + f"Failed to execute Faktory buy for profile {profile.id}", exc_info=e + ) + raise HTTPException( + status_code=500, + detail=f"Failed to execute Faktory buy order: {str(e)}", + ) + + +@router.post("/dao/action_proposals/propose_send_message") +async def propose_dao_action_send_message( + request: Request, + payload: ProposeSendMessageRequest, + profile: Profile = Depends(verify_profile_from_token), +) -> JSONResponse: + """Propose a DAO action to send a message. + + This endpoint allows an authenticated user's agent to create a proposal + for sending a message via the DAO's action proposal system. + + Args: + request: The FastAPI request object. + payload: The request body containing the proposal details. + profile: The authenticated user's profile. + + Returns: + JSONResponse: The result of the proposal creation. + + Raises: + HTTPException: If there's an error, or if the agent for the profile is not found. + """ + try: + logger.info( + f"DAO propose send message request received from {request.client.host if request.client else 'unknown'} for profile {profile.id}" + ) + + agents = backend.list_agents(AgentFilter(profile_id=profile.id)) + if not agents: + logger.error(f"No agent found for profile ID: {profile.id}") + raise HTTPException( + status_code=404, + detail=f"No agent found for profile ID: {profile.id}", + ) + + agent = agents[0] + agent_id = agent.id + + # get wallet id from agent + wallets = backend.list_wallets(WalletFilter(agent_id=agent_id)) + if not wallets: + logger.error(f"No wallet found for agent ID: {agent_id}") + raise HTTPException( + status_code=404, + detail=f"No wallet found for agent ID: {agent_id}", + ) + + wallet = wallets[0] # Get the first wallet for this agent + + logger.info( + f"Using wallet {wallet.id} for profile {profile.id} to propose DAO send message action." 
+ ) + + # Generate title, summary, and tags for the message before sending + try: + metadata_agent = ProposalMetadataAgent() + metadata_state = { + "proposal_content": payload.message, + "dao_name": "", # Could be enhanced to fetch DAO name if available + "proposal_type": "action_proposal", + } + + metadata_result = await metadata_agent.process(metadata_state) + title = metadata_result.get("title", "") + summary = metadata_result.get("summary", "") + metadata_tags = metadata_result.get("tags", []) + + # Enhance message with title and tags using structured format + enhanced_message = payload.message + + # Add metadata section if we have title or tags + if title or metadata_tags: + enhanced_message = f"{payload.message}\n\n--- Metadata ---" + + if title: + enhanced_message += f"\nTitle: {title}" + logger.info(f"Enhanced message with title: {title}") + + if metadata_tags: + tags_string = "|".join(metadata_tags) + enhanced_message += f"\nTags: {tags_string}" + logger.info(f"Enhanced message with tags: {metadata_tags}") + else: + logger.warning("No title or tags generated for the message") + + except Exception as e: + logger.error(f"Failed to generate title and metadata: {str(e)}") + # Continue with original message if enhancement fails + enhanced_message = payload.message + + tool = ProposeActionSendMessageTool(wallet_id=wallet.id) + result = await tool._arun( + action_proposals_voting_extension=payload.action_proposals_voting_extension, + action_proposal_contract_to_execute=payload.action_proposal_contract_to_execute, + dao_token_contract_address=payload.dao_token_contract_address, + message=enhanced_message, + memo=payload.memo, + ) + + logger.debug( + f"DAO propose send message result for wallet {wallet.id} (profile {profile.id}): {result}" + ) + + # Create proposal record if tool execution was successful + if result.get("success") and result.get("output"): + try: + await _create_proposal_from_tool_result( + result, + payload, + enhanced_message, + title if "title" in locals() else "", + summary if "summary" in locals() else "", + metadata_tags if "metadata_tags" in locals() else [], + profile, + wallet, + ) + except Exception as e: + logger.error(f"Failed to create proposal record: {str(e)}") + # Don't fail the entire request if proposal creation fails + + return JSONResponse(content=result) + + except HTTPException as he: + raise he + except Exception as e: + logger.error( + f"Failed to propose DAO send message action for profile {profile.id}", + exc_info=e, + ) + raise HTTPException( + status_code=500, + detail=f"Failed to propose DAO send message action: {str(e)}", + ) + + +@router.post("/dao/action_proposals/veto_proposal") +async def veto_dao_action_proposal( + request: Request, + payload: VetoActionProposalRequest, + profile: Profile = Depends(verify_profile_from_token), +) -> JSONResponse: + """Veto a DAO action proposal. + + This endpoint allows an authenticated user's agent to veto an existing + action proposal in the DAO's action proposal system. + + Args: + request: The FastAPI request object. + payload: The request body containing the proposal details to veto. + profile: The authenticated user's profile. + + Returns: + JSONResponse: The result of the veto operation. + + Raises: + HTTPException: If there's an error, or if the agent for the profile is not found. 
+ """ + try: + logger.info( + f"DAO veto action proposal request received from {request.client.host if request.client else 'unknown'} for profile {profile.id}" + ) + + agents = backend.list_agents(AgentFilter(profile_id=profile.id)) + if not agents: + logger.error(f"No agent found for profile ID: {profile.id}") + raise HTTPException( + status_code=404, + detail=f"No agent found for profile ID: {profile.id}", + ) + + agent = agents[0] + agent_id = agent.id + + # get wallet id from agent + wallets = backend.list_wallets(WalletFilter(agent_id=agent_id)) + if not wallets: + logger.error(f"No wallet found for agent ID: {agent_id}") + raise HTTPException( + status_code=404, + detail=f"No wallet found for agent ID: {agent_id}", + ) + + wallet = wallets[0] # Get the first wallet for this agent + + logger.info( + f"Using wallet {wallet.id} for profile {profile.id} to veto DAO action proposal {payload.proposal_id}." + ) + + # get proposal from id + proposal = backend.get_proposal(payload.proposal_id) + if not proposal: + logger.error(f"No proposal found for ID: {payload.proposal_id}") + raise HTTPException( + status_code=404, + detail=f"No proposal found for ID: {payload.proposal_id}", + ) + + tool = VetoActionProposalTool(wallet_id=wallet.id) + result = await tool._arun( + dao_action_proposal_voting_contract=payload.dao_action_proposal_voting_contract, + proposal_id=proposal.proposal_id, + ) + + logger.debug( + f"DAO veto action proposal result for wallet {wallet.id} (profile {profile.id}): {result}" + ) + return JSONResponse(content=result) + + except HTTPException as he: + raise he + except Exception as e: + logger.error( + f"Failed to veto DAO action proposal for profile {profile.id}", + exc_info=e, + ) + raise HTTPException( + status_code=500, + detail=f"Failed to veto DAO action proposal: {str(e)}", + ) + + +@router.post("/dao/proposal_recommendations/generate") +async def generate_proposal_recommendation( + request: Request, + payload: ProposalRecommendationRequest, + profile: Profile = Depends(verify_profile_from_token), +) -> JSONResponse: + """Generate a proposal recommendation for a DAO. + + This endpoint allows an authenticated user to get AI-generated proposal + recommendations based on the DAO's mission, description, and previous proposals. + + Args: + request: The FastAPI request object. + payload: The request body containing dao_id and optional parameters. + profile: The authenticated user's profile. + + Returns: + JSONResponse: The generated proposal recommendation. + + Raises: + HTTPException: If there's an error, or if the DAO is not found. 
+ """ + try: + logger.info( + f"Proposal recommendation request received from {request.client.host if request.client else 'unknown'} for profile {profile.id} and DAO {payload.dao_id}" + ) + + # Verify that the DAO exists + dao = backend.get_dao(payload.dao_id) + if not dao: + logger.error(f"DAO with ID {payload.dao_id} not found") + raise HTTPException( + status_code=404, + detail=f"DAO with ID {payload.dao_id} not found", + ) + + logger.info( + f"Generating proposal recommendation for DAO {dao.name} (ID: {payload.dao_id})" + ) + + # Create the proposal recommendation agent with model configuration + config = { + "model_name": "gpt-4.1", # Use model from request or default + "temperature": 0.9, # Use temperature from request or default + "streaming": True, # Enable streaming responses + "callbacks": [], # Optional callback handlers + } + agent = ProposalRecommendationAgent(config=config) + + # Prepare state for the agent + state = { + "dao_id": payload.dao_id, + "focus_area": payload.focus_area, + "specific_needs": payload.specific_needs, + } + + # Get the recommendation + result = await agent.process(state) + + logger.debug( + f"Proposal recommendation result for DAO {payload.dao_id}: {result.get('title', 'Unknown')}" + ) + return JSONResponse(content=result) + + except HTTPException as he: + raise he + except Exception as e: + logger.error( + f"Failed to generate proposal recommendation for DAO {payload.dao_id}", + exc_info=e, + ) + raise HTTPException( + status_code=500, + detail=f"Failed to generate proposal recommendation: {str(e)}", + ) + + +@router.post("/wallet/fund_testnet_faucet") +async def fund_wallet_with_testnet_faucet( + request: Request, + payload: FundWalletFaucetRequest, + profile: Profile = Depends(verify_profile_from_token), +) -> JSONResponse: + """Fund wallet with testnet STX tokens using the faucet. + + This endpoint allows an authenticated user's agent to request testnet STX tokens + from the Stacks testnet faucet. This operation only works on testnet. + + Args: + request: The FastAPI request object. + payload: The request body (empty as no parameters are needed). + profile: The authenticated user's profile. + + Returns: + JSONResponse: The result of the faucet funding operation. + + Raises: + HTTPException: If there's an error, or if the agent/wallet for the profile is not found. + """ + try: + logger.info( + f"Wallet testnet faucet request received from {request.client.host if request.client else 'unknown'} for profile {profile.id}" + ) + + # Get agent from profile + agents = backend.list_agents(AgentFilter(profile_id=profile.id)) + if not agents: + logger.error(f"No agent found for profile ID: {profile.id}") + raise HTTPException( + status_code=404, + detail=f"No agent found for profile ID: {profile.id}", + ) + + agent = agents[0] + agent_id = agent.id + + # Get wallet from agent + wallets = backend.list_wallets(WalletFilter(agent_id=agent_id)) + if not wallets: + logger.error(f"No wallet found for agent ID: {agent_id}") + raise HTTPException( + status_code=404, + detail=f"No wallet found for agent ID: {agent_id}", + ) + + wallet = wallets[0] # Get the first wallet for this agent + + logger.info( + f"Using wallet {wallet.id} for profile {profile.id} to fund with testnet faucet." 
+ ) + + # Initialize and execute the wallet faucet tool + tool = WalletFundMyWalletFaucet(wallet_id=wallet.id) + result = await tool._arun() + + logger.debug( + f"Wallet testnet faucet result for wallet {wallet.id} (profile {profile.id}): {result}" + ) + return JSONResponse(content=result) + + except HTTPException as he: + raise he + except Exception as e: + logger.error( + f"Failed to fund wallet with testnet faucet for profile {profile.id}", + exc_info=e, + ) + raise HTTPException( + status_code=500, + detail=f"Failed to fund wallet with testnet faucet: {str(e)}", + ) + + +@router.post("/faktory/fund_testnet_sbtc") +async def fund_with_testnet_sbtc_faucet( + request: Request, + payload: FundSbtcFaucetRequest, + profile: Profile = Depends(verify_profile_from_token), +) -> JSONResponse: + """Request testnet sBTC from the Faktory faucet. + + This endpoint allows an authenticated user's agent to request testnet sBTC tokens + from the Faktory faucet. This operation only works on testnet. + + Args: + request: The FastAPI request object. + payload: The request body (empty as no parameters are needed). + profile: The authenticated user's profile. + + Returns: + JSONResponse: The result of the sBTC faucet request operation. + + Raises: + HTTPException: If there's an error, or if the agent/wallet for the profile is not found. + """ + try: + logger.info( + f"Faktory testnet sBTC faucet request received from {request.client.host if request.client else 'unknown'} for profile {profile.id}" + ) + + # Get agent from profile + agents = backend.list_agents(AgentFilter(profile_id=profile.id)) + if not agents: + logger.error(f"No agent found for profile ID: {profile.id}") + raise HTTPException( + status_code=404, + detail=f"No agent found for profile ID: {profile.id}", + ) + + agent = agents[0] + agent_id = agent.id + + # Get wallet from agent + wallets = backend.list_wallets(WalletFilter(agent_id=agent_id)) + if not wallets: + logger.error(f"No wallet found for agent ID: {agent_id}") + raise HTTPException( + status_code=404, + detail=f"No wallet found for agent ID: {agent_id}", + ) + + wallet = wallets[0] # Get the first wallet for this agent + + logger.info( + f"Using wallet {wallet.id} for profile {profile.id} to request testnet sBTC from Faktory faucet." + ) + + # Initialize and execute the Faktory sBTC faucet tool + tool = FaktoryGetSbtcTool(wallet_id=wallet.id) + result = await tool._arun() + + logger.debug( + f"Faktory testnet sBTC faucet result for wallet {wallet.id} (profile {profile.id}): {result}" + ) + return JSONResponse(content=result) + + except HTTPException as he: + raise he + except Exception as e: + logger.error( + f"Failed to request testnet sBTC from Faktory faucet for profile {profile.id}", + exc_info=e, + ) + raise HTTPException( + status_code=500, + detail=f"Failed to request testnet sBTC from Faktory faucet: {str(e)}", + ) + + +@router.get("/twitter/oembed") +async def get_twitter_embed( + request: Request, + url: str = Query(..., description="Twitter/X.com URL to embed"), + media_max_width: Optional[int] = Query(560, description="Maximum width for media"), + hide_thread: Optional[bool] = Query(False, description="Hide thread"), +) -> JSONResponse: + """Proxy endpoint for Twitter oembed API. + + This endpoint acts as a proxy to Twitter's oembed API to avoid CORS issues + when embedding tweets in web applications. + + Args: + request: The FastAPI request object. + url: The Twitter/X.com URL to embed. + media_max_width: Maximum width for embedded media (default: 560). 
+ hide_thread: Whether to hide the thread (default: False). + + Returns: + JSONResponse: The oembed data from Twitter or error details. + + Raises: + HTTPException: If there's an error with the request or Twitter API. + """ + try: + logger.info( + f"Twitter oembed request received from {request.client.host if request.client else 'unknown'} for URL: {url}" + ) + + # Validate the URL format + if not url.startswith(("https://x.com/", "https://twitter.com/")): + logger.warning(f"Invalid Twitter URL provided: {url}") + raise HTTPException( + status_code=400, + detail="Invalid Twitter URL. URL must start with https://x.com/ or https://twitter.com/", + ) + + # Make async request to Twitter oembed API + async with httpx.AsyncClient() as client: + oembed_url = "https://publish.twitter.com/oembed" + params = { + "url": url, + "media_max_width": media_max_width, + "partner": "", + "hide_thread": hide_thread, + } + + logger.debug(f"Making request to Twitter oembed API with params: {params}") + + response = await client.get(oembed_url, params=params, timeout=10.0) + + if response.status_code == 200: + logger.info(f"Successfully retrieved oembed data for URL: {url}") + return JSONResponse(content=response.json()) + elif response.status_code == 404: + logger.warning(f"Twitter post not found for URL: {url}") + raise HTTPException(status_code=404, detail="Twitter post not found") + else: + logger.error(f"Twitter API error {response.status_code} for URL: {url}") + raise HTTPException( + status_code=response.status_code, + detail=f"Twitter API error: {response.status_code}", + ) + + except httpx.TimeoutException: + logger.error(f"Request timeout for Twitter URL: {url}") + raise HTTPException(status_code=408, detail="Request to Twitter API timed out") + except httpx.RequestError as e: + logger.error(f"Request failed for Twitter URL {url}: {str(e)}") + raise HTTPException( + status_code=500, detail=f"Failed to connect to Twitter API: {str(e)}" + ) + except HTTPException as he: + # Re-raise HTTPExceptions directly + raise he + except Exception as e: + logger.error(f"Unexpected error for Twitter URL {url}: {str(e)}", exc_info=e) + raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}") + + +@router.get("/evaluation/default_prompts", response_model=DefaultPromptsResponse) +async def get_default_evaluation_prompts( + request: Request, + profile: Profile = Depends(verify_profile), +) -> JSONResponse: + """Get the default system and user prompts for comprehensive evaluation. + + This endpoint returns the default prompts used by the comprehensive + evaluation system. These can be used as templates for custom evaluation + prompts in the frontend. + + Args: + request: The FastAPI request object. + profile: The authenticated user's profile. + + Returns: + JSONResponse: The default system and user prompt templates. + + Raises: + HTTPException: If there's an error retrieving the prompts. 
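For illustration, a hedged sketch of fetching these default templates and feeding an edited copy into the comprehensive-evaluation endpoint defined further below. The host and bearer-token auth are assumptions; the response keys and custom prompt fields mirror the handlers in this file.

```python
# Hedged sketch only, not part of this patch. Host and bearer auth are
# assumptions; response keys and custom prompt fields follow the handlers
# in this router.
import httpx

BASE_URL = "https://app-staging.aibtc.dev"  # assumed host


def evaluate_with_custom_prompts(token: str, proposal_id: str) -> dict:
    headers = {"Authorization": f"Bearer {token}"}

    # Fetch the default templates to use as a starting point.
    defaults_resp = httpx.get(
        f"{BASE_URL}/evaluation/default_prompts", headers=headers, timeout=30.0
    )
    defaults_resp.raise_for_status()
    defaults = defaults_resp.json()

    # Tweak the system prompt, then run the comprehensive evaluation with it.
    custom_system = defaults["system_prompt"] + "\nFocus on treasury risk."
    response = httpx.post(
        f"{BASE_URL}/evaluation/comprehensive",
        headers=headers,
        json={
            "proposal_id": proposal_id,
            "custom_system_prompt": custom_system,
            "custom_user_prompt": defaults["user_prompt_template"],
        },
        timeout=120.0,
    )
    response.raise_for_status()
    return response.json()
```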
+ """ + try: + logger.info( + f"Default evaluation prompts request received from {request.client.host if request.client else 'unknown'} for profile {profile.id}" + ) + + # Return the default prompts + response_data = { + "system_prompt": DEFAULT_SYSTEM_PROMPT, + "user_prompt_template": DEFAULT_USER_PROMPT_TEMPLATE, + } + + logger.debug(f"Returning default evaluation prompts for profile {profile.id}") + return JSONResponse(content=response_data) + + except Exception as e: + logger.error( + f"Failed to retrieve default evaluation prompts for profile {profile.id}", + exc_info=e, + ) + raise HTTPException( + status_code=500, + detail=f"Failed to retrieve default evaluation prompts: {str(e)}", + ) + + +@router.post("/evaluation/comprehensive") +async def run_comprehensive_evaluation( + request: Request, + payload: ComprehensiveEvaluationRequest, + profile: Profile = Depends(verify_profile), +) -> JSONResponse: + """Run comprehensive evaluation on a proposal with optional custom prompts. + + This endpoint allows an authenticated user to run the comprehensive + evaluation workflow on a proposal, with optional custom system and user + prompts to override the defaults. + + Args: + request: The FastAPI request object. + payload: The request body containing proposal data and optional custom prompts. + profile: The authenticated user's profile. + + Returns: + JSONResponse: The comprehensive evaluation results. + + Raises: + HTTPException: If there's an error during evaluation. + """ + try: + logger.info( + f"Comprehensive evaluation request received from {request.client.host if request.client else 'unknown'} for profile {profile.id}" + ) + + # Get agent from profile for context + agents = backend.list_agents(AgentFilter(profile_id=profile.id)) + agent_id = None + if agents: + agent_id = str(agents[0].id) + + # Look up the proposal to get its content + proposal = backend.get_proposal(payload.proposal_id) + if not proposal: + logger.error(f"Proposal with ID {payload.proposal_id} not found") + raise HTTPException( + status_code=404, + detail=f"Proposal with ID {payload.proposal_id} not found", + ) + + proposal_content = payload.proposal_content or proposal.content or "" + + logger.info( + f"Starting comprehensive evaluation for proposal {payload.proposal_id} with agent {agent_id}" + ) + + # Run the comprehensive evaluation + result = await evaluate_proposal_comprehensive( + proposal_id=payload.proposal_id, + proposal_content=proposal_content, + config=payload.config, + dao_id=str(payload.dao_id) if payload.dao_id else None, + agent_id=agent_id, + profile_id=str(profile.id), + custom_system_prompt=payload.custom_system_prompt, + custom_user_prompt=payload.custom_user_prompt, + ) + + logger.debug( + f"Comprehensive evaluation completed for proposal {payload.proposal_id}: {'Approved' if result.decision else 'Rejected'}" + ) + return JSONResponse(content=result.model_dump()) + + except HTTPException as he: + raise he + except Exception as e: + logger.error( + f"Failed to run comprehensive evaluation for profile {profile.id}", + exc_info=e, + ) + raise HTTPException( + status_code=500, + detail=f"Failed to run comprehensive evaluation: {str(e)}", + ) diff --git a/api/webhooks.py b/api/webhooks.py index c0e2233f..c1ca07a3 100644 --- a/api/webhooks.py +++ b/api/webhooks.py @@ -4,9 +4,9 @@ from api.dependencies import verify_webhook_auth from lib.logger import configure_logger -from services.webhooks.base import WebhookResponse -from services.webhooks.chainhook import ChainhookService -from services.webhooks.dao 
import DAOService +from services.integrations.webhooks.base import WebhookResponse +from services.integrations.webhooks.chainhook import ChainhookService +from services.integrations.webhooks.dao import DAOService # Configure logger logger = configure_logger(__name__) diff --git a/backend/abstract.py b/backend/abstract.py index 9cb7d437..feb4d25c 100644 --- a/backend/abstract.py +++ b/backend/abstract.py @@ -8,7 +8,6 @@ AgentBase, AgentCreate, AgentFilter, - AgentWithWalletTokenDTO, ChainState, ChainStateBase, ChainStateCreate, @@ -44,6 +43,7 @@ ProposalBase, ProposalCreate, ProposalFilter, + ProposalFilterN, QueueMessage, QueueMessageBase, QueueMessageCreate, @@ -78,6 +78,7 @@ WalletBase, WalletCreate, WalletFilter, + WalletFilterN, XCreds, XCredsBase, XCredsCreate, @@ -135,6 +136,19 @@ async def add_vectors( """ pass + @abstractmethod + async def fetch_vectors(self, collection_name: str, ids: List[str]) -> List[Any]: + """Fetch specific vectors by their IDs from a collection. + + Args: + collection_name: The name of the vector collection + ids: A list of vector IDs to fetch + + Returns: + A list of the fetched records (structure depends on the backend). + """ + pass + @abstractmethod async def query_vectors( self, collection_name: str, query_text: str, limit: int = 4 @@ -276,6 +290,11 @@ def get_wallet(self, wallet_id: UUID) -> Optional[Wallet]: def list_wallets(self, filters: Optional[WalletFilter] = None) -> List[Wallet]: pass + @abstractmethod + def list_wallets_n(self, filters: Optional[WalletFilterN] = None) -> List[Wallet]: + """Enhanced wallets listing with support for batch operations and advanced filtering.""" + pass + @abstractmethod def update_wallet( self, wallet_id: UUID, update_data: WalletBase @@ -456,6 +475,13 @@ def list_proposals( ) -> List[Proposal]: pass + @abstractmethod + def list_proposals_n( + self, filters: Optional[ProposalFilterN] = None + ) -> List[Proposal]: + """Enhanced proposals listing with support for batch operations and advanced filtering.""" + pass + @abstractmethod def update_proposal( self, proposal_id: UUID, update_data: ProposalBase diff --git a/backend/models.py b/backend/models.py index 6e188817..308685f9 100644 --- a/backend/models.py +++ b/backend/models.py @@ -1,6 +1,6 @@ from datetime import datetime from enum import Enum -from typing import List, Optional +from typing import Any, Dict, List, Optional from uuid import UUID from pydantic import BaseModel, ConfigDict @@ -8,7 +8,8 @@ class CustomBaseModel(BaseModel): model_config = ConfigDict( - json_encoders={UUID: str, datetime: lambda v: v.isoformat()} + json_encoders={UUID: str, datetime: lambda v: v.isoformat()}, + arbitrary_types_allowed=True, ) @@ -29,6 +30,7 @@ class ChainStateBase(CustomBaseModel): block_height: Optional[int] = None block_hash: Optional[str] = None network: Optional[str] = "mainnet" # mainnet or testnet + bitcoin_block_height: Optional[int] = None class ChainStateCreate(ChainStateBase): @@ -62,56 +64,128 @@ def __str__(self): return self.value -class QueueMessageType(str, Enum): - TWEET = "tweet" - DAO = "dao" - DAO_TWEET = "dao_tweet" - DAO_PROPOSAL_VOTE = "dao_proposal_vote" - DAO_PROPOSAL_CONCLUDE = "dao_proposal_conclude" - DAO_PROPOSAL_EVALUATION = ( - "dao_proposal_evaluation" # New type for proposal evaluation - ) - AGENT_ACCOUNT_DEPLOY = ( - "agent_account_deploy" # New type for agent account deployment - ) +class QueueMessageType: + """Dynamic queue message types that are registered at runtime. 
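To make the replacement of the old fixed enum concrete, a hedged usage sketch of the dynamic-type pattern this class introduces; it relies on the `get_or_create`, `register`, equality, and `list_all` helpers defined further down in the class body.

```python
# Hedged sketch only, not part of this patch; it exercises the classmethods
# defined later in QueueMessageType to show how dynamic registration replaces
# the previous fixed enum values.
from backend.models import QueueMessageType

tweet_type = QueueMessageType.get_or_create("tweet")
same_instance = QueueMessageType.register("TWEET")  # normalized to lowercase

assert tweet_type is same_instance      # the registry caches one instance per type
assert tweet_type == "TWEET"            # string comparison is case-insensitive
assert str(tweet_type) == "tweet"

print(QueueMessageType.list_all())      # every type registered so far at runtime
```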
- def __str__(self): - return self.value + This system is compatible with the runner's dynamic JobType system. + Queue message types are registered dynamically as job tasks are discovered. + """ + _message_types: Dict[str, "QueueMessageType"] = {} -# -# SECRETS -# -class SecretBase(CustomBaseModel): - name: Optional[str] = None - description: Optional[str] = None - secret: Optional[str] = None - decrypted_secret: Optional[str] = None - key_id: Optional[str] = None - nonce: Optional[str] = None + def __init__(self, value: str): + self._value = value.lower() + self._name = value.upper() + @property + def value(self) -> str: + return self._value -class SecretCreate(SecretBase): - pass + @property + def name(self) -> str: + return self._name + def __str__(self) -> str: + return self._value -class Secret(SecretBase): - id: UUID - created_at: datetime - updated_at: datetime + def __repr__(self) -> str: + return f"QueueMessageType({self._value})" + + def __eq__(self, other) -> bool: + if isinstance(other, QueueMessageType): + return self._value == other._value + if isinstance(other, str): + return self._value == other.lower() + return False + + def __hash__(self) -> int: + return hash(self._value) + + def __json__(self) -> str: + """Custom JSON serialization for Pydantic.""" + return self._value + + @classmethod + def __get_pydantic_core_schema__(cls, source_type, handler): + """Custom Pydantic schema for serialization/deserialization.""" + from pydantic_core import core_schema + + def validate_queue_message_type(value): + if value is None: + return None + if isinstance(value, cls): + return value + if isinstance(value, str): + return cls.get_or_create(value) + raise ValueError(f"Invalid QueueMessageType value: {value}") + + return core_schema.no_info_plain_validator_function( + validate_queue_message_type, + serialization=core_schema.to_string_ser_schema(), + ) + + @classmethod + def get_or_create(cls, message_type: str) -> "QueueMessageType": + """Get existing message type or create new one.""" + normalized = message_type.lower() + if normalized not in cls._message_types: + cls._message_types[normalized] = cls(normalized) + return cls._message_types[normalized] + + @classmethod + def register(cls, message_type: str) -> "QueueMessageType": + """Register a new message type and return the instance.""" + return cls.get_or_create(message_type) + + @classmethod + def get_all_message_types(cls) -> Dict[str, str]: + """Get all registered message types.""" + return {mt._value: mt._value for mt in cls._message_types.values()} + + @classmethod + def list_all(cls) -> List["QueueMessageType"]: + """Get all registered message type instances.""" + return list(cls._message_types.values()) + + +# Types are registered dynamically by the runner system +# No need to pre-register common types # # QUEUE MESSAGES # class QueueMessageBase(CustomBaseModel): + """Base model for queue messages. + + The message field supports multiple formats: + + 1. New chunked format (recommended for tweets): + { + "chunks": ["First chunk text (1/3)", "Second chunk text (2/3)", "Third chunk text (3/3)"], + "total_chunks": 3 + } + + 2. Legacy format (backward compatibility): + { + "message": "Main message content", + "follow_up_message": "Optional follow-up content", + "reply_to_tweet_id": "optional_tweet_id_for_threading" + } + + 3. 
Discord format: + { + "content": "Discord message content", + "proposal_status": "passed" or "failed" + } + """ + type: Optional[QueueMessageType] = None message: Optional[dict] = None is_processed: Optional[bool] = False - tweet_id: Optional[str] = None - conversation_id: Optional[str] = None dao_id: Optional[UUID] = None wallet_id: Optional[UUID] = None + result: Optional[dict] = None class QueueMessageCreate(QueueMessageBase): @@ -123,6 +197,28 @@ class QueueMessage(QueueMessageBase): created_at: datetime +# +# SECRETS +# +class SecretBase(CustomBaseModel): + name: Optional[str] = None + description: Optional[str] = None + secret: Optional[str] = None + decrypted_secret: Optional[str] = None + key_id: Optional[str] = None + nonce: Optional[str] = None + + +class SecretCreate(SecretBase): + pass + + +class Secret(SecretBase): + id: UUID + created_at: datetime + updated_at: datetime + + # # WALLETS # @@ -158,6 +254,7 @@ class XCredsBase(CustomBaseModel): client_secret: Optional[str] = None username: Optional[str] = None dao_id: Optional[UUID] = None + bearer_token: Optional[str] = None class XCredsCreate(XCredsBase): @@ -173,13 +270,8 @@ class XCreds(XCredsBase): # AGENTS # class AgentBase(CustomBaseModel): - name: Optional[str] = None - role: Optional[str] = None - goal: Optional[str] = None - backstory: Optional[str] = None profile_id: Optional[UUID] = None - agent_tools: Optional[List[str]] = None - image_url: Optional[str] = None + is_archived: Optional[bool] = False class AgentCreate(AgentBase): @@ -198,6 +290,7 @@ class Agent(AgentBase): class ExtensionBase(CustomBaseModel): dao_id: Optional[UUID] = None type: Optional[str] = None + subtype: Optional[str] = None contract_principal: Optional[str] = None tx_id: Optional[str] = None status: Optional[ContractStatus] = ContractStatus.DRAFT @@ -221,8 +314,6 @@ class DAOBase(CustomBaseModel): description: Optional[str] = None is_deployed: Optional[bool] = False is_broadcasted: Optional[bool] = False - wallet_id: Optional[UUID] = None - author_id: Optional[UUID] = None class DAOCreate(DAOBase): @@ -320,23 +411,16 @@ class Profile(ProfileBase): class ProposalBase(CustomBaseModel): dao_id: Optional[UUID] = None title: Optional[str] = None - description: Optional[str] = None + content: Optional[str] = None # Replaces both description and parameters status: Optional[ContractStatus] = ContractStatus.DRAFT contract_principal: Optional[str] = None tx_id: Optional[str] = None proposal_id: Optional[int] = None # On-chain proposal ID if its an action proposal - proposal_contract: Optional[str] = ( - None # Contract address of the proposal if its a core contract proposal - ) type: Optional[ProposalType] = ProposalType.ACTION action: Optional[str] = None caller: Optional[str] = None creator: Optional[str] = None - created_at_block: Optional[int] = None - end_block: Optional[int] = None - start_block: Optional[int] = None liquid_tokens: Optional[str] = None # Using string to handle large numbers - parameters: Optional[str] = None # Hex encoded parameters # Additional fields from blockchain data concluded_by: Optional[str] = None executed: Optional[bool] = None @@ -346,6 +430,27 @@ class ProposalBase(CustomBaseModel): votes_against: Optional[str] = None # String to handle large numbers votes_for: Optional[str] = None # String to handle large numbers bond: Optional[str] = None # String to handle large numbers + # Fields from updated chainhook payload + contract_caller: Optional[str] = None + created_btc: Optional[int] = None + created_stx: Optional[int] = 
None + creator_user_id: Optional[int] = None + exec_end: Optional[int] = None + exec_start: Optional[int] = None + memo: Optional[str] = None + tx_sender: Optional[str] = None + vote_end: Optional[int] = None + vote_start: Optional[int] = None + voting_delay: Optional[int] = None + voting_period: Optional[int] = None + voting_quorum: Optional[int] = None + voting_reward: Optional[str] = None # String to handle large numbers + voting_threshold: Optional[int] = None + summary: Optional[str] = None + tags: Optional[List[str]] = None + has_embedding: Optional[bool] = ( + False # Flag to track if proposal has been embedded in vector store + ) class ProposalCreate(ProposalBase): @@ -523,25 +628,39 @@ class WalletFilter(CustomBaseModel): testnet_address: Optional[str] = None +class WalletFilterN(CustomBaseModel): + """Enhanced wallet filter with support for batch operations using 'in_' queries.""" + + # Standard equality filters (same as WalletFilter) + agent_id: Optional[UUID] = None + profile_id: Optional[UUID] = None + mainnet_address: Optional[str] = None + testnet_address: Optional[str] = None + + # Batch filters using 'in_' operations + ids: Optional[List[UUID]] = None + agent_ids: Optional[List[UUID]] = None + profile_ids: Optional[List[UUID]] = None + mainnet_addresses: Optional[List[str]] = None + testnet_addresses: Optional[List[str]] = None + + class QueueMessageFilter(CustomBaseModel): type: Optional[QueueMessageType] = None is_processed: Optional[bool] = None - tweet_id: Optional[str] = None - conversation_id: Optional[str] = None wallet_id: Optional[UUID] = None dao_id: Optional[UUID] = None class AgentFilter(CustomBaseModel): - name: Optional[str] = None - role: Optional[str] = None - goal: Optional[str] = None + is_archived: Optional[bool] = None profile_id: Optional[UUID] = None class ExtensionFilter(CustomBaseModel): dao_id: Optional[UUID] = None type: Optional[str] = None + subtype: Optional[str] = None status: Optional[ContractStatus] = None contract_principal: Optional[str] = None @@ -550,7 +669,6 @@ class DAOFilter(CustomBaseModel): name: Optional[str] = None is_deployed: Optional[bool] = None is_broadcasted: Optional[bool] = None - wallet_id: Optional[UUID] = None class ThreadFilter(CustomBaseModel): @@ -579,7 +697,38 @@ class ProposalFilter(CustomBaseModel): met_quorum: Optional[bool] = None met_threshold: Optional[bool] = None type: Optional[ProposalType] = None - proposal_contract: Optional[str] = None + tx_id: Optional[str] = None + has_embedding: Optional[bool] = None # Filter by embedding presence + + +class ProposalFilterN(CustomBaseModel): + """Enhanced proposal filter with support for batch operations using 'in_' queries.""" + + # Standard equality filters (same as ProposalFilter) + dao_id: Optional[UUID] = None + status: Optional[ContractStatus] = None + contract_principal: Optional[str] = None + proposal_id: Optional[int] = None + executed: Optional[bool] = None + passed: Optional[bool] = None + met_quorum: Optional[bool] = None + met_threshold: Optional[bool] = None + type: Optional[ProposalType] = None + + # Batch filters using 'in_' operations + dao_ids: Optional[List[UUID]] = None + proposal_ids: Optional[List[int]] = None + statuses: Optional[List[ContractStatus]] = None + contract_principals: Optional[List[str]] = None + types: Optional[List[ProposalType]] = None + + # Range filters for numeric fields + proposal_id_gte: Optional[int] = None # greater than or equal + proposal_id_lte: Optional[int] = None # less than or equal + + # Text search (if supported 
by backend) + title_contains: Optional[str] = None + content_contains: Optional[str] = None class StepFilter(CustomBaseModel): @@ -648,9 +797,11 @@ class XTweetFilter(CustomBaseModel): # HOLDERS # class HolderBase(CustomBaseModel): - wallet_id: UUID - token_id: UUID - dao_id: UUID # Direct reference to the DAO for easier queries + wallet_id: Optional[UUID] = None + token_id: Optional[UUID] = None + agent_id: Optional[UUID] = None + dao_id: Optional[UUID] = None # Direct reference to the DAO for easier queries + address: Optional[str] = None amount: str # String to handle large numbers precisely updated_at: datetime = datetime.now() @@ -665,9 +816,11 @@ class Holder(HolderBase): class HolderFilter(CustomBaseModel): + agent_id: Optional[UUID] = None wallet_id: Optional[UUID] = None token_id: Optional[UUID] = None dao_id: Optional[UUID] = None + address: Optional[str] = None # @@ -689,6 +842,13 @@ class VoteBase(CustomBaseModel): cost: Optional[float] = None model: Optional[str] = None profile_id: Optional[UUID] = None + evaluation_score: Optional[Dict[str, Any]] = ( + None # Store final score from proposal evaluation + ) + flags: Optional[List[str]] = None # Store flags from proposal evaluation + evaluation: Optional[Dict[str, Any]] = ( + None # Store evaluation from proposal evaluation + ) class VoteCreate(VoteBase): @@ -711,12 +871,13 @@ class VoteFilter(CustomBaseModel): model: Optional[str] = None tx_id: Optional[str] = None profile_id: Optional[UUID] = None + evaluation_score: Optional[Dict[str, Any]] = None # Filter by evaluation score + flags: Optional[List[str]] = None # Filter by flags # Add this to your backend interface class to get agents by tokens class AgentWithWalletTokenDTO(CustomBaseModel): agent_id: UUID - agent_name: str wallet_id: UUID wallet_address: str token_id: UUID @@ -737,7 +898,7 @@ class PromptBase(CustomBaseModel): prompt_text: Optional[str] = None is_active: Optional[bool] = True model: Optional[str] = "gpt-4.1" - temperature: Optional[float] = 0.1 # Add temperature field with default value + temperature: Optional[float] = 0.9 # Add temperature field with default value class PromptCreate(PromptBase): diff --git a/backend/supabase.py b/backend/supabase.py index f2b11ecc..400c25fe 100644 --- a/backend/supabase.py +++ b/backend/supabase.py @@ -53,6 +53,7 @@ ProposalBase, ProposalCreate, ProposalFilter, + ProposalFilterN, QueueMessage, QueueMessageBase, QueueMessageCreate, @@ -88,6 +89,7 @@ WalletBase, WalletCreate, WalletFilter, + WalletFilterN, XCreds, XCredsBase, XCredsCreate, @@ -237,6 +239,34 @@ async def add_vectors( ) raise + async def fetch_vectors(self, collection_name: str, ids: List[str]) -> List[Any]: + """Fetch specific vectors by their IDs using the vecs client. + + Args: + collection_name: Name of the collection to query + ids: List of vector IDs to fetch + + Returns: + List of fetched records (typically tuples of id, vector, metadata). + """ + collection = self.get_vector_collection(collection_name) + if not ids: + logger.debug("fetch_vectors called with empty ID list.") + return [] + + try: + # Assuming the vecs library provides a `fetch` method + fetched_records = collection.fetch(ids=ids) + logger.debug( + f"Fetched {len(fetched_records)} vectors from collection {collection_name} for {len(ids)} requested IDs." 
+ ) + return fetched_records + except Exception as e: + logger.error( + f"Failed to fetch vectors by ID from collection {collection_name}: {str(e)}" + ) + raise + async def query_vectors( self, collection_name: str, query_text: str, limit: int = 4, embeddings=None ) -> List[Dict[str, Any]]: @@ -659,15 +689,10 @@ def list_queue_messages( query = query.eq("type", filters.type) if filters.is_processed is not None: query = query.eq("is_processed", filters.is_processed) - if filters.tweet_id is not None: - query = query.eq("tweet_id", filters.tweet_id) - if filters.conversation_id is not None: - query = query.eq("conversation_id", filters.conversation_id) if filters.wallet_id is not None: query = query.eq("wallet_id", filters.wallet_id) if filters.dao_id is not None: query = query.eq("dao_id", str(filters.dao_id)) - response = query.execute() data = response.data or [] return [QueueMessage(**row) for row in data] @@ -738,6 +763,65 @@ def list_wallets(self, filters: Optional["WalletFilter"] = None) -> List["Wallet data = response.data or [] return [Wallet(**row) for row in data] + def list_wallets_n( + self, filters: Optional["WalletFilterN"] = None + ) -> List["Wallet"]: + """Enhanced wallets listing with support for batch operations and advanced filtering.""" + query = self.client.table("wallets").select("*") + + if filters: + # Standard equality filters + if filters.agent_id is not None: + query = query.eq("agent_id", str(filters.agent_id)) + if filters.profile_id is not None: + query = query.eq("profile_id", str(filters.profile_id)) + if filters.mainnet_address is not None: + query = query.eq("mainnet_address", filters.mainnet_address) + if filters.testnet_address is not None: + query = query.eq("testnet_address", filters.testnet_address) + + # Batch filters using 'in_' operations + if filters.ids is not None and len(filters.ids) > 0: + id_strings = [str(wallet_id) for wallet_id in filters.ids] + query = query.in_("id", id_strings) + if filters.agent_ids is not None and len(filters.agent_ids) > 0: + agent_id_strings = [str(agent_id) for agent_id in filters.agent_ids] + query = query.in_("agent_id", agent_id_strings) + if filters.profile_ids is not None and len(filters.profile_ids) > 0: + profile_id_strings = [ + str(profile_id) for profile_id in filters.profile_ids + ] + query = query.in_("profile_id", profile_id_strings) + if ( + filters.mainnet_addresses is not None + and len(filters.mainnet_addresses) > 0 + ): + query = query.in_("mainnet_address", filters.mainnet_addresses) + if ( + filters.testnet_addresses is not None + and len(filters.testnet_addresses) > 0 + ): + query = query.in_("testnet_address", filters.testnet_addresses) + + try: + response = query.execute() + data = response.data or [] + return [Wallet(**row) for row in data] + except Exception as e: + logger.error(f"Error in list_wallets_n: {str(e)}") + # Fallback to original list_wallets if enhanced filtering fails + if filters: + # Convert enhanced filter to basic filter for fallback + basic_filter = WalletFilter( + agent_id=filters.agent_id, + profile_id=filters.profile_id, + mainnet_address=filters.mainnet_address, + testnet_address=filters.testnet_address, + ) + return self.list_wallets(basic_filter) + else: + return self.list_wallets() + def update_wallet( self, wallet_id: UUID, update_data: "WalletBase" ) -> Optional["Wallet"]: @@ -815,7 +899,6 @@ def get_agents_with_dao_tokens( result.append( AgentWithWalletTokenDTO( agent_id=agent.id, - agent_name=agent.name, wallet_id=wallet.id, wallet_address=wallet_address, 
token_id=token.id, @@ -853,12 +936,8 @@ def get_agent(self, agent_id: UUID) -> Optional["Agent"]: def list_agents(self, filters: Optional["AgentFilter"] = None) -> List["Agent"]: query = self.client.table("agents").select("*") if filters: - if filters.name is not None: - query = query.eq("name", filters.name) - if filters.role is not None: - query = query.eq("role", filters.role) - if filters.goal is not None: - query = query.eq("goal", filters.goal) + if filters.is_archived is not None: + query = query.eq("is_archived", filters.is_archived) if filters.profile_id is not None: query = query.eq("profile_id", str(filters.profile_id)) response = query.execute() @@ -986,8 +1065,6 @@ def list_daos(self, filters: Optional["DAOFilter"] = None) -> List["DAO"]: query = query.eq("is_deployed", filters.is_deployed) if filters.is_broadcasted is not None: query = query.eq("is_broadcasted", filters.is_broadcasted) - if filters.wallet_id is not None: - query = query.eq("wallet_id", str(filters.wallet_id)) response = query.execute() data = response.data or [] return [DAO(**row) for row in data] @@ -1277,8 +1354,10 @@ def list_proposals( query = query.eq("met_threshold", filters.met_threshold) if filters.type is not None: query = query.eq("type", filters.type) - if filters.proposal_contract is not None: - query = query.eq("proposal_contract", filters.proposal_contract) + if filters.tx_id is not None: + query = query.eq("tx_id", filters.tx_id) + if filters.has_embedding is not None: + query = query.eq("has_embedding", filters.has_embedding) response = query.execute() data = response.data or [] return [Proposal(**row) for row in data] @@ -1586,6 +1665,10 @@ def list_votes(self, filters: Optional["VoteFilter"] = None) -> List["Vote"]: query = query.eq("tx_id", filters.tx_id) if filters.profile_id is not None: query = query.eq("profile_id", str(filters.profile_id)) + if filters.evaluation_score is not None: + query = query.eq("evaluation_score", filters.evaluation_score) + if filters.flags is not None: + query = query.eq("flags", filters.flags) response = query.execute() data = response.data or [] return [Vote(**row) for row in data] @@ -1907,3 +1990,87 @@ def delete_holder(self, holder_id: UUID) -> bool: ) deleted = response.data or [] return len(deleted) > 0 + + # ---------------------------------------------------------------- + # 18. 
PROPOSALS_N + # ---------------------------------------------------------------- + def list_proposals_n( + self, filters: Optional["ProposalFilterN"] = None + ) -> List["Proposal"]: + """Enhanced proposals listing with support for batch operations and advanced filtering.""" + query = self.client.table("proposals").select("*") + + if filters: + # Standard equality filters + if filters.dao_id is not None: + query = query.eq("dao_id", str(filters.dao_id)) + if filters.status is not None: + query = query.eq("status", str(filters.status)) + if filters.contract_principal is not None: + query = query.eq("contract_principal", filters.contract_principal) + if filters.proposal_id is not None: + query = query.eq("proposal_id", filters.proposal_id) + if filters.executed is not None: + query = query.eq("executed", filters.executed) + if filters.passed is not None: + query = query.eq("passed", filters.passed) + if filters.met_quorum is not None: + query = query.eq("met_quorum", filters.met_quorum) + if filters.met_threshold is not None: + query = query.eq("met_threshold", filters.met_threshold) + if filters.type is not None: + query = query.eq("type", filters.type) + + # Batch filters using 'in_' operations + if filters.dao_ids is not None and len(filters.dao_ids) > 0: + dao_id_strings = [str(dao_id) for dao_id in filters.dao_ids] + query = query.in_("dao_id", dao_id_strings) + if filters.proposal_ids is not None and len(filters.proposal_ids) > 0: + query = query.in_("proposal_id", filters.proposal_ids) + if filters.statuses is not None and len(filters.statuses) > 0: + status_strings = [str(status) for status in filters.statuses] + query = query.in_("status", status_strings) + if ( + filters.contract_principals is not None + and len(filters.contract_principals) > 0 + ): + query = query.in_("contract_principal", filters.contract_principals) + if filters.types is not None and len(filters.types) > 0: + type_strings = [str(ptype) for ptype in filters.types] + query = query.in_("type", type_strings) + + # Range filters for numeric fields + if filters.proposal_id_gte is not None: + query = query.gte("proposal_id", filters.proposal_id_gte) + if filters.proposal_id_lte is not None: + query = query.lte("proposal_id", filters.proposal_id_lte) + + # Text search filters (using ilike for case-insensitive partial matching) + if filters.title_contains is not None: + query = query.ilike("title", f"%{filters.title_contains}%") + if filters.content_contains is not None: + query = query.ilike("content", f"%{filters.content_contains}%") + + try: + response = query.execute() + data = response.data or [] + return [Proposal(**row) for row in data] + except Exception as e: + logger.error(f"Error in list_proposals_n: {str(e)}") + # Fallback to original list_proposals if enhanced filtering fails + if filters: + # Convert enhanced filter to basic filter for fallback + basic_filter = ProposalFilter( + dao_id=filters.dao_id, + status=filters.status, + contract_principal=filters.contract_principal, + proposal_id=filters.proposal_id, + executed=filters.executed, + passed=filters.passed, + met_quorum=filters.met_quorum, + met_threshold=filters.met_threshold, + type=filters.type, + ) + return self.list_proposals(basic_filter) + else: + return self.list_proposals() diff --git a/check_updates.py b/check_updates.py new file mode 100644 index 00000000..0a21f99d --- /dev/null +++ b/check_updates.py @@ -0,0 +1,381 @@ +#!/usr/bin/env python3 +""" +Script to check for available updates to dependencies in pyproject.toml +""" + +import argparse 
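For quick reference, a hedged sketch of how this script is invoked, based on the `argparse` options defined later in the file; the `uv lock` follow-up mirrors the hint the script itself prints after updating.

```python
# Hedged sketch only, not part of this patch. Shows the CLI surface defined by
# parse_arguments() further down: a read-only check, a dry run, and an in-place
# update of pyproject.toml, followed by refreshing the lock file.
import subprocess

subprocess.run(["python", "check_updates.py"], check=True)                            # report only
subprocess.run(["python", "check_updates.py", "--update", "--dry-run"], check=True)   # preview changes
subprocess.run(["python", "check_updates.py", "--update"], check=True)                # rewrite pyproject.toml
subprocess.run(["uv", "lock"], check=True)                                            # pick up new versions
```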
+import asyncio +import re +import sys +from pathlib import Path +from typing import Dict, Optional + +import aiohttp + +try: + from packaging import version + + HAS_PACKAGING = True +except ImportError: + HAS_PACKAGING = False + + +async def get_latest_version( + session: aiohttp.ClientSession, package_name: str +) -> Optional[str]: + """Get the latest version of a package from PyPI.""" + try: + url = f"https://pypi.org/pypi/{package_name}/json" + async with session.get(url) as response: + if response.status == 200: + data = await response.json() + return data["info"]["version"] + except Exception as e: + print(f"Error fetching {package_name}: {e}") + return None + + +def update_pyproject_toml(file_path: Path, updates: Dict[str, str]) -> None: + """Update pyproject.toml with new dependency versions.""" + try: + with open(file_path, "r") as f: + content = f.read() + + lines = content.split("\n") + updated_lines = [] + in_dependencies = False + + for line in lines: + original_line = line + stripped_line = line.strip() + + # Check if we're entering the dependencies section + if stripped_line == "dependencies = [": + in_dependencies = True + updated_lines.append(original_line) + continue + + # Check if we're leaving the dependencies section + if in_dependencies and stripped_line == "]": + in_dependencies = False + updated_lines.append(original_line) + continue + + # Process dependency lines for updates + if in_dependencies and stripped_line and not stripped_line.startswith("#"): + # Extract package name from the line + clean_line = stripped_line.strip().strip('"').strip("'").rstrip(",") + + if "==" in clean_line: + package_name = clean_line.split("==")[0].strip() + if package_name in updates: + # Preserve the original formatting but update the version + # Find the version part and replace it + updated_line = re.sub( + rf'("{package_name}==)[^"]*(")', + rf"\g<1>{updates[package_name]}\g<2>", + original_line, + ) + if updated_line == original_line: + # Try without quotes + updated_line = re.sub( + rf"({package_name}==)[^,\s]*", + rf"\g<1>{updates[package_name]}", + original_line, + ) + updated_lines.append(updated_line) + continue + + # If no update needed, keep original line + updated_lines.append(original_line) + else: + updated_lines.append(original_line) + + # Write back to file + updated_content = "\n".join(updated_lines) + with open(file_path, "w") as f: + f.write(updated_content) + + except Exception as e: + print(f"❌ Error updating {file_path}: {e}") + raise + + +def parse_pyproject_toml(file_path: Path) -> Dict[str, str]: + """Parse pyproject.toml and extract dependencies with their versions.""" + dependencies = {} + + try: + with open(file_path, "r") as f: + content = f.read() + + # Find the dependencies section + in_dependencies = False + lines = content.split("\n") + + for line in lines: + line = line.strip() + + # Check if we're entering the dependencies section + if line == "dependencies = [": + in_dependencies = True + continue + + # Check if we're leaving the dependencies section + if in_dependencies and line == "]": + break + + # Process dependency lines + if in_dependencies and line and not line.startswith("#"): + # Remove quotes and trailing comma + line = line.strip().strip('"').strip("'").rstrip(",") + + # Parse package name and version + if "==" in line: + package, version = line.split("==", 1) + dependencies[package.strip()] = ( + version.strip().strip('"').strip("'") + ) + elif "<=" in line: + # Handle version constraints like "<=0.46.0" + parts = line.split("<=") + 
package = parts[0].strip() + version_constraint = parts[1].strip().strip('"').strip("'") + dependencies[package] = f"<={version_constraint}" + elif ">=" in line: + # Handle version constraints like ">=0.25.0" + parts = line.split(">=") + package = parts[0].strip() + version_constraint = parts[1].strip().strip('"').strip("'") + # Extract just the minimum version, ignoring additional constraints + if "," in version_constraint: + version_constraint = version_constraint.split(",")[0] + dependencies[package] = f">={version_constraint}" + elif "<" in line and "<=" not in line: + parts = line.split("<") + package = parts[0].strip() + version_constraint = parts[1].strip().strip('"').strip("'") + dependencies[package] = f"<{version_constraint}" + elif ">" in line and ">=" not in line: + parts = line.split(">") + package = parts[0].strip() + version_constraint = parts[1].strip().strip('"').strip("'") + dependencies[package] = f">{version_constraint}" + else: + # Package without version specification + dependencies[line.strip()] = "no version specified" + + except FileNotFoundError: + print(f"Error: {file_path} not found") + sys.exit(1) + except Exception as e: + print(f"Error parsing {file_path}: {e}") + sys.exit(1) + + return dependencies + + +def parse_arguments() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser( + description="Check for available updates to dependencies in pyproject.toml" + ) + parser.add_argument( + "--update", + action="store_true", + help="Automatically update pyproject.toml with latest versions", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Show what would be updated without making changes (only with --update)", + ) + return parser.parse_args() + + +async def check_updates(auto_update: bool = False, dry_run: bool = False): + """Main function to check for updates.""" + pyproject_path = Path("pyproject.toml") + + if not pyproject_path.exists(): + print("Error: pyproject.toml not found in current directory") + sys.exit(1) + + print("🔍 Parsing pyproject.toml...") + if not HAS_PACKAGING: + print( + "⚠️ Note: 'packaging' library not found. Version comparison may be less accurate." + ) + print(" Install with: pip install packaging") + dependencies = parse_pyproject_toml(pyproject_path) + + if not dependencies: + print("No dependencies found in pyproject.toml") + return + + print(f"📦 Found {len(dependencies)} dependencies. 
Checking for updates...\n") + + # Create aiohttp session for concurrent requests + connector = aiohttp.TCPConnector(limit=10) # Limit concurrent connections + timeout = aiohttp.ClientTimeout(total=30) + + async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session: + # Fetch latest versions concurrently + tasks = [] + for package_name in dependencies.keys(): + task = get_latest_version(session, package_name) + tasks.append((package_name, task)) + + results = [] + for package_name, task in tasks: + latest_version = await task + results.append((package_name, dependencies[package_name], latest_version)) + + # Sort results alphabetically + results.sort(key=lambda x: x[0].lower()) + + # Display results and collect updates + print("📋 Dependency Update Report") + print("=" * 60) + print(f"{'Package':<25} {'Current':<15} {'Latest':<15} {'Status'}") + print("-" * 60) + + updates_available = 0 + available_updates = {} # Dict to store packages that can be updated + + for package_name, current_version, latest_version in results: + if latest_version is None: + status = "❌ Error" + latest_display = "N/A" + elif current_version == "no version specified": + status = "⚠️ No version" + latest_display = latest_version + elif ( + current_version.startswith(">=") + or current_version.startswith(">") + or current_version.startswith("<=") + or current_version.startswith("<") + ): + # For version constraints, just show the info + status = "ℹ️ Constraint" + latest_display = latest_version + elif current_version == latest_version: + status = "✅ Up to date" + latest_display = latest_version + else: + # Compare versions properly if packaging is available + update_available = False + if HAS_PACKAGING: + try: + current_ver = version.parse(current_version) + latest_ver = version.parse(latest_version) + if current_ver < latest_ver: + status = "🔄 Update available" + update_available = True + updates_available += 1 + elif current_ver == latest_ver: + status = "✅ Up to date" + else: + status = "⬇️ Downgrade available" + except (ValueError, TypeError): + # Fall back to string comparison + status = "🔄 Update available" + update_available = True + updates_available += 1 + else: + # Simple string comparison fallback + status = "🔄 Update available" + update_available = True + updates_available += 1 + + # Store packages that can be updated (only exact versions) + if update_available and not ( + current_version.startswith(">=") + or current_version.startswith(">") + or current_version.startswith("<=") + or current_version.startswith("<") + ): + available_updates[package_name] = latest_version + + latest_display = latest_version + + print(f"{package_name:<25} {current_version:<15} {latest_display:<15} {status}") + + print("-" * 60) + if updates_available > 0: + print(f"🎯 {updates_available} package(s) may have updates available") + else: + print("🎉 All packages appear to be up to date!") + + # Handle auto-update functionality + if auto_update: + if available_updates: + print("\n🔄 Auto-update mode enabled...") + + if dry_run: + print("🧪 Dry run mode - showing what would be updated:") + for pkg, new_ver in available_updates.items(): + current_ver = dependencies[pkg] + print(f" • {pkg}: {current_ver} → {new_ver}") + + if updates_available > len(available_updates): + skipped = updates_available - len(available_updates) + print( + f"\n⚠️ {skipped} package(s) with version constraints will be skipped" + ) + else: + try: + print( + f"📝 Updating {len(available_updates)} package(s) in pyproject.toml..." 
+ ) + update_pyproject_toml(pyproject_path, available_updates) + print("✅ Successfully updated pyproject.toml!") + + print("\n📦 Updated packages:") + for pkg, new_ver in available_updates.items(): + current_ver = dependencies[pkg] + print(f" • {pkg}: {current_ver} → {new_ver}") + + if updates_available > len(available_updates): + skipped = updates_available - len(available_updates) + print( + f"\n⚠️ {skipped} package(s) with version constraints were skipped" + ) + + print( + "\n💡 Run 'uv lock' to update the lock file with new versions" + ) + + except Exception as e: + print(f"❌ Failed to update pyproject.toml: {e}") + sys.exit(1) + else: + print("\n✅ No packages can be auto-updated!") + if updates_available > 0: + print(" (All available updates have version constraints)") + elif not auto_update: + print( + "\n💡 To update a package, modify pyproject.toml and run: uv lock --upgrade-package " + ) + if available_updates: + print( + "💡 Or run this script with --update to automatically update all packages" + ) + + +if __name__ == "__main__": + try: + args = parse_arguments() + + # Validate arguments + if args.dry_run and not args.update: + print("❌ --dry-run can only be used with --update") + sys.exit(1) + + asyncio.run(check_updates(auto_update=args.update, dry_run=args.dry_run)) + except KeyboardInterrupt: + print("\n⚠️ Check interrupted by user") + sys.exit(1) + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) diff --git a/config.py b/config.py index da68c751..c012a06a 100644 --- a/config.py +++ b/config.py @@ -8,7 +8,6 @@ logger = configure_logger(__name__) -# Load environment variables first load_dotenv() @@ -27,7 +26,7 @@ class DatabaseConfig: @dataclass class TwitterConfig: - enabled: bool = os.getenv("AIBTC_TWITTER_ENABLED", "false").lower() == "true" + enabled: bool = os.getenv("AIBTC_TWITTER_ENABLED", "true").lower() == "true" interval_seconds: int = int(os.getenv("AIBTC_TWITTER_INTERVAL_SECONDS", "120")) consumer_key: str = os.getenv("AIBTC_TWITTER_CONSUMER_KEY", "") consumer_secret: str = os.getenv("AIBTC_TWITTER_CONSUMER_SECRET", "") @@ -35,12 +34,21 @@ class TwitterConfig: client_secret: str = os.getenv("AIBTC_TWITTER_CLIENT_SECRET", "") access_token: str = os.getenv("AIBTC_TWITTER_ACCESS_TOKEN", "") access_secret: str = os.getenv("AIBTC_TWITTER_ACCESS_SECRET", "") + bearer_token: str = os.getenv("AIBTC_TWITTER_BEARER_TOKEN", "") + username: str = os.getenv("AIBTC_TWITTER_USERNAME", "") automated_user_id: str = os.getenv("AIBTC_TWITTER_AUTOMATED_USER_ID", "") whitelisted_authors: List[str] = field( default_factory=lambda: os.getenv("AIBTC_TWITTER_WHITELISTED", "").split(",") ) +@dataclass +class BackendWalletConfig: + """Configuration for backend wallet operations.""" + + seed_phrase: str = os.getenv("AIBTC_BACKEND_WALLET_SEED_PHRASE", "") + + @dataclass class TelegramConfig: token: str = os.getenv("AIBTC_TELEGRAM_BOT_TOKEN", "") @@ -49,7 +57,37 @@ class TelegramConfig: @dataclass class DiscordConfig: - webhook_url: str = os.getenv("AIBTC_DISCORD_WEBHOOK_URL", "") + webhook_url_passed: str = os.getenv("AIBTC_DISCORD_WEBHOOK_URL_PASSED", "") + webhook_url_failed: str = os.getenv("AIBTC_DISCORD_WEBHOOK_URL_FAILED", "") + + +@dataclass +class ChatLLMConfig: + """Configuration for chat-based LLM models.""" + + default_model: str = os.getenv("AIBTC_CHAT_DEFAULT_MODEL", "gpt-4.1") + default_temperature: float = float( + os.getenv("AIBTC_CHAT_DEFAULT_TEMPERATURE", "0.9") + ) + api_base: str = os.getenv("AIBTC_CHAT_API_BASE", "") + api_key: str = 
os.getenv("AIBTC_CHAT_API_KEY", "") + # Reasoning-specific model settings + reasoning_model: str = os.getenv("AIBTC_CHAT_REASONING_MODEL", "o3-mini") + reasoning_temperature: float = float( + os.getenv("AIBTC_CHAT_REASONING_TEMPERATURE", "0.9") + ) + + +@dataclass +class EmbeddingConfig: + """Configuration for embedding models.""" + + default_model: str = os.getenv( + "AIBTC_EMBEDDING_DEFAULT_MODEL", "text-embedding-ada-002" + ) + api_base: str = os.getenv("AIBTC_EMBEDDING_API_BASE", "") + api_key: str = os.getenv("AIBTC_EMBEDDING_API_KEY", "") + dimensions: int = int(os.getenv("AIBTC_EMBEDDING_DIMENSIONS", "1536")) @dataclass @@ -60,64 +98,87 @@ class SchedulerConfig: sync_interval_seconds: int = int( os.getenv("AIBTC_SCHEDULE_SYNC_INTERVAL_SECONDS", "60") ) - dao_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_RUNNER_ENABLED", "false").lower() == "true" + + # Job-specific configurations matching job_type names exactly + + # agent_account_deployer job + agent_account_deployer_enabled: bool = ( + os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOYER_ENABLED", "true").lower() == "true" ) - dao_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_RUNNER_INTERVAL_SECONDS", "30") + agent_account_deployer_interval_seconds: int = int( + os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOYER_INTERVAL_SECONDS", "60") ) - dao_tweet_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_TWEET_RUNNER_ENABLED", "false").lower() == "true" + + # chain_state_monitor job + chain_state_monitor_enabled: bool = ( + os.getenv("AIBTC_CHAIN_STATE_MONITOR_ENABLED", "true").lower() == "true" ) - dao_tweet_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS", "30") + chain_state_monitor_interval_seconds: int = int( + os.getenv("AIBTC_CHAIN_STATE_MONITOR_INTERVAL_SECONDS", "300") ) - tweet_runner_enabled: bool = ( - os.getenv("AIBTC_TWEET_RUNNER_ENABLED", "false").lower() == "true" + + # dao_deployment job + dao_deployment_enabled: bool = ( + os.getenv("AIBTC_DAO_DEPLOYMENT_ENABLED", "true").lower() == "true" ) - tweet_runner_interval_seconds: int = int( - os.getenv("AIBTC_TWEET_RUNNER_INTERVAL_SECONDS", "30") + dao_deployment_interval_seconds: int = int( + os.getenv("AIBTC_DAO_DEPLOYMENT_INTERVAL_SECONDS", "60") ) - dao_proposal_vote_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_PROPOSAL_VOTE_RUNNER_ENABLED", "false").lower() == "true" + + # dao_deployment_tweet job + dao_deployment_tweet_enabled: bool = ( + os.getenv("AIBTC_DAO_DEPLOYMENT_TWEET_ENABLED", "true").lower() == "true" ) - dao_proposal_vote_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_VOTE_RUNNER_INTERVAL_SECONDS", "60") + dao_deployment_tweet_interval_seconds: int = int( + os.getenv("AIBTC_DAO_DEPLOYMENT_TWEET_INTERVAL_SECONDS", "60") ) - dao_proposal_conclude_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_ENABLED", "false").lower() - == "true" + + # dao_proposal_conclude job + dao_proposal_conclude_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_ENABLED", "true").lower() == "true" ) - dao_proposal_conclude_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_INTERVAL_SECONDS", "60") + dao_proposal_conclude_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_INTERVAL_SECONDS", "60") ) - dao_proposal_conclude_runner_wallet_id: str = os.getenv( - "AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID", "" + + # dao_proposal_embedder job + dao_proposal_embedder_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_EMBEDDER_ENABLED", "true").lower() == 
"true" ) - dao_proposal_evaluation_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_ENABLED", "false").lower() - == "true" + dao_proposal_embedder_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_EMBEDDER_INTERVAL_SECONDS", "300") ) - dao_proposal_evaluation_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_INTERVAL_SECONDS", "60") + + # dao_proposal_evaluation job + dao_proposal_evaluation_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_ENABLED", "true").lower() == "true" ) - agent_account_deploy_runner_enabled: bool = ( - os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED", "false").lower() - == "true" + dao_proposal_evaluation_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_INTERVAL_SECONDS", "60") ) - agent_account_deploy_runner_interval_seconds: int = int( - os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS", "60") + + # dao_proposal_vote job + dao_proposal_vote_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_VOTE_ENABLED", "true").lower() == "true" ) - agent_account_deploy_runner_wallet_id: str = os.getenv( - "AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID", "" + dao_proposal_vote_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_VOTE_INTERVAL_SECONDS", "60") ) - dao_proposal_vote_delay_blocks: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_VOTE_DELAY_BLOCKS", "2") + + # discord job + discord_enabled: bool = os.getenv("AIBTC_DISCORD_ENABLED", "true").lower() == "true" + discord_interval_seconds: int = int( + os.getenv("AIBTC_DISCORD_INTERVAL_SECONDS", "30") ) + # tweet job + tweet_enabled: bool = os.getenv("AIBTC_TWEET_ENABLED", "true").lower() == "true" + tweet_interval_seconds: int = int(os.getenv("AIBTC_TWEET_INTERVAL_SECONDS", "30")) + @dataclass class APIConfig: + base_url: str = os.getenv("AIBTC_BASEURL", "https://app-staging.aibtc.dev") alex_base_url: str = os.getenv("AIBTC_ALEX_BASE_URL", "https://api.alexgo.io/") hiro_api_url: str = os.getenv("AIBTC_HIRO_API_URL", "https://api.hiro.so") platform_base_url: str = os.getenv( @@ -134,7 +195,6 @@ class APIConfig: webhook_auth: str = os.getenv("AIBTC_WEBHOOK_AUTH_TOKEN", "Bearer 1234567890") lunarcrush_api_key: str = os.getenv("AIBTC_LUNARCRUSH_API_KEY", "") cmc_api_key: str = os.getenv("AIBTC_CMC_API_KEY", "") - openai_api_key: str = os.getenv("OPENAI_API_KEY", "") @dataclass @@ -151,6 +211,9 @@ class Config: api: APIConfig = field(default_factory=APIConfig) network: NetworkConfig = field(default_factory=NetworkConfig) discord: DiscordConfig = field(default_factory=DiscordConfig) + backend_wallet: BackendWalletConfig = field(default_factory=BackendWalletConfig) + chat_llm: ChatLLMConfig = field(default_factory=ChatLLMConfig) + embedding: EmbeddingConfig = field(default_factory=EmbeddingConfig) @classmethod def load(cls) -> "Config": diff --git a/docker_entrypoint.py b/docker_entrypoint.py new file mode 100644 index 00000000..8d5f1663 --- /dev/null +++ b/docker_entrypoint.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +"""Docker entrypoint script to run the application.""" + +import os +import sys + +# Add the current directory to the path +sys.path.insert(0, os.path.abspath(".")) + +# Run uvicorn +if __name__ == "__main__": + import uvicorn + + uvicorn.run("main:app", host="0.0.0.0", port=8000) diff --git a/docs/CONFIG.md b/docs/CONFIG.md deleted file mode 100644 index bb16b8a0..00000000 --- a/docs/CONFIG.md +++ /dev/null @@ -1,163 +0,0 @@ -# Configuration Guide - -This document details all configuration 
options for the aibtcdev-backend service. All configuration is loaded from environment variables. - -## Quick Start - -1. Copy the example environment file: -```bash -cp .env.example .env -``` - -2. Configure the environment variables according to the sections below. - -## Configuration Components - -### Database Configuration (DatabaseConfig) -- `AIBTC_BACKEND`: Database backend type (default: "supabase") -- `AIBTC_SUPABASE_USER`: Supabase user -- `AIBTC_SUPABASE_PASSWORD`: Supabase password -- `AIBTC_SUPABASE_HOST`: Database host -- `AIBTC_SUPABASE_PORT`: Database port -- `AIBTC_SUPABASE_DBNAME`: Database name -- `AIBTC_SUPABASE_URL`: Supabase project URL -- `AIBTC_SUPABASE_SERVICE_KEY`: Supabase service key -- `AIBTC_SUPABASE_BUCKET_NAME`: Storage bucket name - -### Twitter Configuration (TwitterConfig) -- `AIBTC_TWITTER_ENABLED`: Enable Twitter integration (true/false) -- `AIBTC_TWITTER_INTERVAL_SECONDS`: Interval for Twitter operations (default: 120) -- `AIBTC_TWITTER_CONSUMER_KEY`: Twitter API consumer key -- `AIBTC_TWITTER_CONSUMER_SECRET`: Twitter API consumer secret -- `AIBTC_TWITTER_CLIENT_ID`: Twitter client ID -- `AIBTC_TWITTER_CLIENT_SECRET`: Twitter client secret -- `AIBTC_TWITTER_ACCESS_TOKEN`: Twitter access token -- `AIBTC_TWITTER_ACCESS_SECRET`: Twitter access secret -- `AIBTC_TWITTER_AUTOMATED_USER_ID`: Automated Twitter user ID -- `AIBTC_TWITTER_WHITELISTED`: Comma-separated list of whitelisted authors - -### Telegram Configuration (TelegramConfig) -- `AIBTC_TELEGRAM_BOT_TOKEN`: Telegram bot token -- `AIBTC_TELEGRAM_BOT_ENABLED`: Enable Telegram bot (true/false) - -### Discord Configuration (DiscordConfig) -- `AIBTC_DISCORD_WEBHOOK_URL`: Discord webhook URL for notifications - -### API Configuration (APIConfig) -- `AIBTC_ALEX_BASE_URL`: Alex API base URL (default: "https://api.alexgo.io/") -- `AIBTC_HIRO_API_URL`: Hiro API URL (default: "https://api.hiro.so") -- `AIBTC_PLATFORM_API_URL`: Platform API URL -- `AIBTC_VELAR_BASE_URL`: Velar network gateway URL -- `AIBTC_LUNARCRUSH_BASE_URL`: LunarCrush API base URL -- `HIRO_API_KEY`: Hiro API key -- `AIBTC_WEBHOOK_URL`: Webhook URL for notifications -- `AIBTC_WEBHOOK_AUTH_TOKEN`: Webhook authentication token -- `AIBTC_LUNARCRUSH_API_KEY`: LunarCrush API key -- `AIBTC_CMC_API_KEY`: CoinMarketCap API key -- `OPENAI_API_KEY`: OpenAI API key - -### Network Configuration (NetworkConfig) -- `NETWORK`: Network type (testnet/mainnet) - -### Scheduler Configuration (SchedulerConfig) - -The application includes several background task runners that can be configured: - -#### Schedule Sync Runner -- `AIBTC_SCHEDULE_SYNC_ENABLED`: Enable schedule sync (true/false) -- `AIBTC_SCHEDULE_SYNC_INTERVAL_SECONDS`: Sync interval in seconds (default: 60) - -#### DAO Runners -- `AIBTC_DAO_RUNNER_ENABLED`: Enable DAO processing (true/false) -- `AIBTC_DAO_RUNNER_INTERVAL_SECONDS`: Processing interval (default: 30) -- `AIBTC_DAO_TWEET_RUNNER_ENABLED`: Enable DAO tweet generation (true/false) -- `AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS`: Tweet generation interval (default: 30) -- `AIBTC_DAO_PROPOSAL_VOTE_RUNNER_ENABLED`: Enable proposal vote processing (true/false) -- `AIBTC_DAO_PROPOSAL_VOTE_RUNNER_INTERVAL_SECONDS`: Vote processing interval (default: 60) -- `AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_ENABLED`: Enable proposal conclusion processing (true/false) -- `AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_INTERVAL_SECONDS`: Conclusion processing interval (default: 60) -- `AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID`: Wallet ID for conclusion processing - -#### 
Agent Account Runner -- `AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED`: Enable agent account deployment (true/false) -- `AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS`: Deployment interval (default: 60) -- `AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID`: Wallet ID for deployments - -#### Tweet Runner -- `AIBTC_TWEET_RUNNER_ENABLED`: Enable tweet processing (true/false) -- `AIBTC_TWEET_RUNNER_INTERVAL_SECONDS`: Processing interval (default: 30) - -## Example Configurations - -### DAO Processing Configuration -```env -AIBTC_DAO_RUNNER_ENABLED=true -AIBTC_DAO_RUNNER_INTERVAL_SECONDS=30 -AIBTC_DAO_TWEET_RUNNER_ENABLED=true -AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS=30 -``` - -### Agent Account Deployment -```env -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED=false -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS=60 -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID="your-wallet-id" -``` - -### Social Media Integration -```env -AIBTC_TWITTER_ENABLED=true -AIBTC_TWITTER_INTERVAL_SECONDS=120 -AIBTC_TELEGRAM_BOT_ENABLED=true -``` - -## Security Considerations - -1. API Keys and Secrets - - Never commit API keys or secrets to version control - - Use environment variables for all sensitive data - - Rotate keys regularly - - Use appropriate access scopes - -2. Network Configuration - - Use HTTPS for all external API communications - - Configure appropriate CORS settings - - Use secure WebSocket connections (WSS) - -3. Database Security - - Use strong passwords - - Limit database user permissions - - Enable SSL for database connections - - Regular backup configuration - -## Troubleshooting - -### Common Configuration Issues - -1. Database Connection - - Verify all database credentials are correct - - Check network access to database - - Verify SSL requirements - -2. API Integration - - Validate API keys and tokens - - Check API rate limits - - Verify endpoint URLs - -3. Background Tasks - - Check runner enabled flags - - Verify interval settings - - Monitor task execution logs - -## Maintenance - -1. Regular Tasks - - Monitor API usage and rate limits - - Check log files for errors - - Review and rotate API keys - - Update configuration as needed - -2. Backup Configuration - - Regular database backups - - Configuration backup - - Key rotation schedule \ No newline at end of file diff --git a/docs/chainhook_service.md b/docs/chainhook_service.md deleted file mode 100644 index cf58ede2..00000000 --- a/docs/chainhook_service.md +++ /dev/null @@ -1,121 +0,0 @@ -# Chainhook Parsing Service - -## Overview - -The Chainhook parsing service is a specialized component within the backend that processes and handles blockchain-related webhook events. It's designed to parse, validate, and process webhook payloads from the Chainhook service, which monitors blockchain events and state changes. - -## Architecture - -The service is composed of three main components: - -1. **ChainhookService** (`service.py`) - - Acts as the main entry point for webhook processing - - Coordinates between the parser and handler components - - Implements the base WebhookService interface - -2. **ChainhookParser** (`parser.py`) - - Responsible for parsing raw webhook payloads into structured data - - Implements comprehensive validation and type checking - - Converts JSON data into strongly-typed Python objects - -3. 
**ChainhookHandler** (`handler.py`) - - Manages the processing of parsed webhook events - - Coordinates multiple specialized handlers for different event types - - Implements a sequential processing pipeline - -## Data Models - -The service uses a comprehensive set of data models (`models.py`) to represent blockchain data: - -- `ChainHookData`: Top-level container for webhook payloads -- `ChainHookInfo`: Metadata about the webhook configuration -- `Apply`: Represents block-level data and transactions -- `BlockIdentifier`: Block hash and index information -- `TransactionWithReceipt`: Detailed transaction data with receipts -- `Operation`: Individual blockchain operations -- `Event`: Transaction events and their data -- `Receipt`: Transaction receipts and execution results - -## Event Handlers - -The service includes several specialized handlers for different types of blockchain events: - -- `BlockStateHandler`: Processes block-level state changes -- `BuyEventHandler`: Handles purchase-related events -- `SellEventHandler`: Processes sale-related events -- `DAOProposalHandler`: Manages DAO proposal events -- `DAOVoteHandler`: Handles DAO voting events -- `ContractMessageHandler`: Processes smart contract messages -- `DAOProposalBurnHeightHandler`: Handles proposal burn height events -- `DAOProposalConclusionHandler`: Processes proposal conclusions - -## Processing Pipeline - -The webhook processing follows a sequential pipeline: - -1. **Parsing Phase** - - Raw JSON payload is received - - Data is validated and converted to typed objects - - Structured data is created using the defined models - -2. **Handling Phase** - - Block-level processing occurs first - - Transaction-level processing follows - - Each handler processes events it's responsible for - - Post-processing cleanup is performed - -3. **Error Handling** - - Comprehensive error catching and logging - - Structured error responses - - Transaction rollback support - -## Usage - -The service is automatically initialized when webhook events are received. It processes events in the following order: - -1. The webhook payload is received by the service -2. The parser converts the raw data into structured objects -3. The handler coordinates processing through specialized handlers -4. 
Results are logged and any necessary actions are taken - -## Logging - -The service implements comprehensive logging using the project's standard logging configuration: - -- DEBUG level for detailed processing information -- INFO level for standard operation logging -- ERROR level for exception handling -- Contextual information included in all log messages - -## Error Handling - -The service implements robust error handling: - -- Specific exception types for different error scenarios -- Comprehensive error logging -- Transaction rollback support -- Structured error responses - -## Security Considerations - -- Input validation on all webhook payloads -- Type checking and sanitization -- Secure handling of sensitive blockchain data -- Proper error handling to prevent information leakage - -## Dependencies - -The service relies on several key components: - -- Base webhook service infrastructure -- Logging configuration -- Type hints from the Python typing library -- JSON parsing and validation -- Blockchain-specific data models - -## Future Considerations - -- Potential for parallel processing of transactions -- Enhanced monitoring and metrics -- Additional specialized handlers for new event types -- Performance optimizations for large block processing \ No newline at end of file diff --git a/docs/chat_api_examples.md b/docs/chat_api_examples.md deleted file mode 100644 index 4e25d752..00000000 --- a/docs/chat_api_examples.md +++ /dev/null @@ -1,272 +0,0 @@ -# Chat API Examples - -This document provides examples of how to use the WebSocket-based Chat API. - -## Authentication - -The Chat API requires authentication using either a Bearer token or an API key. These can be provided as query parameters: - -- `token`: Bearer token for authentication -- `key`: API key for authentication - -## WebSocket Connection - -### Establishing a Connection - -To establish a WebSocket connection, connect to the `/chat/ws` endpoint with your authentication credentials: - -```javascript -// Using a Bearer token -const socket = new WebSocket('wss://your-api-url/chat/ws?token=your-bearer-token'); - -// Or using an API key -const socket = new WebSocket('wss://your-api-url/chat/ws?key=your-api-key'); -``` - -### Message Types - -The Chat API supports two main message types: - -1. **History Messages**: Request message history for a thread -2. **Chat Messages**: Send a new message to a thread - -All messages are JSON objects with a `type` field indicating the message type. - -## Requesting Message History - -To request the message history for a thread, send a message with the following format: - -```javascript -const historyRequest = { - type: 'history', - thread_id: '123e4567-e89b-12d3-a456-426614174000' -}; - -socket.send(JSON.stringify(historyRequest)); -``` - -### Example Response - -The server will respond with a series of messages representing the thread history: - -```javascript -// Message from user -{ - "type": "message", - "id": "msg_123", - "thread_id": "123e4567-e89b-12d3-a456-426614174000", - "role": "user", - "content": "Hello, can you help me with something?", - "created_at": "2023-06-15T14:30:00Z" -} - -// Message from assistant -{ - "type": "message", - "id": "msg_124", - "thread_id": "123e4567-e89b-12d3-a456-426614174000", - "role": "assistant", - "content": "Of course! I'd be happy to help. 
What do you need assistance with?", - "created_at": "2023-06-15T14:30:15Z" -} -``` - -## Sending a Chat Message - -To send a new message to a thread, use the following format: - -```javascript -const chatMessage = { - type: 'message', - thread_id: '123e4567-e89b-12d3-a456-426614174000', - agent_id: '234e5678-e89b-12d3-a456-426614174001', // Optional, can be null - content: 'I need help understanding how DAOs work.' -}; - -socket.send(JSON.stringify(chatMessage)); -``` - -### Example Response - -The server will process your message and respond with one or more messages: - -```javascript -// Acknowledgment of received message -{ - "type": "message_received", - "id": "msg_125", - "thread_id": "123e4567-e89b-12d3-a456-426614174000", - "job_id": "job_123" -} - -// Assistant's response (may come in multiple parts for streaming) -{ - "type": "message", - "id": "msg_126", - "thread_id": "123e4567-e89b-12d3-a456-426614174000", - "role": "assistant", - "content": "A DAO, or Decentralized Autonomous Organization, is a blockchain-based organization...", - "created_at": "2023-06-15T14:35:00Z" -} -``` - -## Error Handling - -If an error occurs, the server will send an error message: - -```javascript -{ - "type": "error", - "message": "Thread ID is required" -} -``` - -Common error scenarios include: -- Missing or invalid authentication -- Invalid thread ID -- Unknown message type -- Server processing errors - -## Complete Example - -Here's a complete example using JavaScript: - -```javascript -// Connect to the WebSocket -const socket = new WebSocket('wss://your-api-url/chat/ws?token=your-bearer-token'); - -// Handle connection open -socket.onopen = function(e) { - console.log('Connection established'); - - // Request thread history - const historyRequest = { - type: 'history', - thread_id: '123e4567-e89b-12d3-a456-426614174000' - }; - socket.send(JSON.stringify(historyRequest)); -}; - -// Handle messages from server -socket.onmessage = function(event) { - const message = JSON.parse(event.data); - - if (message.type === 'error') { - console.error('Error:', message.message); - return; - } - - console.log('Received message:', message); - - // Display message in UI - if (message.type === 'message') { - displayMessage(message); - } -}; - -// Handle errors -socket.onerror = function(error) { - console.error('WebSocket Error:', error); -}; - -// Handle connection close -socket.onclose = function(event) { - if (event.wasClean) { - console.log(`Connection closed cleanly, code=${event.code}, reason=${event.reason}`); - } else { - console.error('Connection died'); - } -}; - -// Function to send a new message -function sendMessage(content) { - const chatMessage = { - type: 'message', - thread_id: '123e4567-e89b-12d3-a456-426614174000', - agent_id: null, - content: content - }; - socket.send(JSON.stringify(chatMessage)); -} - -// Example UI function to display messages -function displayMessage(message) { - const messageElement = document.createElement('div'); - messageElement.className = `message ${message.role}`; - messageElement.textContent = message.content; - document.getElementById('chat-container').appendChild(messageElement); -} -``` - -## Python Example - -Here's an example using Python with the `websockets` library: - -```python -import asyncio -import json -import websockets -import uuid - -async def chat_client(): - # Connect to the WebSocket with authentication - uri = "wss://your-api-url/chat/ws?token=your-bearer-token" - - async with websockets.connect(uri) as websocket: - # Generate a new thread ID 
or use an existing one - thread_id = str(uuid.uuid4()) - - # Request thread history (if using an existing thread) - history_request = { - "type": "history", - "thread_id": thread_id - } - await websocket.send(json.dumps(history_request)) - - # Process history messages - async for message in websocket: - data = json.loads(message) - print(f"Received: {data}") - - # After receiving history, send a new message - if data.get("type") == "message": - # Send a new message - chat_message = { - "type": "message", - "thread_id": thread_id, - "agent_id": None, - "content": "Hello, I'd like to learn about blockchain technology." - } - await websocket.send(json.dumps(chat_message)) - break # Break after sending one message for this example - - # Continue processing responses - async for message in websocket: - data = json.loads(message) - print(f"Received: {data}") - -# Run the client -asyncio.run(chat_client()) -``` - -## Notes and Best Practices - -1. **Thread Management**: - - Create a new thread ID (UUID) for each new conversation - - Reuse the same thread ID to continue an existing conversation - -2. **Error Handling**: - - Always implement proper error handling for WebSocket connections - - Handle reconnection logic for network interruptions - -3. **Message Processing**: - - Process messages based on their `type` field - - For streaming responses, accumulate content until complete - -4. **Authentication**: - - Keep authentication tokens secure - - Implement token refresh logic if tokens expire - -5. **Performance**: - - Limit the frequency of message requests to avoid rate limiting - - Consider implementing backoff strategies for reconnection attempts \ No newline at end of file diff --git a/docs/chat_api_reference.md b/docs/chat_api_reference.md deleted file mode 100644 index 08ce211d..00000000 --- a/docs/chat_api_reference.md +++ /dev/null @@ -1,191 +0,0 @@ -# Chat API Reference - -This document provides a technical reference for the WebSocket-based Chat API. - -## API Endpoint - -``` -WebSocket: /chat/ws -``` - -## Authentication - -Authentication is required for all WebSocket connections. Provide one of the following query parameters: - -| Parameter | Type | Description | -|-----------|--------|--------------------------------------------| -| `token` | string | Bearer token for authentication | -| `key` | string | API key for authentication | - -## Message Format - -All messages sent to and received from the WebSocket are JSON objects. Each message must include a `type` field that indicates the message type. - -### Client Message Types - -#### History Request - -Request message history for a specific thread. - -```json -{ - "type": "history", - "thread_id": "string (UUID)" -} -``` - -| Field | Type | Required | Description | -|------------|--------|----------|----------------------------------| -| `type` | string | Yes | Must be "history" | -| `thread_id`| string | Yes | UUID of the thread | - -#### Chat Message - -Send a new message to a thread. 
- -```json -{ - "type": "message", - "thread_id": "string (UUID)", - "agent_id": "string (UUID) or null", - "content": "string" -} -``` - -| Field | Type | Required | Description | -|------------|--------|----------|----------------------------------| -| `type` | string | Yes | Must be "message" | -| `thread_id`| string | Yes | UUID of the thread | -| `agent_id` | string | No | UUID of the agent to use, or null for default | -| `content` | string | Yes | The message content | - -### Server Message Types - -#### Message - -A message in the conversation. - -```json -{ - "type": "message", - "id": "string", - "thread_id": "string (UUID)", - "role": "string", - "content": "string", - "created_at": "string (ISO 8601 datetime)" -} -``` - -| Field | Type | Description | -|-------------|--------|--------------------------------------------| -| `type` | string | "message" | -| `id` | string | Unique identifier for the message | -| `thread_id` | string | UUID of the thread | -| `role` | string | Either "user" or "assistant" | -| `content` | string | The message content | -| `created_at`| string | ISO 8601 formatted datetime | - -#### Message Received - -Acknowledgment that a message was received and is being processed. - -```json -{ - "type": "message_received", - "id": "string", - "thread_id": "string (UUID)", - "job_id": "string" -} -``` - -| Field | Type | Description | -|-------------|--------|--------------------------------------------| -| `type` | string | "message_received" | -| `id` | string | Unique identifier for the message | -| `thread_id` | string | UUID of the thread | -| `job_id` | string | Identifier for the processing job | - -#### Error - -An error message. - -```json -{ - "type": "error", - "message": "string" -} -``` - -| Field | Type | Description | -|-----------|--------|--------------------------------------------| -| `type` | string | "error" | -| `message` | string | Description of the error | - -## Error Codes - -The WebSocket connection may return standard WebSocket close codes: - -| Code | Description | Handling Strategy | -|------|-----------------------------|------------------------------------------| -| 1000 | Normal closure | Normal operation | -| 1001 | Going away | Reconnect if needed | -| 1002 | Protocol error | Check message format | -| 1003 | Unsupported data | Check message format | -| 1008 | Policy violation | Check authentication | -| 1011 | Internal server error | Retry with exponential backoff | - -Additionally, HTTP status codes may be returned before the WebSocket connection is established: - -| Code | Description | Handling Strategy | -|------|-----------------------------|------------------------------------------| -| 401 | Unauthorized | Check authentication credentials | -| 404 | Not found | Check endpoint URL | -| 500 | Internal server error | Retry with exponential backoff | - -## Rate Limiting - -The Chat API implements rate limiting to prevent abuse. Clients should respect the following limits: - -- Maximum of 10 messages per minute per user -- Maximum of 100 messages per hour per user - -When rate limits are exceeded, the server will close the WebSocket connection with code 1008 (Policy Violation). - -## Thread Lifecycle - -Threads are the primary organizational unit for conversations. Each thread has the following lifecycle: - -1. **Creation**: A thread is implicitly created when the first message is sent with a new thread ID -2. **Active**: Messages can be sent to and received from the thread -3. 
**Archived**: After 30 days of inactivity, threads are archived but still accessible -4. **Deleted**: Threads may be deleted according to data retention policies - -## Implementation Notes - -### Connection Management - -- Clients should implement reconnection logic with exponential backoff -- Connections may be terminated after 5 minutes of inactivity -- A single client should maintain only one WebSocket connection at a time - -### Message Processing - -- Messages within a thread are processed in the order they are received -- Large messages may be rejected (maximum content size: 4KB) -- Binary messages are not supported - -### Security Considerations - -- Authentication tokens should be kept secure -- All communication is encrypted via TLS -- Do not send sensitive information in message content - -## API Versioning - -The current API version is v1. Future versions may be introduced with breaking changes. - -To ensure compatibility, clients should: - -1. Handle unknown message types gracefully -2. Ignore unknown fields in messages -3. Check for API announcements regarding deprecation and new features \ No newline at end of file diff --git a/docs/runners.md b/docs/runners.md deleted file mode 100644 index be1391c4..00000000 --- a/docs/runners.md +++ /dev/null @@ -1,125 +0,0 @@ -# Runners System Documentation - -## Overview - -The runners system is a core component of the AIBTC backend that manages and executes various automated tasks. It provides a flexible and extensible framework for scheduling and running different types of jobs, from DAO operations to Twitter interactions. - -## Architecture - -### Core Components - -1. **BaseTask** - - Abstract base class for all runner tasks - - Provides common functionality for task execution and validation - - Implements logging and metrics collection - - Supports generic result types through type parameters - -2. **JobManager** - - Manages scheduled jobs using AsyncIOScheduler - - Handles job configuration and scheduling - - Supports enabling/disabling jobs through configuration - -3. **JobRegistry** - - Maintains a registry of available runners - - Maps job types to their corresponding runner implementations - - Provides registration and lookup functionality - -### Job Types - -The system supports several types of jobs: - -- `DAO`: General DAO operations -- `DAO_PROPOSAL_VOTE`: Handling DAO proposal voting -- `DAO_PROPOSAL_CONCLUDE`: Concluding DAO proposals -- `DAO_TWEET`: Managing DAO-related tweets -- `TWEET`: General tweet operations -- `AGENT_ACCOUNT_DEPLOY`: Deploying agent accounts - -## Configuration - -Runners are configured through environment variables and configuration files. Key configuration includes: - -- Twitter profile and agent IDs -- Wallet configurations -- Job intervals and scheduling parameters -- Feature toggles for enabling/disabling specific runners - -## Job Execution Flow - -1. **Initialization** - - JobManager loads configurations for all available jobs - - Enabled jobs are scheduled with specified intervals - -2. **Execution** - - Jobs are executed according to their schedule - - Each execution follows a standard pipeline: - 1. Configuration validation - 2. Prerequisites validation - 3. Task-specific validation - 4. Task execution - 5. Result logging and metrics collection - -3. **Error Handling** - - Comprehensive error handling and logging - - Support for retries with configurable retry counts - - Detailed error reporting and metrics - -## Runner Implementation - -To implement a new runner: - -1. 
Create a new class inheriting from `BaseTask` -2. Define the result type using the generic parameter -3. Implement required methods: - - `_validate_config` - - `_validate_prerequisites` - - `_validate_task_specific` - - `_execute_impl` -4. Register the runner with `JobRegistry` - -Example: -```python -class MyCustomRunner(BaseTask[MyCustomResult]): - async def _execute_impl(self, context: JobContext) -> List[MyCustomResult]: - # Implementation here - pass -``` - -## Monitoring and Logging - -The runner system includes comprehensive logging: - -- Task start and completion times -- Success and failure metrics -- Execution duration -- Detailed error information -- Debug-level configuration logging - -## Best Practices - -1. **Validation** - - Implement thorough validation in all runners - - Check prerequisites before execution - - Validate configuration and parameters - -2. **Error Handling** - - Use specific exception types - - Provide detailed error messages - - Implement appropriate retry logic - -3. **Logging** - - Use appropriate log levels - - Include context in log messages - - Log metrics for monitoring - -4. **Configuration** - - Use environment variables for sensitive data - - Implement feature toggles for runners - - Document configuration requirements - -## Security Considerations - -- Sensitive configuration is managed through environment variables -- Wallet operations require proper authentication -- Task validation ensures proper authorization -- Error messages are sanitized for security \ No newline at end of file diff --git a/docs/tools_api_examples.md b/docs/tools_api_examples.md deleted file mode 100644 index 4d8e8f2c..00000000 --- a/docs/tools_api_examples.md +++ /dev/null @@ -1,469 +0,0 @@ -# Tools API Examples - -This document provides practical examples of how to use the Tools API in various programming languages and scenarios. - -## Basic Usage - -The Tools API allows you to discover and interact with the available tools in the system. The primary endpoint is: - -``` -GET /tools/available -``` - -This endpoint returns a list of all available tools, their descriptions, and parameter requirements. - -## Authentication - -All requests to the Tools API require authentication. You can use either a Bearer token or an API key. - -### Using a Bearer Token - -```bash -curl -X GET "https://api.example.com/tools/available" \ - -H "Authorization: Bearer your_token_here" -``` - -### Using an API Key - -```bash -curl -X GET "https://api.example.com/tools/available" \ - -H "X-API-Key: your_api_key_here" -``` - -## Example: Fetching Available Tools - -### Python Example - -```python -import requests - -def get_available_tools(api_url, api_key=None, bearer_token=None): - """Fetch available tools from the API. 
- - Args: - api_url: Base URL of the API - api_key: Optional API key for authentication - bearer_token: Optional bearer token for authentication - - Returns: - List of available tools - """ - headers = {} - - if api_key: - headers["X-API-Key"] = api_key - elif bearer_token: - headers["Authorization"] = f"Bearer {bearer_token}" - else: - raise ValueError("Either api_key or bearer_token must be provided") - - response = requests.get(f"{api_url}/tools/available", headers=headers) - - if response.status_code == 200: - return response.json() - else: - raise Exception(f"Error fetching tools: {response.status_code} - {response.text}") - -# Example usage -api_url = "https://api.example.com" -bearer_token = "your_token_here" - -try: - tools = get_available_tools(api_url, bearer_token=bearer_token) - - # Print all tool names and descriptions - for tool in tools: - print(f"{tool['name']}: {tool['description']}") - - # Find tools in a specific category - wallet_tools = [tool for tool in tools if tool['category'] == "WALLET"] - print(f"\nFound {len(wallet_tools)} wallet tools:") - for tool in wallet_tools: - print(f"- {tool['name']}") - -except Exception as e: - print(f"Error: {e}") -``` - -### JavaScript Example - -```javascript -async function getAvailableTools(apiUrl, apiKey = null, bearerToken = null) { - const headers = {}; - - if (apiKey) { - headers['X-API-Key'] = apiKey; - } else if (bearerToken) { - headers['Authorization'] = `Bearer ${bearerToken}`; - } else { - throw new Error('Either apiKey or bearerToken must be provided'); - } - - try { - const response = await fetch(`${apiUrl}/tools/available`, { headers }); - - if (!response.ok) { - throw new Error(`Error fetching tools: ${response.status} - ${await response.text()}`); - } - - return await response.json(); - } catch (error) { - console.error('Failed to fetch tools:', error); - throw error; - } -} - -// Example usage -const apiUrl = 'https://api.example.com'; -const bearerToken = 'your_token_here'; - -getAvailableTools(apiUrl, null, bearerToken) - .then(tools => { - // Print all tool names and descriptions - tools.forEach(tool => { - console.log(`${tool.name}: ${tool.description}`); - }); - - // Find tools in a specific category - const daoTools = tools.filter(tool => tool.category === 'DAO'); - console.log(`\nFound ${daoTools.length} DAO tools:`); - daoTools.forEach(tool => { - console.log(`- ${tool.name}`); - }); - }) - .catch(error => { - console.error('Error:', error); - }); -``` - -## Working with Tool Parameters - -Each tool has a `parameters` field that contains a JSON string with information about the required parameters. Here's how to parse and use this information: - -### Python Example - -```python -import json - -def parse_tool_parameters(tool): - """Parse the parameters JSON string from a tool object. 
- - Args: - tool: Tool object from the API - - Returns: - Dictionary of parameter information - """ - return json.loads(tool['parameters']) - -# Example usage -tools = get_available_tools(api_url, bearer_token=bearer_token) - -# Find a specific tool -wallet_send_tool = next((tool for tool in tools if tool['id'] == 'wallet_send_stx'), None) - -if wallet_send_tool: - # Parse parameters - parameters = parse_tool_parameters(wallet_send_tool) - - print(f"Parameters for {wallet_send_tool['name']}:") - for param_name, param_info in parameters.items(): - print(f"- {param_name}: {param_info['description']} (Type: {param_info['type']})") -``` - -### JavaScript Example - -```javascript -function parseToolParameters(tool) { - return JSON.parse(tool.parameters); -} - -// Example usage -getAvailableTools(apiUrl, null, bearerToken) - .then(tools => { - // Find a specific tool - const faktoryBuyTool = tools.find(tool => tool.id === 'faktory_exec_buy'); - - if (faktoryBuyTool) { - // Parse parameters - const parameters = parseToolParameters(faktoryBuyTool); - - console.log(`Parameters for ${faktoryBuyTool.name}:`); - Object.entries(parameters).forEach(([paramName, paramInfo]) => { - console.log(`- ${paramName}: ${paramInfo.description} (Type: ${paramInfo.type})`); - }); - } - }); -``` - -## Filtering Tools by Category - -You can filter tools by category to find the ones relevant to your use case: - -### Python Example - -```python -def get_tools_by_category(tools, category): - """Filter tools by category. - - Args: - tools: List of tools from the API - category: Category to filter by - - Returns: - List of tools in the specified category - """ - return [tool for tool in tools if tool['category'] == category] - -# Example usage -tools = get_available_tools(api_url, bearer_token=bearer_token) - -# Get all categories -categories = set(tool['category'] for tool in tools) -print(f"Available categories: {', '.join(categories)}") - -# Get tools for each category -for category in categories: - category_tools = get_tools_by_category(tools, category) - print(f"\n{category} Tools ({len(category_tools)}):") - for tool in category_tools: - print(f"- {tool['name']}") -``` - -## Searching for Tools - -You can search for tools by name or description: - -### Python Example - -```python -def search_tools(tools, query): - """Search for tools by name or description. 
- - Args: - tools: List of tools from the API - query: Search query string - - Returns: - List of tools matching the query - """ - query = query.lower() - return [ - tool for tool in tools - if query in tool['name'].lower() or query in tool['description'].lower() - ] - -# Example usage -tools = get_available_tools(api_url, bearer_token=bearer_token) - -# Search for tools related to "balance" -balance_tools = search_tools(tools, "balance") -print(f"Found {len(balance_tools)} tools related to 'balance':") -for tool in balance_tools: - print(f"- {tool['name']}: {tool['description']}") -``` - -## Complete Application Example - -Here's a more complete example that demonstrates how to build a simple tool explorer application: - -### Python Example (Flask) - -```python -from flask import Flask, render_template, request, jsonify -import requests -import json - -app = Flask(__name__) - -API_URL = "https://api.example.com" -API_KEY = "your_api_key_here" # In production, store this securely - -@app.route('/') -def index(): - return render_template('index.html') - -@app.route('/api/tools') -def get_tools(): - try: - headers = {"X-API-Key": API_KEY} - response = requests.get(f"{API_URL}/tools/available", headers=headers) - - if response.status_code != 200: - return jsonify({"error": f"API error: {response.status_code}"}), 500 - - tools = response.json() - - # Process tools if needed - for tool in tools: - # Parse parameters for easier use in frontend - tool['parsed_parameters'] = json.loads(tool['parameters']) - - return jsonify(tools) - except Exception as e: - return jsonify({"error": str(e)}), 500 - -@app.route('/api/tools/search') -def search_tools(): - query = request.args.get('q', '').lower() - category = request.args.get('category', '') - - try: - # Get all tools first - headers = {"X-API-Key": API_KEY} - response = requests.get(f"{API_URL}/tools/available", headers=headers) - - if response.status_code != 200: - return jsonify({"error": f"API error: {response.status_code}"}), 500 - - tools = response.json() - - # Filter by category if specified - if category: - tools = [tool for tool in tools if tool['category'] == category] - - # Filter by search query if specified - if query: - tools = [ - tool for tool in tools - if query in tool['name'].lower() or query in tool['description'].lower() - ] - - return jsonify(tools) - except Exception as e: - return jsonify({"error": str(e)}), 500 - -if __name__ == '__main__': - app.run(debug=True) -``` - -With corresponding HTML template: - -```html - - - - Tools Explorer - - - -

-    <h1>Tools Explorer</h1>
-    <!-- The search box, category filter, results list, and the script that calls
-         /api/tools and /api/tools/search are implied by the Flask routes above,
-         but their markup is not recoverable from this diff. -->
- - - - -``` - -## Conclusion - -The Tools API provides a flexible way to discover and interact with the various tools available in the system. By using the examples in this document, you can build applications that leverage these tools for wallet management, DAO operations, market interactions, and more. - -Remember to always authenticate your requests and handle errors appropriately to ensure a smooth user experience. \ No newline at end of file diff --git a/docs/tools_api_reference.md b/docs/tools_api_reference.md deleted file mode 100644 index 6661ba0e..00000000 --- a/docs/tools_api_reference.md +++ /dev/null @@ -1,160 +0,0 @@ -# Tools API Reference - -This document provides a technical reference for the Tools API, which allows you to discover and interact with the available tools in the system. - -## API Endpoint - -``` -GET /tools/available -``` - -## Authentication - -Authentication is required for all API requests. Provide one of the following: - -- **Bearer Token**: Include in the `Authorization` header as `Bearer ` -- **API Key**: Include in the `X-API-Key` header - -## Response Format - -The API returns a JSON array of tool objects, each with the following structure: - -```json -{ - "id": "string", - "name": "string", - "description": "string", - "category": "string", - "parameters": "string" -} -``` - -### Tool Object Fields - -| Field | Type | Description | -|---------------|--------|------------------------------------------------------------| -| `id` | string | Unique identifier for the tool (e.g., "faktory_exec_buy") | -| `name` | string | Display name of the tool (e.g., "Exec Buy") | -| `description` | string | Human-readable description of the tool's functionality | -| `category` | string | Category the tool belongs to (e.g., "FAKTORY", "WALLET") | -| `parameters` | string | JSON string containing parameter information (see below) | - -### Parameters Format - -The `parameters` field is a JSON string that, when parsed, contains an object mapping parameter names to their details: - -```json -{ - "parameter_name": { - "description": "string", - "type": "string" - } -} -``` - -| Field | Type | Description | -|---------------|--------|-------------------------------------------------------| -| `description` | string | Human-readable description of the parameter | -| `type` | string | Data type of the parameter (e.g., "str", "int") | - -## Example Request - -```bash -curl -X GET "https://api.example.com/tools/available" \ - -H "Authorization: Bearer your_token_here" -``` - -## Example Response - -```json -[ - { - "id": "wallet_get_my_balance", - "name": "Get My Balance", - "description": "Retrieve the current balance of your wallet", - "category": "WALLET", - "parameters": "{\"wallet_id\":{\"description\":\"ID of the wallet\",\"type\":\"UUID\"}}" - }, - { - "id": "faktory_get_token", - "name": "Get Token", - "description": "Get information about a token on Faktory", - "category": "FAKTORY", - "parameters": "{\"token_id\":{\"description\":\"ID of the token\",\"type\":\"str\"},\"wallet_id\":{\"description\":\"ID of the wallet\",\"type\":\"UUID\"}}" - } -] -``` - -## Error Responses - -| Status Code | Description | Response Body | -|-------------|----------------------------|---------------------------------------------------| -| 401 | Unauthorized | `{"detail": "Not authenticated"}` | -| 403 | Forbidden | `{"detail": "Not authorized to access this API"}` | -| 500 | Internal Server Error | `{"detail": "Failed to serve available tools: [error message]"}` | - -## Tool Categories - 
-Tools are organized into the following categories: - -| Category | Description | -|--------------|-------------------------------------------------------| -| WALLET | Tools for wallet management and transactions | -| DAO | Tools for DAO operations and governance | -| FAKTORY | Tools for interacting with the Faktory marketplace | -| JING | Tools for the Jing.js trading platform | -| CONTRACTS | Tools for smart contract interactions | -| DATABASE | Tools for database operations | -| STACKS | Tools for Stacks blockchain interactions | -| ALEX | Tools for ALEX DEX operations | -| BITFLOW | Tools for Bitflow trading platform | -| VELAR | Tools for Velar protocol interactions | -| LUNARCRUSH | Tools for LunarCrush data and analytics | -| STXCITY | Tools for STX City platform | - -## Available Tools - -The system provides a wide range of tools across different categories. Here are some examples: - -### Wallet Tools -- `wallet_get_my_balance`: Get your wallet balance -- `wallet_get_my_address`: Get your wallet address -- `wallet_send_stx`: Send STX to another address -- `wallet_send_sip10`: Send SIP-10 tokens to another address - -### DAO Tools -- `dao_charter_get_current`: Get the current DAO charter -- `dao_messaging_send`: Send a message through the DAO -- `dao_payments_get_invoice`: Get invoice details -- `dao_treasury_get_allowed_asset`: Check if an asset is allowed in the treasury - -### Market Tools -- `faktory_exec_buy`: Execute a buy order on Faktory -- `faktory_get_token`: Get token information from Faktory -- `jing_get_order_book`: Get the order book from Jing.js -- `jing_submit_bid`: Submit a bid on Jing.js - -## Implementation Notes - -- The Tools API is designed to be used by both human users and AI agents -- Tool availability may depend on user permissions and wallet configuration -- Some tools require specific parameters like `wallet_id` which are automatically populated when possible -- Tool responses are standardized but vary based on the specific tool functionality - -## Rate Limiting - -The Tools API implements rate limiting to prevent abuse: - -- Maximum of 60 requests per minute per user -- Maximum of 1000 requests per day per user - -When rate limits are exceeded, the server will return a 429 Too Many Requests response. - -## API Versioning - -The current API version is v1. Future versions may be introduced with breaking changes. - -To ensure compatibility, clients should: - -1. Handle unknown fields in responses gracefully -2. Check for API announcements regarding deprecation and new features \ No newline at end of file diff --git a/docs/tools_implementation_guide.md b/docs/tools_implementation_guide.md deleted file mode 100644 index f6f37c26..00000000 --- a/docs/tools_implementation_guide.md +++ /dev/null @@ -1,386 +0,0 @@ -# Tools Implementation Guide - -This document provides a step-by-step guide on how to implement new tools in the system. - -## Overview - -Tools are modular components that provide specific functionality to the system. Each tool follows a standard interface, making it easy to add new capabilities without modifying the core codebase. - -## Prerequisites - -Before implementing a new tool, you should have: - -1. A clear understanding of the functionality you want to implement -2. Familiarity with Python and the Pydantic library -3. Access to the codebase and development environment - -## Tool Structure - -Each tool consists of: - -1. A Pydantic model defining the input parameters -2. A class that implements the tool functionality -3. 
Registration in the tools factory - -## Step 1: Define the Tool Category - -Tools are organized by category. Choose an existing category or create a new one if your tool doesn't fit into any existing categories: - -- WALLET: Tools for wallet management and transactions -- DAO: Tools for DAO operations and governance -- FAKTORY: Tools for interacting with the Faktory marketplace -- JING: Tools for the Jing.js trading platform -- CONTRACTS: Tools for smart contract interactions -- DATABASE: Tools for database operations -- STACKS: Tools for Stacks blockchain interactions -- ALEX: Tools for ALEX DEX operations -- BITFLOW: Tools for Bitflow trading platform -- VELAR: Tools for Velar protocol interactions -- LUNARCRUSH: Tools for LunarCrush data and analytics -- STXCITY: Tools for STX City platform - -## Step 2: Create a New Tool File - -Create a new Python file in the appropriate directory under the `tools/` folder. If you're creating a tool for a new category, you may need to create a new directory. - -For example, if you're creating a new tool for the WALLET category, you might create a file at `tools/wallet/my_new_tool.py`. - -## Step 3: Define the Tool Parameters - -Define a Pydantic model for your tool's input parameters. This model will be used to validate the inputs and generate documentation. - -```python -from pydantic import BaseModel, Field -from typing import Optional, List, Dict, Any - -class MyNewToolParameters(BaseModel): - """Parameters for MyNewTool.""" - - param1: str = Field( - ..., - description="Description of parameter 1" - ) - param2: int = Field( - default=0, - description="Description of parameter 2" - ) - param3: Optional[List[str]] = Field( - default=None, - description="Description of parameter 3" - ) -``` - -## Step 4: Implement the Tool Class - -Create a class that implements your tool's functionality. The class should inherit from `BaseTool` and implement the required methods. - -```python -from langchain.tools.base import BaseTool -from lib.logger import configure_logger -from typing import Any, Dict, Optional, Type - -logger = configure_logger(__name__) - -class MyNewTool(BaseTool): - """Tool that provides [description of your tool's functionality].""" - - name = "category_my_new_tool" - description = "Detailed description of what this tool does" - args_schema: Type[BaseModel] = MyNewToolParameters - - def __init__(self, wallet_id: Optional[UUID] = None): - """Initialize the tool. - - Args: - wallet_id: Optional wallet ID to use for this tool - """ - super().__init__() - self.wallet_id = wallet_id - - def _run(self, param1: str, param2: int = 0, param3: Optional[List[str]] = None) -> Dict[str, Any]: - """Execute the tool functionality. - - Args: - param1: Description of parameter 1 - param2: Description of parameter 2 - param3: Description of parameter 3 - - Returns: - Dict containing the tool's output - - Raises: - Exception: If an error occurs during execution - """ - try: - logger.info(f"Running {self.name} with params: {param1}, {param2}, {param3}") - - # Implement your tool's functionality here - result = {} - - # Example implementation - result["status"] = "success" - result["data"] = { - "param1": param1, - "param2": param2, - "param3": param3, - } - - return result - except Exception as e: - logger.error(f"Error running {self.name}", exc_info=e) - raise - - async def _arun(self, param1: str, param2: int = 0, param3: Optional[List[str]] = None) -> Dict[str, Any]: - """Execute the tool functionality asynchronously. 
- - This method can be implemented if your tool supports async execution. - If not needed, it can simply call the synchronous _run method. - - Args: - param1: Description of parameter 1 - param2: Description of parameter 2 - param3: Description of parameter 3 - - Returns: - Dict containing the tool's output - """ - return self._run(param1, param2, param3) -``` - -## Step 5: Register the Tool in the Factory - -Import and register your tool in the `tools/tools_factory.py` file: - -1. Import your tool at the top of the file: - -```python -from .category.my_new_tool import MyNewTool -``` - -2. Add your tool to the `initialize_tools` function: - -```python -def initialize_tools( - profile: Optional[Profile] = None, - agent_id: Optional[UUID] = None, -) -> Dict[str, LangChainBaseTool]: - # ... existing code ... - - tools = { - # ... existing tools ... - "category_my_new_tool": MyNewTool(wallet_id), - } - - return tools -``` - -## Step 6: Write Unit Tests - -Create unit tests for your tool to ensure it works as expected. Tests should be placed in the `tests/tools/` directory. - -```python -import pytest -from unittest.mock import MagicMock, patch -from tools.category.my_new_tool import MyNewTool - -def test_my_new_tool_initialization(): - """Test that the tool initializes correctly.""" - tool = MyNewTool(wallet_id="test-wallet-id") - assert tool.name == "category_my_new_tool" - assert tool.wallet_id == "test-wallet-id" - -def test_my_new_tool_run(): - """Test that the tool runs correctly.""" - tool = MyNewTool() - - # Mock any external dependencies - with patch("some.external.dependency", MagicMock(return_value="mocked_result")): - result = tool._run(param1="test", param2=42, param3=["a", "b", "c"]) - - assert result["status"] == "success" - assert result["data"]["param1"] == "test" - assert result["data"]["param2"] == 42 - assert result["data"]["param3"] == ["a", "b", "c"] - -def test_my_new_tool_error_handling(): - """Test that the tool handles errors correctly.""" - tool = MyNewTool() - - # Mock an external dependency to raise an exception - with patch("some.external.dependency", MagicMock(side_effect=Exception("Test error"))): - with pytest.raises(Exception) as excinfo: - tool._run(param1="test") - - assert "Test error" in str(excinfo.value) -``` - -## Step 7: Document Your Tool - -Add documentation for your tool in the appropriate documentation files: - -1. Update the tool categories in `docs/tools_api_reference.md` if you've created a new category -2. 
Add examples of how to use your tool in `docs/tools_api_examples.md` - -## Best Practices - -### Naming Conventions - -- Tool class names should be descriptive and follow PascalCase (e.g., `WalletGetBalanceTool`) -- Tool IDs should follow the format `category_action_noun` (e.g., `wallet_get_balance`) -- Parameter names should be descriptive and follow snake_case - -### Error Handling - -- Always use try/except blocks to catch and log errors -- Provide meaningful error messages -- Use the logger to log errors with appropriate context - -### Documentation - -- Provide detailed docstrings for your tool class and methods -- Include parameter descriptions in the Pydantic model -- Document any external dependencies or requirements - -### Performance - -- Consider the performance implications of your tool -- Implement caching where appropriate -- Use async methods for I/O-bound operations - -## Example: Complete Tool Implementation - -Here's a complete example of a tool that retrieves the price of a cryptocurrency: - -```python -# tools/crypto/get_price.py -from langchain.tools.base import BaseTool -from lib.logger import configure_logger -from pydantic import BaseModel, Field -from typing import Any, Dict, Optional, Type -import aiohttp -import uuid - -logger = configure_logger(__name__) - -class GetCryptoPriceParameters(BaseModel): - """Parameters for the GetCryptoPrice tool.""" - - symbol: str = Field( - ..., - description="The cryptocurrency symbol (e.g., BTC, ETH)" - ) - currency: str = Field( - default="USD", - description="The currency to get the price in (e.g., USD, EUR)" - ) - -class GetCryptoPriceTool(BaseTool): - """Tool that retrieves the current price of a cryptocurrency.""" - - name = "crypto_get_price" - description = "Get the current price of a cryptocurrency in a specified currency" - args_schema: Type[BaseModel] = GetCryptoPriceParameters - - def __init__(self, wallet_id: Optional[uuid.UUID] = None): - """Initialize the tool. - - Args: - wallet_id: Optional wallet ID (not used for this tool) - """ - super().__init__() - self.wallet_id = wallet_id - - def _run(self, symbol: str, currency: str = "USD") -> Dict[str, Any]: - """Execute the tool functionality synchronously. - - This method delegates to the async version for simplicity. - - Args: - symbol: The cryptocurrency symbol (e.g., BTC, ETH) - currency: The currency to get the price in (e.g., USD, EUR) - - Returns: - Dict containing the price information - """ - import asyncio - return asyncio.run(self._arun(symbol, currency)) - - async def _arun(self, symbol: str, currency: str = "USD") -> Dict[str, Any]: - """Execute the tool functionality asynchronously. 
- - Args: - symbol: The cryptocurrency symbol (e.g., BTC, ETH) - currency: The currency to get the price in (e.g., USD, EUR) - - Returns: - Dict containing the price information - - Raises: - Exception: If an error occurs during execution - """ - try: - logger.info(f"Getting price for {symbol} in {currency}") - - # Normalize inputs - symbol = symbol.upper() - currency = currency.upper() - - # Make API request - async with aiohttp.ClientSession() as session: - url = f"https://api.example.com/v1/cryptocurrency/price" - params = {"symbol": symbol, "currency": currency} - - async with session.get(url, params=params) as response: - if response.status != 200: - error_text = await response.text() - logger.error(f"API error: {response.status} - {error_text}") - return { - "status": "error", - "message": f"Failed to get price: {response.status}", - } - - data = await response.json() - - # Process and return the result - return { - "status": "success", - "data": { - "symbol": symbol, - "currency": currency, - "price": data["price"], - "last_updated": data["last_updated"], - } - } - except Exception as e: - logger.error(f"Error getting price for {symbol}", exc_info=e) - return { - "status": "error", - "message": f"Failed to get price: {str(e)}", - } -``` - -## Troubleshooting - -### Common Issues - -1. **Tool not appearing in available tools** - - Check that the tool is properly registered in `tools_factory.py` - - Verify that the tool name follows the correct format - -2. **Parameter validation errors** - - Check that the parameters in your `_run` and `_arun` methods match the Pydantic model - - Ensure that required parameters are provided - -3. **Import errors** - - Check for circular imports - - Verify that all dependencies are installed - -### Debugging Tips - -- Use the logger to add debug statements -- Test your tool in isolation before integrating it -- Use the `pdb` debugger to step through your code - -## Conclusion - -By following this guide, you should be able to implement new tools that seamlessly integrate with the existing system. Remember to follow the established patterns and best practices to ensure consistency and reliability. \ No newline at end of file diff --git a/docs/webhook_examples.md b/docs/webhook_examples.md deleted file mode 100644 index 05eca9a7..00000000 --- a/docs/webhook_examples.md +++ /dev/null @@ -1,158 +0,0 @@ -# Webhook API Examples - -This document provides examples of how to call the webhook endpoints with proper authentication. - -## Authentication - -All webhook endpoints require Bearer token authentication. You need to include an `Authorization` header with a valid token: - -``` -Authorization: Bearer your-webhook-auth-token -``` - -The token must match the one configured in the `AIBTC_WEBHOOK_AUTH_TOKEN` environment variable. 
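For reference, here is a minimal Python sketch of the same authentication pattern using `httpx`. The URL, token, and payload below are placeholders that mirror the curl examples that follow; they are not taken from the project's code.

```python
import httpx

# Placeholder values: substitute your deployment's URL and the token that
# matches AIBTC_WEBHOOK_AUTH_TOKEN.
WEBHOOK_URL = "https://your-api-url/webhooks/chainhook"
WEBHOOK_AUTH_TOKEN = "your-webhook-auth-token"

headers = {
    "Authorization": f"Bearer {WEBHOOK_AUTH_TOKEN}",
    "Content-Type": "application/json",
}

# A real Chainhook payload goes here (see the example request below); an empty
# "apply" list only keeps the sketch short.
payload = {"apply": []}

response = httpx.post(WEBHOOK_URL, headers=headers, json=payload, timeout=30.0)
response.raise_for_status()
print(response.json())
```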
- -## Chainhook Webhook - -### Example Request - -```bash -curl -X POST https://your-api-url/webhooks/chainhook \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer your-webhook-auth-token" \ - -d '{ - "apply": [ - { - "block_identifier": { - "index": 12345, - "hash": "0x1234567890abcdef" - }, - "transactions": [ - { - "transaction_identifier": { - "hash": "0xabcdef1234567890" - }, - "operations": [ - { - "type": "contract_call", - "contract_identifier": "SP123...", - "contract_call": { - "function_name": "example-function", - "function_args": [] - } - } - ] - } - ] - } - ] - }' -``` - -### Example Response - -```json -{ - "success": true, - "message": "Webhook processed successfully", - "data": { - "processed_transactions": 1 - } -} -``` - -## DAO Creation Webhook - -The DAO webhook is used to record a new DAO in the system (not for deployment). - -### Example Request - -```bash -curl -X POST https://your-api-url/webhooks/dao \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer your-webhook-auth-token" \ - -d '{ - "name": "Example DAO", - "mission": "To build a better future with blockchain", - "description": "This is an example DAO for demonstration purposes", - "extensions": [ - { - "type": "voting", - "contract_principal": "SP123...", - "tx_id": "0xabcdef1234567890" - }, - { - "type": "treasury", - "contract_principal": "SP456...", - "tx_id": "0x0987654321fedcba" - } - ], - "token": { - "name": "Example Token", - "symbol": "EXT", - "decimals": 6, - "max_supply": "1000000000", - "contract_principal": "SP789...", - "tx_id": "0x1122334455667788", - "description": "Governance token for Example DAO", - "uri": "https://example.com/token", - "image_url": "https://example.com/token.png", - "x_url": "https://x.com/exampletoken", - "telegram_url": "https://t.me/exampletoken", - "website_url": "https://example.com" - } - }' -``` - -### Example Response - -```json -{ - "success": true, - "message": "Successfully created DAO 'Example DAO' with ID: 123e4567-e89b-12d3-a456-426614174000", - "data": { - "dao_id": "123e4567-e89b-12d3-a456-426614174000", - "extension_ids": [ - "234e5678-e89b-12d3-a456-426614174001", - "345e6789-e89b-12d3-a456-426614174002" - ], - "token_id": "456e7890-e89b-12d3-a456-426614174003" - } -} -``` - -## Error Responses - -### Authentication Failure - -```json -{ - "detail": "Missing Authorization header" -} -``` - -```json -{ - "detail": "Invalid Authorization format. Use 'Bearer '" -} -``` - -```json -{ - "detail": "Invalid authentication token" -} -``` - -### Processing Failure - -```json -{ - "detail": "Error processing webhook" -} -``` - -```json -{ - "detail": "Error processing DAO creation webhook: [specific error message]" -} -``` \ No newline at end of file diff --git a/docs/workflows.md b/docs/workflows.md deleted file mode 100644 index 39c61856..00000000 --- a/docs/workflows.md +++ /dev/null @@ -1,190 +0,0 @@ -# Workflows System Documentation - -## Overview - -The workflows system is a sophisticated implementation of AI-driven task execution pipelines built on top of LangGraph and LangChain. It provides a flexible and extensible framework for creating complex AI workflows that can combine multiple capabilities such as planning, vector retrieval, web searching, and reactive decision-making. 
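To make the orchestration idea concrete, here is a minimal, self-contained LangGraph sketch of a two-step plan-then-answer graph. It is not the project's `BaseWorkflow`: the state fields, node names, and stubbed LLM calls are illustrative assumptions only.

```python
from typing import TypedDict

from langgraph.graph import END, StateGraph


class WorkflowState(TypedDict):
    question: str
    plan: str
    answer: str


def plan_step(state: WorkflowState) -> dict:
    # A real workflow would prompt an LLM here; this stub just records a plan.
    return {"plan": f"1. Research {state['question']!r} 2. Draft an answer"}


def answer_step(state: WorkflowState) -> dict:
    # Likewise, the answer would normally come from an LLM call with tools.
    return {"answer": f"Answer to {state['question']!r} using plan: {state['plan']}"}


graph = StateGraph(WorkflowState)
graph.add_node("plan", plan_step)
graph.add_node("answer", answer_step)
graph.set_entry_point("plan")
graph.add_edge("plan", "answer")
graph.add_edge("answer", END)

app = graph.compile()
print(app.invoke({"question": "What is a DAO?", "plan": "", "answer": ""}))
```

The actual workflows layer additional capabilities (vector retrieval, web search, streaming) on top of this same graph-building pattern.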
- -## Core Components - -### Base Workflow (`base.py`) - -The foundation of the workflow system is the `BaseWorkflow` class, which provides: - -- Common functionality for all workflow types -- State management and validation -- LLM integration with OpenAI models -- Error handling and logging -- Extensible architecture through mixins - -### Available Workflows - -1. **ReAct Workflow** (`react.py`) - - Implements the Reasoning and Acting pattern - - Supports streaming responses - - Handles tool execution and state management - - Uses a message-based architecture for communication - -2. **Vector ReAct Workflow** (`vector_react.py`) - - Extends ReAct with vector database integration - - Enables semantic search and retrieval - - Combines vector search results with reasoning - -3. **Preplan ReAct Workflow** (`preplan_react.py`) - - Adds planning capabilities before execution - - Creates structured plans for complex tasks - - Executes plans step by step - -4. **Vector Preplan ReAct Workflow** (`vector_preplan_react.py`) - - Combines planning with vector retrieval - - Uses context from vector store for better planning - - Enhanced decision making with relevant information - -5. **Web Search Workflow** (`web_search.py`) - - Integrates web search capabilities - - Processes and summarizes web results - - Combines web information with other workflow steps - -6. **Proposal Evaluation Workflow** (`proposal_evaluation.py`) - - Specialized workflow for evaluating proposals - - Structured analysis and decision making - - Supports complex evaluation criteria - -7. **Tweet Analysis Workflow** (`tweet_analysis.py`) - - Analyzes tweet content and metrics - - Provides insights and recommendations - - Supports social media strategy - -8. **Tweet Generator Workflow** (`tweet_generator.py`) - - Creates engaging tweet content - - Follows best practices and guidelines - - Optimizes for engagement - -## Key Features - -### Workflow Capabilities - -The system includes several core capabilities that can be mixed into workflows: - -1. **Planning Capability** - - Creates structured plans for complex tasks - - Breaks down problems into manageable steps - - Ensures systematic approach to problem-solving - -2. **Vector Retrieval Capability** - - Integrates with vector databases - - Enables semantic search and context retrieval - - Enhances decision making with relevant information - -3. 
**Web Search Capability** - - Performs web searches for real-time information - - Processes and summarizes search results - - Integrates external knowledge into workflows - -### State Management - -- Type-safe state handling using TypedDict -- Validation of required fields -- Clean state transitions -- Error handling and recovery - -### Streaming Support - -- Real-time response streaming -- Progress updates during execution -- Tool execution status updates -- Error handling during streaming - -## Implementation Details - -### Message Processing - -The system uses a sophisticated message processing system that: -- Filters and formats message history -- Converts messages to LangChain format -- Handles different message types (system, human, AI) -- Supports tool calls and responses - -### Error Handling - -Comprehensive error handling includes: -- `LangGraphError`: Base exception class -- `StreamingError`: For streaming-related issues -- `ExecutionError`: For workflow execution problems -- `ValidationError`: For state validation failures - -### Logging - -- Structured logging throughout the system -- Debug information for development -- Error tracking and reporting -- Performance monitoring - -## Usage Guidelines - -### Creating New Workflows - -To create a new workflow: - -1. Inherit from `BaseWorkflow` -2. Implement required methods: - - `_create_prompt()` - - `_create_graph()` -3. Define state validation rules -4. Add necessary capabilities through mixins - -### Best Practices - -1. **State Management** - - Keep state minimal and focused - - Validate state transitions - - Handle edge cases - -2. **Error Handling** - - Use specific error types - - Provide detailed error messages - - Implement recovery strategies - -3. **Performance** - - Optimize tool usage - - Implement caching where appropriate - - Monitor execution times - -4. **Testing** - - Write unit tests for workflows - - Test edge cases and error conditions - - Validate tool integration - -## Integration - -The workflow system integrates with: -- LangChain for LLM interactions -- LangGraph for workflow orchestration -- Vector databases for retrieval -- Web search APIs -- Custom tools and capabilities - -## Security Considerations - -- API key management -- Input validation -- Rate limiting -- Error handling -- Access control - -## Future Enhancements - -Potential areas for expansion: -- Additional workflow types -- More capabilities and tools -- Enhanced monitoring -- Performance optimizations -- Additional integrations - -## Contributing - -When contributing new workflows: -1. Follow existing patterns and conventions -2. Implement comprehensive error handling -3. Add appropriate documentation -4. Include tests -5. Consider performance implications \ No newline at end of file diff --git a/document_processor.py b/document_processor.py deleted file mode 100644 index e533d8a3..00000000 --- a/document_processor.py +++ /dev/null @@ -1,566 +0,0 @@ -#!/usr/bin/env python -""" -Document processor for loading texts from URLs and local files, adding them to a vector database. - -This utility focuses solely on ingesting documents from specified URLs and local files, -processing them, and storing them in a vector collection for later retrieval. 
-""" - -import asyncio -import os -from pathlib import Path -from typing import List, Optional - -import dotenv -from langchain_community.document_loaders import TextLoader, WebBaseLoader -from langchain_core.documents import Document -from langchain_openai import OpenAIEmbeddings -from langchain_text_splitters import RecursiveCharacterTextSplitter - -from backend.factory import backend -from backend.models import ( - ExtensionFilter, - ProposalFilter, - TokenFilter, - VoteFilter, -) -from services.workflows.vector_react import add_documents_to_vectors - -# Load environment variables -dotenv.load_dotenv() - - -async def load_documents_from_url(url: str) -> List[Document]: - """ - Load documents from a URL using WebBaseLoader and split them with RecursiveCharacterTextSplitter. - - Args: - url: The URL to load documents from - - Returns: - List of processed Document objects - """ - try: - print(f"Loading content from URL: {url}...") - loader = WebBaseLoader(url) - docs = loader.load() - - # Initialize the text splitter - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=4000, - chunk_overlap=200, - length_function=len, - separators=["\n\n", "\n", " ", ""], - ) - - # Split the documents - split_docs = text_splitter.split_documents(docs) - - # Add metadata to each document - for doc in split_docs: - doc.metadata["type"] = "web_documentation" - doc.metadata["url"] = url - doc.metadata["source_type"] = "url" - - print( - f"Successfully loaded and split into {len(split_docs)} documents from {url}" - ) - return split_docs - except Exception as e: - print(f"Error loading content from URL {url}: {str(e)}") - return [] - - -def load_documents_from_file( - file_path: str, document_type: str = "local_file" -) -> List[Document]: - """ - Load documents from a local file and split them with RecursiveCharacterTextSplitter. - - Args: - file_path: Path to the local file - document_type: Type to assign in document metadata - - Returns: - List of processed Document objects - """ - try: - print(f"Loading content from file: {file_path}...") - file_path = Path(file_path) - - # Skip non-text files and hidden files - if not file_path.is_file() or file_path.name.startswith("."): - return [] - - # Skip files that are likely binary or non-text - text_extensions = [ - ".txt", - ".md", - ".py", - ".js", - ".ts", - ".html", - ".css", - ".json", - ".yaml", - ".yml", - ".clar", - ] - if file_path.suffix.lower() not in text_extensions: - print(f"Skipping likely non-text file: {file_path}") - return [] - - loader = TextLoader(str(file_path)) - docs = loader.load() - - # Initialize the text splitter - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=4000, - chunk_overlap=200, - length_function=len, - separators=["\n\n", "\n", " ", ""], - ) - - # Split the documents - split_docs = text_splitter.split_documents(docs) - - # Add metadata to each document - for doc in split_docs: - doc.metadata["type"] = document_type - doc.metadata["file_path"] = str(file_path) - doc.metadata["file_name"] = file_path.name - doc.metadata["source_type"] = "file" - - print( - f"Successfully loaded and split into {len(split_docs)} documents from {file_path}" - ) - return split_docs - except Exception as e: - print(f"Error loading content from file {file_path}: {str(e)}") - return [] - - -def get_files_from_directory(directory_path: str, recursive: bool = True) -> List[str]: - """ - Get a list of all files in a directory, optionally recursively. 
- - Args: - directory_path: Path to the directory - recursive: Whether to search recursively - - Returns: - List of file paths - """ - file_paths = [] - directory = Path(directory_path) - - if not directory.exists() or not directory.is_dir(): - print(f"Directory does not exist or is not a directory: {directory_path}") - return file_paths - - if recursive: - for root, _, files in os.walk(directory): - for file in files: - file_paths.append(os.path.join(root, file)) - else: - for item in directory.iterdir(): - if item.is_file(): - file_paths.append(str(item)) - - return file_paths - - -def extract_dao_documents() -> List[Document]: - """ - Extract DAO-related data from the database and convert it to Document objects. - - Returns: - List of Document objects containing DAO data - """ - documents = [] - print("\nExtracting DAO data from the database...") - - try: - # Get all DAOs - daos = backend.list_daos() - print(f"Found {len(daos)} DAOs in the database") - - for dao in daos: - # Create a document for the DAO - dao_content = f""" - DAO: {dao.name} - ID: {dao.id} - Mission: {dao.description} - Description: {dao.description} - Deployed: {dao.is_deployed} - Broadcasted: {dao.is_broadcasted} - """ - - # Create a document from the DAO - dao_doc = Document( - page_content=dao_content, - metadata={ - "type": "dao", - "id": str(dao.id), - "name": dao.name or "Unnamed DAO", - "source_type": "database", - "entity_type": "dao", - }, - ) - documents.append(dao_doc) - - # Get tokens for this DAO - tokens = backend.list_tokens(TokenFilter(dao_id=dao.id)) - if tokens: - print(f"Found {len(tokens)} tokens for DAO {dao.name}") - - for token in tokens: - token_content = f""" - Token: {token.name} ({token.symbol}) - DAO: {dao.name} - Description: {token.description} - Decimals: {token.decimals} - Max Supply: {token.max_supply} - Contract: {token.contract_principal} - Status: {token.status} - """ - - token_doc = Document( - page_content=token_content, - metadata={ - "type": "token", - "id": str(token.id), - "dao_id": str(dao.id), - "dao_name": dao.name or "Unnamed DAO", - "name": token.name or "Unnamed Token", - "symbol": token.symbol, - "source_type": "database", - "entity_type": "token", - }, - ) - documents.append(token_doc) - - # Get extensions for this DAO - extensions = backend.list_extensions(ExtensionFilter(dao_id=dao.id)) - if extensions: - print(f"Found {len(extensions)} extensions for DAO {dao.name}") - - for extension in extensions: - extension_content = f""" - Extension Type: {extension.type} - DAO: {dao.name} - Contract: {extension.contract_principal} - Status: {extension.status} - Transaction: {extension.tx_id} - """ - - extension_doc = Document( - page_content=extension_content, - metadata={ - "type": "extension", - "id": str(extension.id), - "dao_id": str(dao.id), - "dao_name": dao.name or "Unnamed DAO", - "extension_type": extension.type, - "source_type": "database", - "entity_type": "extension", - }, - ) - documents.append(extension_doc) - - # Get proposals for this DAO - proposals = backend.list_proposals(ProposalFilter(dao_id=dao.id)) - if proposals: - print(f"Found {len(proposals)} proposals for DAO {dao.name}") - - for proposal in proposals: - proposal_content = f""" - Proposal: {proposal.title} - DAO: {dao.name} - Description: {proposal.description} - Status: {proposal.status} - Action: {proposal.action} - Executed: {proposal.executed} - Passed: {proposal.passed} - Met Quorum: {proposal.met_quorum} - Met Threshold: {proposal.met_threshold} - Votes For: {proposal.votes_for} - Votes 
Against: {proposal.votes_against} - """ - - proposal_doc = Document( - page_content=proposal_content, - metadata={ - "type": "proposal", - "id": str(proposal.id), - "dao_id": str(dao.id), - "dao_name": dao.name or "Unnamed DAO", - "title": proposal.title, - "source_type": "database", - "entity_type": "proposal", - }, - ) - documents.append(proposal_doc) - - # Get votes for this proposal - votes = backend.list_votes(VoteFilter(proposal_id=proposal.id)) - if votes: - print(f"Found {len(votes)} votes for proposal {proposal.title}") - - vote_content = f""" - Votes for Proposal: {proposal.title} - DAO: {dao.name} - """ - - for vote in votes: - vote_content += f""" - Vote by: {vote.address} - Answer: {"Yes" if vote.answer else "No"} - Amount: {vote.amount} - Reasoning: {vote.reasoning} - """ - - vote_doc = Document( - page_content=vote_content, - metadata={ - "type": "votes", - "proposal_id": str(proposal.id), - "dao_id": str(dao.id), - "dao_name": dao.name or "Unnamed DAO", - "proposal_title": proposal.title, - "source_type": "database", - "entity_type": "votes", - }, - ) - documents.append(vote_doc) - - # Split the documents if they are too large - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=4000, - chunk_overlap=200, - length_function=len, - separators=["\n\n", "\n", " ", ""], - ) - - split_docs = text_splitter.split_documents(documents) - print( - f"Successfully processed {len(split_docs)} documents from database DAO data" - ) - return split_docs - - except Exception as e: - print(f"Error extracting DAO data from database: {str(e)}") - return [] - - -async def process_documents( - urls: Optional[List[str]] = None, - directories: Optional[List[str]] = None, - files: Optional[List[str]] = None, - knowledge_collection_name: str = "knowledge_collection", - dao_collection_name: str = "dao_collection", - document_type: Optional[str] = None, - recursive: bool = True, - include_database: bool = False, -) -> None: - """ - Process documents from URLs, directories, files, and database and add them to vector collections. - - URLs, directories, and files go into knowledge_collection_name. - Database DAO data goes into dao_collection_name. 
- - Args: - urls: List of URLs to process - directories: List of directories to process - files: List of individual files to process - knowledge_collection_name: Collection name for URL and file documents - dao_collection_name: Collection name for database DAO documents - document_type: Optional type to assign to documents in metadata - recursive: Whether to recursively process directories - include_database: Whether to include DAO data from the database - """ - knowledge_documents = [] - dao_documents = [] - - # Process URLs - if urls: - for url in urls: - print(f"\nProcessing documentation from URL: {url}") - docs = await load_documents_from_url(url) - - # Add custom document type if specified - if document_type and docs: - for doc in docs: - doc.metadata["type"] = document_type - - if docs: - print(f"Adding {len(docs)} documents from URL {url}") - knowledge_documents.extend(docs) - else: - print(f"No content was retrieved from URL {url}") - - # Process directories - if directories: - for directory in directories: - print(f"\nProcessing files from directory: {directory}") - file_paths = get_files_from_directory(directory, recursive=recursive) - - for file_path in file_paths: - print(f"Processing file: {file_path}") - docs = load_documents_from_file( - file_path, document_type or "directory_file" - ) - - if docs: - print(f"Adding {len(docs)} documents from file {file_path}") - knowledge_documents.extend(docs) - else: - print(f"No content was retrieved from file {file_path}") - - # Process individual files - if files: - for file_path in files: - print(f"\nProcessing individual file: {file_path}") - docs = load_documents_from_file( - file_path, document_type or "individual_file" - ) - - if docs: - print(f"Adding {len(docs)} documents from file {file_path}") - knowledge_documents.extend(docs) - else: - print(f"No content was retrieved from file {file_path}") - - # Process knowledge documents if any exist - if knowledge_documents: - print( - f"\nProcessing {len(knowledge_documents)} knowledge documents (URLs and files)..." - ) - embeddings = OpenAIEmbeddings() - - # Ensure the knowledge collection exists - try: - backend.get_vector_collection(knowledge_collection_name) - print(f"Using existing vector collection: {knowledge_collection_name}") - except Exception: - embed_dim = 1536 # Default for OpenAI embeddings - if hasattr(embeddings, "embedding_dim"): - embed_dim = embeddings.embedding_dim - backend.create_vector_collection( - knowledge_collection_name, dimensions=embed_dim - ) - print( - f"Created new vector collection: {knowledge_collection_name} with dimensions: {embed_dim}" - ) - - # Add knowledge documents to the vector store - print( - f"Adding {len(knowledge_documents)} documents to {knowledge_collection_name}..." 
- ) - await add_documents_to_vectors( - collection_name=knowledge_collection_name, - documents=knowledge_documents, - embeddings=embeddings, - ) - print(f"Documents added successfully to {knowledge_collection_name}!") - - # Create an index on the collection for better query performance - print(f"Creating index on vector collection: {knowledge_collection_name}...") - try: - backend.create_vector_index(knowledge_collection_name) - print(f"Index created successfully for {knowledge_collection_name}!") - except Exception as e: - print(f"Error creating index for {knowledge_collection_name}: {str(e)}") - - # Process DAO data from database into separate collection - if include_database: - print("\nProcessing DAO data from database...") - db_docs = extract_dao_documents() - if db_docs: - print( - f"Adding {len(db_docs)} documents from database to {dao_collection_name}" - ) - dao_documents.extend(db_docs) - - # Initialize embeddings for DAO documents - embeddings = OpenAIEmbeddings() - - # Ensure the DAO collection exists - try: - backend.get_vector_collection(dao_collection_name) - print(f"Using existing vector collection: {dao_collection_name}") - except Exception: - embed_dim = 1536 # Default for OpenAI embeddings - if hasattr(embeddings, "embedding_dim"): - embed_dim = embeddings.embedding_dim - backend.create_vector_collection( - dao_collection_name, dimensions=embed_dim - ) - print( - f"Created new vector collection: {dao_collection_name} with dimensions: {embed_dim}" - ) - - # Add DAO documents to the vector store - print(f"Adding {len(dao_documents)} documents to {dao_collection_name}...") - await add_documents_to_vectors( - collection_name=dao_collection_name, - documents=dao_documents, - embeddings=embeddings, - ) - print(f"Documents added successfully to {dao_collection_name}!") - - # Create an index on the collection for better query performance - print(f"Creating index on vector collection: {dao_collection_name}...") - try: - backend.create_vector_index(dao_collection_name) - print(f"Index created successfully for {dao_collection_name}!") - except Exception as e: - print(f"Error creating index for {dao_collection_name}: {str(e)}") - else: - print("No content was retrieved from database") - - if not knowledge_documents and not dao_documents: - print("No documents were loaded from any source. 
Exiting.") - return - - -async def main() -> None: - """Run the document processor.""" - # Example list of URLs to process - urls = [ - "https://docs.stacks.co/reference/functions", - "https://docs.stacks.co/reference/keywords", - "https://docs.stacks.co/reference/types", - "https://docs.stacks.co/reference/the-stack", - ] - - # Example directories to process - directories = [ - "./aibtcdev-docs", # Replace with actual directories - "./aibtcdev-contracts/contracts/dao", - "./stacks-docs/press-and-top-links", - "./stacks-docs/nakamoto-upgrade", - "./stacks-docs/concepts", - "./stacks-docs/example-contracts", - "./stacks-docs/guides-and-tutorials", - "./stacks-docs/bitcoin-theses-and-reports", - "./stacks-docs/reference", - ] - - # Example individual files to process - files = [] - - # Process the documents and add them to separate vector collections - await process_documents( - urls=urls, - directories=directories, - files=files, - knowledge_collection_name="knowledge_collection", # Collection for URLs and files - dao_collection_name="dao_collection", # Collection for DAO database data - recursive=True, - include_database=True, # Include DAO data from the database - ) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/chainhooks/chainhook_20250227_192359.json b/examples/chainhooks/chainhook_20250227_192359.json deleted file mode 100644 index e261ad28..00000000 --- a/examples/chainhooks/chainhook_20250227_192359.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x17eb967ff3cffacc6a2128b0c99a1d02280bb1adf7ec42e4259f28048b811094", "index": 222005}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x36203cf8f01b0ef09aff54bc8446a5fc36d141d50ad3398e648c07e5998e286a", "index": 19755}, "block_time": 1740712361, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 14, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["00482c930da16d47c6eee9f2864688a762f41d802d65d496a567da1d2cd974ac7d54eef246785d05135476578fc16eea797c4711f2908b3253fd65c125db733146", "006c8dee60d1439de553418b6c25ed136f2eec929220fbf155e8c453a3cb4655f740fa3b8381b9db1929e7397739a0c09178d1ad60fa01b243e0bfee4fd8fc77cb", "00f59861ae725cdc05a6749087986b1222e680552495d4106deb4c38958bb5a86a7267552e8c0e1c4c962549c1133d3550a1ef23bfcfb4508713f96a613cc51a03"], "stacks_block_hash": "0x4b8eb2dccf17d9e80e184b888f3c376375b1b821a5cb0fb07887f66f6719aeea", "tenure_height": 16951}, "parent_block_identifier": {"hash": "0xf2ccf6bb143a907e46225347afe7928289812826c312e7928539f6c2053cf21a", "index": 222004}, "timestamp": 1740712308, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B to ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67693, "position": {"index": 0}, "raw_tx": 
"0x80800000000400b4fdae98b64b9cd6c9436f3b965558966afe890b000000000001086d000000000000012c00019cb0e3e99c819b5acfb7775ff9321db5bfcad43950d5fe07ae3219774af6c34f025b45870b71a04ebf67666e82db8e5867708b71052c9b2d17f5250ad2ffdd5203020000000000051ac2963cbcd9cf3f60311c34087ceb45c8abdbc41300000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0x0dbe9d5398051d4123b2c999bd95a9bdfbc6e1846b2f10e3411a24ef5d5dfe83"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192400.json b/examples/chainhooks/chainhook_20250227_192400.json deleted file mode 100644 index 44df9cda..00000000 --- a/examples/chainhooks/chainhook_20250227_192400.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x95c1dbf6caddcc085b8e480f616a9151df956945217eaf4a3c7735fb9baae701", "index": 222006}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x36203cf8f01b0ef09aff54bc8446a5fc36d141d50ad3398e648c07e5998e286a", "index": 19755}, "block_time": 1740712383, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 14, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["00a74c2d94b3593ac649084b35839abe6a75eca7cbd68567188ca7cea7a34de5af4b201f929fe1f5fdef7cd96feab27d51b2a6e189bb84e84bda18fcbbb5f58736", "011af1f2a6b2c9a65dac361f9c0971190771de0b409cd19804a09d47392e36ff4e5fe1ab10b62486a20cfb308173a2a5b41ea2bac9ef80eb65f48b88780bd35e7d"], "stacks_block_hash": "0x35064e4fd5736b13defe8b9d2041b595db30dca15538d2934ee64ed16447b50f", "tenure_height": 16951}, "parent_block_identifier": {"hash": "0x17eb967ff3cffacc6a2128b0c99a1d02280bb1adf7ec42e4259f28048b811094", "index": 222005}, "timestamp": 1740712308, "transactions": [{"metadata": {"description": "invoked: STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13::set-stx-price-in-usdt(u796667)", "execution_cost": {"read_count": 5, "read_length": 4180, "runtime": 7035, "write_count": 1, "write_length": 18}, "fee": 196, "kind": {"data": {"args": ["u796667"], "contract_identifier": 
"STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13", "method": "set-stx-price-in-usdt"}, "type": "ContractCall"}, "nonce": 274, "position": {"index": 0}, "raw_tx": "0x8080000000040029540d38a1e0a81d3e494e4d6bf474e339e0d8af000000000000011200000000000000c40000cd8e05a34f169d452fa7c9b2f49fa9c8338b53f53252f7465a3eef1e0928b7320bb6897bcd5dec6a3f4e2b4fb1895be294655bcc81c90577e6497455a71fb685030100000000021a29540d38a1e0a81d3e494e4d6bf474e339e0d8af0f6361742d70726573616c652d743133157365742d7374782d70726963652d696e2d757364740000000101000000000000000000000000000c27fb", "receipt": {"contract_calls_stack": [], "events": [], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR", "sponsor": null, "success": true}, "operations": [], "transaction_identifier": {"hash": "0x2328eb66a8c1d0e19ba9065c9dcf69651e09b7503cb7f7652c7b8e421df1bd53"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192401.json b/examples/chainhooks/chainhook_20250227_192401.json deleted file mode 100644 index cb90a6ad..00000000 --- a/examples/chainhooks/chainhook_20250227_192401.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x15ca3218d18cd4ed29c9047855544522529ed2b9f9fc2dab033f9ec10990dfaf", "index": 222008}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x36203cf8f01b0ef09aff54bc8446a5fc36d141d50ad3398e648c07e5998e286a", "index": 19755}, "block_time": 1740712440, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 14, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["0131d128401c294b8c075e1c00fa510cf9d5f2dd87f1c523e56970258db629d3510ec2fc8343b3683429173d4047beaa27b5fb08f4a0789d32bc987d3fb64fa114", "00c05ceadd89e2c4ff1d5626715c7940b191ed012d5e4e9e9ef9c5e13c6af9c6a006a5bda3da9eb5bda98cb965cd38c3e5b16967230a37da7fd017edf5346dcea1", "0041f4f177c46e678647c71dbbcfb3010470453ae116751726309e45ac27367d225d422686bb6ebf98ff39df1af3e2926ec22438b2a14cacf66d9a7f8ef8c7e8da"], "stacks_block_hash": "0xa8c82ae406003bc32598399b0501ed5d1c3398098b350db8aa44351ac6cb2942", "tenure_height": 16951}, "parent_block_identifier": {"hash": "0x041501a452864c9220ed10213ac324fd606619e8ce117cbff8c55cfe19cdbd91", "index": 222007}, "timestamp": 1740712308, "transactions": [{"metadata": {"description": "invoked: STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13::buy-tokens(u10)", "execution_cost": {"read_count": 11, "read_length": 4247, "runtime": 30715, "write_count": 3, "write_length": 114}, "fee": 3000, "kind": {"data": {"args": ["u10"], "contract_identifier": "STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13", "method": "buy-tokens"}, "type": "ContractCall"}, "nonce": 30, "position": {"index": 0}, "raw_tx": 
"0x80800000000400a3ab3da9bb7bffc2e27a89aece2305ba3f473a04000000000000001e0000000000000bb8000165d33c30ffed3bb2ff604f0d647ee780730db2b3395503dd25fc13d1541751575e3810e17799b017b5cb7c5de0a73a7bbf57e40788cef2661854177e29d4951f030100000000021a29540d38a1e0a81d3e494e4d6bf474e339e0d8af0f6361742d70726573616c652d7431330a6275792d746f6b656e7300000001010000000000000000000000000000000a", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "10000000", "recipient": "STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13", "sender": "ST2HTPFD9QDXZZGQ2FA4TXKH30PX3YHST0H4JFE0E"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2HTPFD9QDXZZGQ2FA4TXKH30PX3YHST0H4JFE0E", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2HTPFD9QDXZZGQ2FA4TXKH30PX3YHST0H4JFE0E"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 10000000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 10000000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xbf30e77f2f6c47f6c6d21a5423755c5f71c0c0202b8e63b63fba173a9a1b0990"}}, {"metadata": {"description": "transfered: 1000 \u00b5STX from ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H to ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67694, "position": {"index": 1}, "raw_tx": "0x80800000000400841f9b4dd30ae02fd816cc225c59d1b00b9c3a3a000000000001086e000000000000012c000089badf21ef111c795c58f8db24e60dc393108faa35bf0169dccadd6f7aa7449a4bfd273a7fbe7ac8c8349d5b0340852432c78446e9c7cf1b161b2c792e3d226303020000000000051ab4fdae98b64b9cd6c9436f3b965558966afe890b00000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sender": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "position": {"index": 1}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xb869e128bb80287b6f267523064eabd40e4cad2bc6d6cdcaca0147bd3397be03"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192403.json 
b/examples/chainhooks/chainhook_20250227_192403.json deleted file mode 100644 index 84447d53..00000000 --- a/examples/chainhooks/chainhook_20250227_192403.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x862ddd9b86badb375331a8c5fe6b2a38ea025484783909b06192a938e24409a3", "index": 222009}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x36203cf8f01b0ef09aff54bc8446a5fc36d141d50ad3398e648c07e5998e286a", "index": 19755}, "block_time": 1740712479, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 14, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["019e9ab35fe790b321c1199dc55ec80c957304beda2970ae0749d3d3de117e76ea57208801e63749515c829649ad5969178901b4397be3325e334fea1d9a623a2e", "014107a722262cbfe5255617f829c77a755e85939d2e6a98442f2cb870c0e72b0f70bf30f74e8e042a8991fe62215b0b8428d70f24b87bffa2970e86cc3ab655d9", "012e89e34795b7dee4dda561b62d210ab8e00a09bfbe4816a8d5ae76d346876fb92b64b8b449e25172debe8de5fde3b70b8bb86152b5c7428195cbf3bdaefdbd79"], "stacks_block_hash": "0xb4d9710e22d9f5281e014d042fef77e962f929b7e627cda06674a4f80a8f41d6", "tenure_height": 16951}, "parent_block_identifier": {"hash": "0x15ca3218d18cd4ed29c9047855544522529ed2b9f9fc2dab033f9ec10990dfaf", "index": 222008}, "timestamp": 1740712308, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B to ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67694, "position": {"index": 0}, "raw_tx": "0x80800000000400b4fdae98b64b9cd6c9436f3b965558966afe890b000000000001086e000000000000012c000056ad88ea866b0be5afaacb2f4364d9e8c08d6794ab0a690f09865349be7688f3615f8d045a586c252748b1a63242c53426023406211b9f301ddf5fed4ed5ef4e03020000000000051ac2963cbcd9cf3f60311c34087ceb45c8abdbc41300000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0x413c15e4d950ce6b51b476103faa5e139bf6fb3d9eee8b2d40f4ee6338ff9806"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": 
{"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192405.json b/examples/chainhooks/chainhook_20250227_192405.json deleted file mode 100644 index 5b68a92d..00000000 --- a/examples/chainhooks/chainhook_20250227_192405.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0xc93f68701ee5103d8cac5d90c08a1962ee0f2897ec021eddb86851505ef4eb2b", "index": 222011}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x36203cf8f01b0ef09aff54bc8446a5fc36d141d50ad3398e648c07e5998e286a", "index": 19755}, "block_time": 1740712530, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 14, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["01cb2edc820a5a44edbcfda911fb5eba1f12a39b8176b885f55e53be090699afde297f0a0ce4e35688e651a898ebdc745596a830eb5928edb870ffc4d178541d71", "0001178d5a6e0c8d50a6c71ab881d27f4991ba29a0d20f75e810984c4587696a53336c27de0171b43697d7d6001ee3db0a1713d40ea046977097389fd26b1c29bc", "0080b1a3a8af8d7c4b3ac7f643cc229775d0b5c01addb4eef240817cb2994887e7070a044fc83c8d238c7815bea7cdbc2867dd357dd7d18573ef3ec1c0fc0f2e7b"], "stacks_block_hash": "0x96b5cc54197c931e161ef0c45fad575ea907eac13153e8499bb9b43e849a49ac", "tenure_height": 16951}, "parent_block_identifier": {"hash": "0x0ab5329b1aac18a9edb27908919a2337a2f1d46c56790e2e011b8baabff457b1", "index": 222010}, "timestamp": 1740712308, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF to ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67695, "position": {"index": 0}, "raw_tx": "0x80800000000400c2963cbcd9cf3f60311c34087ceb45c8abdbc413000000000001086f000000000000012c000141267c91d91c7deba77c99acb013c98903568f17540bc593de4ccbdb52402fc6218739e8964395a91cd90e31fa2aac712605fd0b20d2ec942ac5d8a43ed5d31203020000000000051a841f9b4dd30ae02fd816cc225c59d1b00b9c3a3a00000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "sender": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": 
[{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0x854154d2c03501b69ef4cbf18cd8c06263d39202ab482daac9cceb1f5439c2a5"}}, {"metadata": {"description": "transfered: 500000000 \u00b5STX from ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B to ST38RMDQFVC462DSJ1CPEW5EYXEZKASQVC8XDGARN", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 180, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67695, "position": {"index": 1}, "raw_tx": "0x80800000000400b4fdae98b64b9cd6c9436f3b965558966afe890b000000000001086f00000000000000b4000197c15ba5734dc337420e420998fc4f2258229b5a2344bf0d2438106e4a8bd4717df887fdb1e3875e3c327266007d2bb38694b02941e52e40ac0bd7d07db736a303020000000000051ad18a36efdb086137320b2cee15deebbf3566fb62000000001dcd650066617563657400000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "500000000", "recipient": "ST38RMDQFVC462DSJ1CPEW5EYXEZKASQVC8XDGARN", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "position": {"index": 1}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 500000000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST38RMDQFVC462DSJ1CPEW5EYXEZKASQVC8XDGARN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 500000000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0x1356bf9bbbea75ab265ae57b1501a32bd6ce379d96e96821c9334826e3e0a71b"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192406.json b/examples/chainhooks/chainhook_20250227_192406.json deleted file mode 100644 index 6a93913f..00000000 --- a/examples/chainhooks/chainhook_20250227_192406.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0xa381f3a5496938a561e6764a384e7f8e9fe69f50cd5063ee94a96c5aa961f6f9", "index": 222012}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x3e7804a435b1a5147c986d2acc86c7a4e44f6119383d4c1a969102b6bce24a09", "index": 19756}, "block_time": 1740712560, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 15, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["0059e21971f00f744c94622cc00016e047a27df513ddc8e5b96df6b4e6b243de2c16b62a206008fc09ef4181495fca49e3d8c8b04cb847573747c89cd65a98e0b2", "0188382a2df16efbffd7ac3c39e84ae5b59378906964c59f8f8938764c72e2bf923be766ded2274aae4894db780dea6f96d5c8e275b9ec22c0b54056a21e6b4794", 
"001500c1cf7fd9adc6b016bccd0086071ce166d16e71646d781e546fe2cb358a7b6c0c98c63b1c2ca00e1708287a3a0c00e180b2770be5dec75049276b4c954238"], "stacks_block_hash": "0x3571e7a9d5c1936a7316dfc9711130bd685abe4e711ed99ccba7f4fd91894b61", "tenure_height": 16952}, "parent_block_identifier": {"hash": "0xc93f68701ee5103d8cac5d90c08a1962ee0f2897ec021eddb86851505ef4eb2b", "index": 222011}, "timestamp": 1740712308, "transactions": [{"metadata": {"description": "tenure change", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 0, "kind": {"type": "TenureChange"}, "nonce": 38246, "position": {"index": 0}, "raw_tx": "0x808000000004002d58c278d1c9e1a8a26236ac81b543193c10e347000000000000956600000000000000000001c3ef7534121c1a95ce9a7f76d1fa2953ffddfd4e20afed18de076ea2727a4bf80edf32c9ec11f028e2664a87a9a4f3ef58877715a47e81bb3a4531b7381a46810102000000000794c1e3466b8c49845b8147fcf4fe8dabf128437cfedc4175950e3278cac94bbd6feec58f1bab8ae294c1e3466b8c49845b8147fcf4fe8dabf128437cc93f68701ee5103d8cac5d90c08a1962ee0f2897ec021eddb86851505ef4eb2b00000008002d58c278d1c9e1a8a26236ac81b543193c10e347", "receipt": {"contract_calls_stack": [], "events": [], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "STPNHGKRT74Y3A52C8VAS0DN8CCKR4738YBAQ3E1", "sponsor": null, "success": true}, "operations": [], "transaction_identifier": {"hash": "0xd232c9f8cdab52d6e31dc73a021d4e5df956c37c06faa6f5a31e04d11adb4c30"}}, {"metadata": {"description": "coinbase", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 0, "kind": {"type": "Coinbase"}, "nonce": 38247, "position": {"index": 1}, "raw_tx": "0x808000000004002d58c278d1c9e1a8a26236ac81b543193c10e347000000000000956700000000000000000001212cca0f637938e1a73a297d85abcbda32c10774286dd0c295081f85a0b8533f731690fbcfb0acb538872c92bf9c32c1f5c1347f2b7b32ae787f411e4e8e319b0102000000000800000000000000000000000000000000000000000000000000000000000000000917fe366a5dce7d6a595b7686bc87714bc429c654bbf44fbc3885cc103d497fd3c940a6ad26dbeefdf82deb4468ce36f7fceaf67fe63950240846553ab4e259608eb9910988011e98ac801ef5a027de08", "receipt": {"contract_calls_stack": [], "events": [], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "STPNHGKRT74Y3A52C8VAS0DN8CCKR4738YBAQ3E1", "sponsor": null, "success": true}, "operations": [], "transaction_identifier": {"hash": "0x4d635ec578c5d5c3e3df382d02b0d3060c05c5385c1853ffb7cbdbe9536c3f3e"}}, {"metadata": {"description": "transfered: 0 \u00b5STX from ST000000000000000000002AMW42H to ST000000000000000000002AMW42H", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 0, "kind": {"type": "NativeTokenTransfer"}, "nonce": 0, "position": {"index": 2}, "raw_tx": "0x8000000000040000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003020000000000051a00000000000000000000000000000000000000000000000000000000426c6f636b2032323230313220746f6b656e20756e6c6f636b730000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "13888888889", "recipient": "SN3G4T05D2P399Z4C7XBYM118VGDP875QJBQPH842"}, "position": {"index": 191}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1VABZBF4ENNXBWGN9W1EME63VV69VJEXWK00161"}, "position": {"index": 
106}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1SGSZE8WYG98HB0E4NFJ37MKKJ7K5T74G6K19N9"}, "position": {"index": 103}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNHWG5N9K21M4FQCZYY1HP4Z9HV2F617PFG32SXR"}, "position": {"index": 27}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN13KWC112MWQ3RXYM3G7NRSVM29KN3X4Z5KFXZQH"}, "position": {"index": 57}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN29JBK904XG97W9ZHP4HWTTFVAZGNG5TF0652YJ7"}, "position": {"index": 124}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNCEH4V73NCPF3ZSS8773ES8MV9JPHTA05F2CEFH"}, "position": {"index": 17}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2YXJAZC445VF72WSC0716D1BCQ0P7QA1E9V52TN"}, "position": {"index": 163}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3RD5DR82MWYKTSTFAX8QJETTM4K5FM9VW79359W"}, "position": {"index": 207}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1EXPXP6AKGPJSE9XHV41KS9BGQ7C40JX5MMCBKP"}, "position": {"index": 85}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN314339F8E6F6D1FR0J0XVE8NS3R8XNNTBWR7WC2"}, "position": {"index": 168}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3DY2HN93RDKTA5RV186GQHM7WZ9P9JPEQ0KHNEB"}, "position": {"index": 183}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNHRV9XNMET2F1NJNXFKW5DQ2BW6MTREJ4G05FE0"}, "position": {"index": 26}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1VJ0FZ6T07ESX7FAV5NEJW2N9RDDF0BVXRVHRVX"}, "position": {"index": 107}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2WRGSJQJ7CS3H1N8X0M3DGDXXCH1NV7XNP1BGVV"}, "position": {"index": 160}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1SPPY3670JJCAYWAXB8D7HREPGWEBY7J4SCG76V"}, "position": {"index": 104}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1FM5N69SG62SQHPFKTXTESTFVZB2GZCM0RPCGR9"}, "position": {"index": 87}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNCD491XBEJFM3YPTPX3K8K0TQNYAHHK2M080PHZ"}, "position": {"index": 16}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN10B79PD3DN2XRBVBJWZ3QJ7SETV4ZGRHBFZ1QM9"}, "position": {"index": 48}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNC6E7YJSZDRG7MKP6PX0FA2YDEVDQC6Z0HXP6RS"}, "position": {"index": 15}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN9VJ2YPZ73CHWPZR9NDYAZTX8P5FPAPP47EHXBZ"}, "position": {"index": 13}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNE3YNXYPDC01YTTCW58DJJYK3HS17FM5WWD4Q3Z"}, "position": {"index": 19}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN15H0A2XH7AXD9B9D56SQV5NJZB2ECSWHPW8MFKE"}, "position": {"index": 59}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN16WKZXCF0Y87EZXWRZFS3EF39DYSEHT4HMZ928T"}, "position": {"index": 64}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1PYWRYBRHVADBQT1XCYYC4ZSEMT1CBN4VNX6ZP9"}, "position": {"index": 99}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN6WQ4PTPHC2S4Q7D2RBRSTCER7BD48TTAQPYRGF"}, "position": {"index": 9}, "type": "STXMintEvent"}, {"data": {"amount": 
"13888888889", "recipient": "SN1AN0ZVPGJY5TAJ9125C59A19K630PNXZ27WT5EB"}, "position": {"index": 73}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1ZFD206K49D42HMQ3ZGXY8B313M4KN5GJNJ1ZTB"}, "position": {"index": 112}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2A6V21SHZKCVMZHKE64WZ26CCRFDEMYP2AE2DWM"}, "position": {"index": 125}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2EW7FACSH6DNYZ3ZE56DEY7Q4Q6CGAHBTM460PN"}, "position": {"index": 141}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2JR0VA2Y13RFWTKXSDNDEW38EZ3A1X08F1RY6HX"}, "position": {"index": 146}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3K05G2N8RVDSZZ25F1DK6R8MTPF7YSCHE15AM28"}, "position": {"index": 194}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3XX07N1GWR12AVVE979K9VHEGHB9MD3RW9X2DFS"}, "position": {"index": 214}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNJE9A15HR6YNGZ767F9WFN0GZQZDCNGC4T40KKD"}, "position": {"index": 28}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2EMRY775PNYH1R80K24ER3JHTWFADTYD0PBY5HY"}, "position": {"index": 140}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1DM449RKB08S1M3JGP3H53HWKP1THXG6EF4WPBB"}, "position": {"index": 81}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2A9A2657E2WB4KQNF2X56EXX8BNR00A9Y718VKP"}, "position": {"index": 126}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1NR10AG6AK2WGZGB6JNRAFGF2R29SQS50BVCRTB"}, "position": {"index": 95}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3636BXKN7X03TE9NFTCEVGQJ7X1M4EYXR3SY3BT"}, "position": {"index": 172}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN36QQQACZ6V8JKDMZFX5G5V7S8S0E3DC6TVVXKD3"}, "position": {"index": 173}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN151ENM0ZP6V7P1FDP51AQYYDYNB5S9ECPEWW7BZ"}, "position": {"index": 58}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1AES7Q0YMTY3T38FNWVTK0J14NAHS1N689N1B93"}, "position": {"index": 72}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNRVME1H0F653SR211EK64R3TBMWASB67XKS3H8D"}, "position": {"index": 37}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2QH4JXV2A7HZZD8TA1Y1J8KZM7PD62BTXT3SJYJ"}, "position": {"index": 149}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2RVB0CYJJA38QC4GNGX37XB1R6NDMW6K91FVG94"}, "position": {"index": 151}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN9GQZFK5HBP22ZWRBYS110YYXVX4NXHAB1C0QX4"}, "position": {"index": 12}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3AZ5GSJJWQE0TD0NN3HGSCACKFGFD5QXYNC48BA"}, "position": {"index": 179}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1GTTQ8NAK8YAP374VWV1F3CMPZGYWFCN3KGHHGF"}, "position": {"index": 89}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN132F6Z8GXSHNF5JQW9RKJ4FYPZBN7QD8RYFBYVX"}, "position": {"index": 53}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1R3XKG0WFSC8F608ZZGQ5EF3XA6ZTEV9Q62XHXS"}, "position": {"index": 100}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": 
"SN273YMQXXQAQQ430N4B40282GKFPZW8XXCV9X6JA"}, "position": {"index": 122}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2BHJM5BXAZRPREWY4ZEKD58CFTPDQHXTADAW6MV"}, "position": {"index": 130}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN12APCFJM8CEGC5FZMKD8EDHW4TQ8RCJD1KADMG9"}, "position": {"index": 52}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2CGGK7KPNYMYEQ0Q49ZV9KF4VQES3PED53VFGJM"}, "position": {"index": 133}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3FECN2P5RRX4GJDD97TH4KTH78GHAXW10NQ8JME"}, "position": {"index": 186}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNH20XGJG1N6SH89A4PWTABXFZ1MJ42VAZQCBXV0"}, "position": {"index": 24}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNFQQJ13C085Z3PY1FHJE19V1ZENZ1RKJ2PD4SK1"}, "position": {"index": 21}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNNPX3JZ0A7T8HWFYAV22AHQDR1T3XGR0C7FBC05"}, "position": {"index": 31}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN24K04B9ZNGRSZSX5FNJ8N946CC9Q7H2N342V0PN"}, "position": {"index": 120}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1PW61KPTCDJHKFQGVJ9PC06QHGCDBH0600MZ4Q5"}, "position": {"index": 98}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1BT2Q02ZRZC3C7DHCFP5JYWAQQ20422YJ5H0GN7"}, "position": {"index": 75}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN134H42XHC5B1VT5P4EP6VAYKBQ3V2594NFAKMF3"}, "position": {"index": 55}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN300E1H03P0D548QTKYJPGXPC1T86BJM5SX2CE8P"}, "position": {"index": 166}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN321ZKMJP7M22THC0EN9C9N90AY4DBMKQJFDJHDJ"}, "position": {"index": 169}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3F8SVA319PENA122YX3Z4W37DNEGSJGFWC3XTJW"}, "position": {"index": 184}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3FTYEC9F6CV2QQX7S5KVDPTPZ8QQ3KV3PJZ09Q5"}, "position": {"index": 189}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3GQP1V1E9Z4H2SA5QFE5PCSBTR07RAMDSGR9GW7"}, "position": {"index": 192}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN19N4M19FZYZAAY2TC1DASKGAVPPCBXN1N64Z2HQ"}, "position": {"index": 70}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2WJ90NTWFE8JZ4GG43KVHN5G51XAMAQGW9YCFYF"}, "position": {"index": 159}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN258WG743EMD6VPA49S9C8CPXN1WEKXK7PQD9YAH"}, "position": {"index": 121}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2AMR01NE8AKKGW8AAP4Q82CDE9CJY9Y3CZ4BCBZ"}, "position": {"index": 127}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN37CGYEDQKR2Z8Y72XTJZ991E0PMQ782GAC1CMPW"}, "position": {"index": 174}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3FKK9V6M7YGPP4SJCQAXA5KYVAKEY74YME7ZZ5R"}, "position": {"index": 188}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN38R5VTRA3K15V0ZD45QXX24XF6APEF9X4BSGGME"}, "position": {"index": 175}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": 
"SN3TKXA8BBCV93ZYMRCWTB9B7HPY2JWT78JQ8M6EV"}, "position": {"index": 211}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3FJD9GBMEH2CENDNXRHZMCVH231TW6EW3H5MHH3"}, "position": {"index": 187}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1811XEFECGZN9K567Z63D9QZHQ7VN921Q6N1SBN"}, "position": {"index": 65}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2KCEP88YKFJ6TW1245C4GDRVKSC06SXJKS5JV6M"}, "position": {"index": 147}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNX51SYDER1MYNCG1TGS8Q53QRTGGA5CKY0P6R4Y"}, "position": {"index": 43}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1PTP8Q5M5FEXQ8NQ6V3QV21HJNC8P754EMP27M7"}, "position": {"index": 97}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3PJJD7PNC00HQ2G5QP5NEVNMZ112XCNGT86MWTE"}, "position": {"index": 202}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3QCD3EZD98VXHVSSBVTA245V7XQ2YY57HCPNN4C"}, "position": {"index": 205}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2Y14Y0NQYSPY6JAMNNHGDSWQ432KHWCJD36V0BW"}, "position": {"index": 161}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNHVKNMAKPVT2JV2Z3FTD4KKGB84P6T8CZ114MN"}, "position": {"index": 2}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN18XYT8V0948TZXQMTGS0K77KNKSHRRYPAK069JJ"}, "position": {"index": 67}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3Z4MMRJ29FVZB38FGYPE94N1D8ZGF55R7YWH00A"}, "position": {"index": 218}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1NXXGQJ5MRH5Z4DBHQ22VNCK37852EJG73JYWBP"}, "position": {"index": 96}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3Z4TZZKR44VZYABNNDCQBZ15KF6PB6XCKRR5SKV"}, "position": {"index": 219}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNAMB8AJ6ACSPJ4ZHREMDD6M9KSJ1DMNM205GYD4"}, "position": {"index": 14}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN23QQVYTWX4AQN8ANSHGCT6ZMJQMKACSYBJTM703"}, "position": {"index": 118}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNQJGW4SXH4K5EK7X7KZ2SSKGZWN4PY2QTS72AWA"}, "position": {"index": 33}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1Y2RTKFVTXE798X7F9ECF31F7TAX18Y7EV4WT6N"}, "position": {"index": 109}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1SR5331QG1W4CNVV0WWXJG7QA4AS9JDSK2J8DSG"}, "position": {"index": 105}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN22N70KQZH6QX140F48N6MCX2MBQ7CXG1V2QKJ0"}, "position": {"index": 5}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1X8CHD89R2ER9MVV7V7HJYCZS00CCF7KPJ83XZV"}, "position": {"index": 108}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1DT32505EN0VG8J1T09KQDP4RV08AW9VWSMBHNF"}, "position": {"index": 83}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN16988WYZA10B9XR398BA7B0ESZ31QQY3A8N8AJA"}, "position": {"index": 63}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1RM13C4SJ1Y5K13DE89P3HKXRKNZF8M61P2A4S4"}, "position": {"index": 102}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1ZG3AZ6SJR70HMNAK8T1EHM6KE0CSJHAQM94618"}, 
"position": {"index": 113}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNRH38HQ4K4C1MJCKV9BR03J6F3TH3XH4CYWZBAZ"}, "position": {"index": 36}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN20P0CCF6MXCRY5W8SX2RW05PJSWQTY6181C9JJZ"}, "position": {"index": 114}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2B4HTY4A6DZB97ZE2A97Z0MH3M6882MYHDV5B2G"}, "position": {"index": 129}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1TED7Y8VW1STC7ZWN35R5RFT1K3YEXCFZ6MR1P"}, "position": {"index": 4}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN33DHH8YV724ECVCB9MV79RVDY3FBB0YGVB3P3NK"}, "position": {"index": 171}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNY83XRZ7EEPP30XN06HKGMY01H43A72S1GE63Q0"}, "position": {"index": 47}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNRAM6QXPQ6TY6NY81X9APCZ21CE6RKYE4WT3WH6"}, "position": {"index": 35}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNX0RJKG39T1B1KV9WTFND5PQ3QCP9P2G8Y6S5MN"}, "position": {"index": 42}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNXF7W3ZGR215RVAJGC3KY2ZXSATPVZQBXNZDZQ6"}, "position": {"index": 44}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1655NH9E4YQQGM8JYQHE6JEZJWG7RV27K7EW075"}, "position": {"index": 62}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNXJK5EQCZ5MDW500TZY774P4Q6M1Q7DNC1521AF"}, "position": {"index": 45}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2E881PWWVDGQZ0H3YTWSM01HTX7VGYKFZVQ4D1V"}, "position": {"index": 139}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNK0SQ2EQ5G90PABM9NMY5Z67Z43RVVFN3PTDJ5R"}, "position": {"index": 29}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2W1ZXAH2RQVPYBE0PF14SB6RZYTBJR8HMPC4AVX"}, "position": {"index": 157}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3K7BVTKT0SGA9XM0QXN526N65VVP1MDGKK0MXWV"}, "position": {"index": 195}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNWDN85MGJRP8PG3WA8P8N1XNAHFPTR0DJDV4QKP"}, "position": {"index": 40}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3N393B6YPDG11TM939D9J6XW506V2MPK2P6TAE3"}, "position": {"index": 199}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1F0NPNTYZNB0H2RXF9RK8KA84NBF2NJB8Z4VZKF"}, "position": {"index": 86}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1BHCZK4TQM5AJNJ4J731KQ5EQN74QP0782NXCN2"}, "position": {"index": 74}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3G22GCA9XAA4KBV4BSP8Q1XDZV0JYQDS0BE0F3Z"}, "position": {"index": 190}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN15ZMWSXHWMKXBNV0E5ADVKP3GNCAR1GGTP4B7XG"}, "position": {"index": 61}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN39MDEJA89NBVT8DBB02S0EPHGF9X4FY92KA9QDV"}, "position": {"index": 178}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3SKWPJFF898QKYS3TNF3R9GZ28T3XS7JKK2HNXP"}, "position": {"index": 208}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN21Z84320P7975TAJ892Z3WCRHGVWQJF9JCSABT2"}, "position": {"index": 116}, "type": 
"STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3YJ0419DWYW0JFSYP012XNE2Z162B9B258NG6DC"}, "position": {"index": 217}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3KVQRVBSDSJK7Q4MZN08K39VCJF0ZYW7EN3ABAR"}, "position": {"index": 197}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNE833SYZZDN8KAR2JGPFXHHZM4T9158HR0FEGTC"}, "position": {"index": 20}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2DDW0PRAQHXA87YM9QEMBCWYK472DPVJ1SDQ8XK"}, "position": {"index": 137}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2T25TMD0T42KFZS5SVB92W3GH2SND2E7PT8649G"}, "position": {"index": 153}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2YNCDAFVHVYVDETH4FYW7SK3HQB3Q9VNDE15FFX"}, "position": {"index": 162}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN392SPKHT0FPF79JY1RAWC0TPQ18TZX3JZEKX011"}, "position": {"index": 177}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2ZNZ818AENAF2G6DJV0XDSZNMNPBQN7RY7GEB8F"}, "position": {"index": 164}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3P7Z5YDNDHA9BSG1V0HAT2ER1YTJ39FPBDDHD8F"}, "position": {"index": 201}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN249H77RDXJ48W7QJ4HSASMM126C4MRXX4MS7AHJ"}, "position": {"index": 119}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2CRH16XWJX9WHZDYRANTWDBGEHVM53B4RKRKTS9"}, "position": {"index": 135}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2FEC3P0G7EHZJGR0842W5SFMMGY113WGVS3H3XQ"}, "position": {"index": 143}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2VQK578D964GF7TWPB663P70452KG67BJZPR3NG"}, "position": {"index": 156}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNHGXHX0ZBK76FQVC1P5KBM7KWGS353KTQC9TKZ7"}, "position": {"index": 25}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3AZ0ETGMVTW5SMAVFSXCD0HHRQN2KCASMKSBRS"}, "position": {"index": 7}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNGYRJJTTFXK15RAZZ8P76N33Z50QKBFZYSSEB3M"}, "position": {"index": 23}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1HBDPD5XWXMAF977QBG9205SFE0J8ADT82V1A3A"}, "position": {"index": 90}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3Y80TWZ5ZGDD8CP0P1GB7YRB75PE3X160FSBMAZ"}, "position": {"index": 216}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3Q333YEXECPPTCDYV6MMFCYVQCXWXHTN0GZBH4X"}, "position": {"index": 203}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2EZKAJ1P98VCSK2TJF45B770HVD1TKTQPFQ7D8B"}, "position": {"index": 142}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1RAEKJK8C5TN3X1C76S91QXWYXSAGE20D6Y3B5F"}, "position": {"index": 101}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2C7MR08SAYE0ZQCBKEDTWS0BQYKYMZKYHQQGCA6"}, "position": {"index": 132}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN195HAZQZNY4HSS4MJKZG803WW9RVX4J2EE8ZA3S"}, "position": {"index": 68}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN22M5VNBKZ8R8V3NBV37THNP27M5QDQ32NHCC9YF"}, "position": {"index": 117}, "type": "STXMintEvent"}, {"data": {"amount": 
"13888888889", "recipient": "SN2B1A805S5TX2XYAFW80F4KY5FMEYZHNBEY0RW6Z"}, "position": {"index": 128}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2HFHDGCMCYBH7SEZ82JSQ5ME87PQ8FRN21YF5WV"}, "position": {"index": 145}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2S8TBDQKMAEQ9DNFFKPWJXH2QYK92487WGEVWMY"}, "position": {"index": 152}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3KHTES01XH7RYD2CZX5HGJ0DFRYQ7C8X6CA7WYV"}, "position": {"index": 196}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN30JN9TN414XQEGKVDGHFV1MBJCGQPNMT6MDD0HT"}, "position": {"index": 167}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN263J2J32B0WKRKY5GVMXSVWDPHHBF3GVM8MCB0"}, "position": {"index": 6}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNPE609M949ERBAATR32V701XYZ3J09CRMCJVWGE"}, "position": {"index": 32}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3Q7A7FQ1SK1437NZ1XBNA5TXWGW6GE3N2WG996R"}, "position": {"index": 204}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN67J44P364T2M385FYN39B628JBWM9S5AVXVBD6"}, "position": {"index": 8}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN10JM5Z6YV9VFWKWD95Q34QPDTAJ83H0RG9EENWT"}, "position": {"index": 49}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN216XEF6SQSZ4H6KQKPEA99G6N8A06Y7JTV9PV2S"}, "position": {"index": 115}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3004YRJ5WSXAVETG9ZM2DJ6JY7GE48BQK7GMJ54"}, "position": {"index": 165}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNN2AMTE6QZT8R5ZSPYAQXFN957XTSW21VBB4V7B"}, "position": {"index": 30}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNSG0D3Y4DZH3R0MBVD74T3MT95N17A22Z6ZE9CP"}, "position": {"index": 38}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN38XHH81W66GT6RTPZ9YHRTVEQN0BCECV4YBK4TB"}, "position": {"index": 176}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2WBD2JGE9GZ4P6MXNYK5NBH5ZVWW9V5M669YVA7"}, "position": {"index": 158}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN13KQ31KQ4QGTXTC3CP709YENW2VAE5KSVGBAFYW"}, "position": {"index": 56}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1DJBHS7FDJ3G4B9WH9992F4GKNV81V94NA6Y108"}, "position": {"index": 80}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3DWPBD979T9WTWAKSHZ0YW6NQ8M9QWC99853XP0"}, "position": {"index": 182}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3SPJWQP1TAZHV68XDGHDX9D810HNGV7X194YCFD"}, "position": {"index": 210}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN12WSR4RS455TJRT7RGNWRG4Y7QXEH8Y9G72T2N"}, "position": {"index": 3}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNR2YHFR9T25NW30R2K49R8TNPWBDFFS98NP31WQ"}, "position": {"index": 34}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNY6EVGGEESW110SC37PSJDMWTMAWNQVR7M0AE68"}, "position": {"index": 46}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN0FBQM0B0M1NTF8PE96FGPSXECDRD0YY0BB4R6M"}, "position": {"index": 1}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": 
"SN1MY43X1H2P1EH45E25B60RT52ARCVDHB24KR576"}, "position": {"index": 94}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN184FZMGFK3VHWHDCXA7KVENARZQEC7PXA9SX8JQ"}, "position": {"index": 66}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1Y5R2K2V07F3ZJW3JY0SDQ3XXTDB8K5FS026413"}, "position": {"index": 110}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1YNX52QH5EF07751DNH3QJKJY8NVAPAMG9D92EW"}, "position": {"index": 111}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNFX2RF4E11TMJW27AFM0K90C2P54JEC4RGVGHVG"}, "position": {"index": 22}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNWR4Z6JHXDSJ4Y0WWZZAP3K5XX9H4XN0793G5N9"}, "position": {"index": 41}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3NW1ETDJH9ZDKKEQ05V4TYK5JY2PEA6YS3MWRE1"}, "position": {"index": 200}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN15WA9YNEXQ4BAD3CJ4CWS2WXSHH8X7TAFH3RBJQ"}, "position": {"index": 60}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN19FAXPBN7XG07E5FMMHMCMZ5DT4T5NS1RGSQ838"}, "position": {"index": 69}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3SMSXG9DXXAQYFV7XDQ99KB43HZB2572B75RCE7"}, "position": {"index": 209}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1A29CMAQTG2M73Z0XKQMD47X8V2PX46Y9W2M41Q"}, "position": {"index": 71}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2DB00PT5PS33YX5PRRF9STHMGE9QKPSB0F2P01W"}, "position": {"index": 136}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN7Y95JQT7EYZN7Z4BYM1M23VH05E3HM531W3D0J"}, "position": {"index": 11}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1MV1JYJJ71F195YB2N76N8E9F37RV1WJB88CN4F"}, "position": {"index": 93}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2GK2FSP1N60C71PBKJFYCFHNYKKM326D8MDY1RY"}, "position": {"index": 144}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2BJBZX8EREHZRET5AY512MS42W7WSFA8ZSP8ND5"}, "position": {"index": 131}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1GKGCH2QJJTTX0MNPEQ834J7Z4Z0YH45J7TTE0V"}, "position": {"index": 88}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2CMAX76MDDG9YZ2ZTATW1H2HMA5NVVBEPD5QJBK"}, "position": {"index": 134}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN11M6VMKSSW7TCERW2WERT52A5VF95PSK3J15XZ5"}, "position": {"index": 50}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1DC2AV2ZAKEYZ9PAQD4PS8YBCA543Z23ZF60AJH"}, "position": {"index": 79}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN716VHXD8KFVW9B59WF8VCSX43TQWD470C87RHJ"}, "position": {"index": 10}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1CFGWHNWJQ0BB653GTDSD5PPG3DFAR3NV12TZ0H"}, "position": {"index": 77}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1DNJXB0D07SR09XXY22HZCNKM0843GM4ZZ9GQS2"}, "position": {"index": 82}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN02ENSM1ZD4EKE6D3AB0JXTJMH7N4DPK7QGRA06"}, "position": {"index": 0}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3VMAKTANXRB2JGVAH1WFVV79Y14AG32R2J8CM94"}, 
"position": {"index": 213}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN336PPYX6SYPS7FB4NGVCK5Q26F5EK5P7C0JY2DP"}, "position": {"index": 170}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2TQD42M5EPVH950BABYB57HP70VKQAZPT7F4C2D"}, "position": {"index": 155}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1HM73Y32CK85WKFY6C1QBG5WZJD5FXH8RBTXBZ0"}, "position": {"index": 91}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2M46XMP4PVR77W7WRSEP9PBT0NRRWT3A1G3XB4D"}, "position": {"index": 148}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3FCN36TP0K7XH14F1P6FCH481FX3ZRFFR6WWFTD"}, "position": {"index": 185}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1D3GW93JBQ6D3XNQW4A8NFTG0HRT6KE6Q84XPJ5"}, "position": {"index": 78}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1E6B4PWAFTCPYAR0M8RM4Y8D103NGCAH50ZYCDQ"}, "position": {"index": 84}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3M2JD8QSNMS9D2AZYD7K4SKDTTDZDXV0D3FRHFA"}, "position": {"index": 198}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3R4M5E15SXSWY02P695EGQE9XS0JV3BJ8C7MN1N"}, "position": {"index": 206}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3VJRQ8JV8JH15DFAQR3EA6NWXB991J0E4EDDHNN"}, "position": {"index": 212}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN28ESNQ84MAW9QWQ1NR5C7CM9JFJZD7472NFD2F8"}, "position": {"index": 123}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3JE58XS2B2AFZD8K4XJQEAX2KWDG22DMVTFSE6A"}, "position": {"index": 193}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1MGBDESSRDSXGB1Q195WS3JJSCVGTZS0NHXKASK"}, "position": {"index": 92}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3CJGVFC88TS0SFXGTBB9YQP4XACRAAMMD85ERHG"}, "position": {"index": 181}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2TJFAMJQNH1WAZB55DDTPF2YAQ11YZVG40Y80RG"}, "position": {"index": 154}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNW8XQZAJJMSJKG988QBM9AF0V3AM5TG8JQN02FJ"}, "position": {"index": 39}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2RKY47JZFYR7YM1DSTN8RJDZ15PXBKS307EYK51"}, "position": {"index": 150}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3BVA28SFJSHKB064N8F1PW8E5HCJ1TN6W4GZNXJ"}, "position": {"index": 180}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN1C4P3RVJK9ZQCMZ3D8SFNG3QTHQXQ4RKB1HEH3Z"}, "position": {"index": 76}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN2DXA6AGPR6G9TS7R2MBDJKQS9QEZKZSKQQJ7F6X"}, "position": {"index": 138}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN3Y7SBVHA7PW333T9S3QMJ0GP8HX8NBYA8AGVN2K"}, "position": {"index": 215}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN132WBPXH7WX6YXN5RY9AZKNX8WSYZN79XGWNP31"}, "position": {"index": 54}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SNDW0TZYWSFEVZHFE4NCFARHY8CNPRQPA0KYG8AV"}, "position": {"index": 18}, "type": "STXMintEvent"}, {"data": {"amount": "13888888889", "recipient": "SN11WG8D1KF3K6S7RK6Y3RET38A2ERS3PYHD279E7"}, "position": {"index": 51}, "type": 
"STXMintEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST000000000000000000002AMW42H", "sponsor": null, "success": true}, "operations": [{"account": {"address": "SN3G4T05D2P399Z4C7XBYM118VGDP875QJBQPH842"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 0}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1VABZBF4ENNXBWGN9W1EME63VV69VJEXWK00161"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 1}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1SGSZE8WYG98HB0E4NFJ37MKKJ7K5T74G6K19N9"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 2}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNHWG5N9K21M4FQCZYY1HP4Z9HV2F617PFG32SXR"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 3}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN13KWC112MWQ3RXYM3G7NRSVM29KN3X4Z5KFXZQH"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 4}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN29JBK904XG97W9ZHP4HWTTFVAZGNG5TF0652YJ7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 5}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNCEH4V73NCPF3ZSS8773ES8MV9JPHTA05F2CEFH"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 6}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2YXJAZC445VF72WSC0716D1BCQ0P7QA1E9V52TN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 7}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3RD5DR82MWYKTSTFAX8QJETTM4K5FM9VW79359W"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 8}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1EXPXP6AKGPJSE9XHV41KS9BGQ7C40JX5MMCBKP"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 9}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN314339F8E6F6D1FR0J0XVE8NS3R8XNNTBWR7WC2"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 10}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3DY2HN93RDKTA5RV186GQHM7WZ9P9JPEQ0KHNEB"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 11}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNHRV9XNMET2F1NJNXFKW5DQ2BW6MTREJ4G05FE0"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 12}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1VJ0FZ6T07ESX7FAV5NEJW2N9RDDF0BVXRVHRVX"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 13}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2WRGSJQJ7CS3H1N8X0M3DGDXXCH1NV7XNP1BGVV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, 
"operation_identifier": {"index": 14}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1SPPY3670JJCAYWAXB8D7HREPGWEBY7J4SCG76V"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 15}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1FM5N69SG62SQHPFKTXTESTFVZB2GZCM0RPCGR9"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 16}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNCD491XBEJFM3YPTPX3K8K0TQNYAHHK2M080PHZ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 17}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN10B79PD3DN2XRBVBJWZ3QJ7SETV4ZGRHBFZ1QM9"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 18}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNC6E7YJSZDRG7MKP6PX0FA2YDEVDQC6Z0HXP6RS"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 19}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN9VJ2YPZ73CHWPZR9NDYAZTX8P5FPAPP47EHXBZ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 20}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNE3YNXYPDC01YTTCW58DJJYK3HS17FM5WWD4Q3Z"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 21}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN15H0A2XH7AXD9B9D56SQV5NJZB2ECSWHPW8MFKE"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 22}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN16WKZXCF0Y87EZXWRZFS3EF39DYSEHT4HMZ928T"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 23}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1PYWRYBRHVADBQT1XCYYC4ZSEMT1CBN4VNX6ZP9"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 24}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN6WQ4PTPHC2S4Q7D2RBRSTCER7BD48TTAQPYRGF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 25}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1AN0ZVPGJY5TAJ9125C59A19K630PNXZ27WT5EB"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 26}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1ZFD206K49D42HMQ3ZGXY8B313M4KN5GJNJ1ZTB"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 27}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2A6V21SHZKCVMZHKE64WZ26CCRFDEMYP2AE2DWM"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 28}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2EW7FACSH6DNYZ3ZE56DEY7Q4Q6CGAHBTM460PN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 29}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": 
"SN2JR0VA2Y13RFWTKXSDNDEW38EZ3A1X08F1RY6HX"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 30}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3K05G2N8RVDSZZ25F1DK6R8MTPF7YSCHE15AM28"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 31}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3XX07N1GWR12AVVE979K9VHEGHB9MD3RW9X2DFS"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 32}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNJE9A15HR6YNGZ767F9WFN0GZQZDCNGC4T40KKD"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 33}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2EMRY775PNYH1R80K24ER3JHTWFADTYD0PBY5HY"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 34}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1DM449RKB08S1M3JGP3H53HWKP1THXG6EF4WPBB"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 35}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2A9A2657E2WB4KQNF2X56EXX8BNR00A9Y718VKP"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 36}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1NR10AG6AK2WGZGB6JNRAFGF2R29SQS50BVCRTB"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 37}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3636BXKN7X03TE9NFTCEVGQJ7X1M4EYXR3SY3BT"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 38}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN36QQQACZ6V8JKDMZFX5G5V7S8S0E3DC6TVVXKD3"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 39}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN151ENM0ZP6V7P1FDP51AQYYDYNB5S9ECPEWW7BZ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 40}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1AES7Q0YMTY3T38FNWVTK0J14NAHS1N689N1B93"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 41}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNRVME1H0F653SR211EK64R3TBMWASB67XKS3H8D"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 42}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2QH4JXV2A7HZZD8TA1Y1J8KZM7PD62BTXT3SJYJ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 43}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2RVB0CYJJA38QC4GNGX37XB1R6NDMW6K91FVG94"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 44}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN9GQZFK5HBP22ZWRBYS110YYXVX4NXHAB1C0QX4"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 
13888888889}, "operation_identifier": {"index": 45}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3AZ5GSJJWQE0TD0NN3HGSCACKFGFD5QXYNC48BA"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 46}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1GTTQ8NAK8YAP374VWV1F3CMPZGYWFCN3KGHHGF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 47}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN132F6Z8GXSHNF5JQW9RKJ4FYPZBN7QD8RYFBYVX"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 48}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1R3XKG0WFSC8F608ZZGQ5EF3XA6ZTEV9Q62XHXS"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 49}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN273YMQXXQAQQ430N4B40282GKFPZW8XXCV9X6JA"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 50}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2BHJM5BXAZRPREWY4ZEKD58CFTPDQHXTADAW6MV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 51}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN12APCFJM8CEGC5FZMKD8EDHW4TQ8RCJD1KADMG9"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 52}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2CGGK7KPNYMYEQ0Q49ZV9KF4VQES3PED53VFGJM"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 53}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3FECN2P5RRX4GJDD97TH4KTH78GHAXW10NQ8JME"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 54}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNH20XGJG1N6SH89A4PWTABXFZ1MJ42VAZQCBXV0"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 55}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNFQQJ13C085Z3PY1FHJE19V1ZENZ1RKJ2PD4SK1"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 56}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNNPX3JZ0A7T8HWFYAV22AHQDR1T3XGR0C7FBC05"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 57}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN24K04B9ZNGRSZSX5FNJ8N946CC9Q7H2N342V0PN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 58}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1PW61KPTCDJHKFQGVJ9PC06QHGCDBH0600MZ4Q5"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 59}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1BT2Q02ZRZC3C7DHCFP5JYWAQQ20422YJ5H0GN7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 60}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": 
"SN134H42XHC5B1VT5P4EP6VAYKBQ3V2594NFAKMF3"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 61}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN300E1H03P0D548QTKYJPGXPC1T86BJM5SX2CE8P"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 62}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN321ZKMJP7M22THC0EN9C9N90AY4DBMKQJFDJHDJ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 63}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3F8SVA319PENA122YX3Z4W37DNEGSJGFWC3XTJW"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 64}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3FTYEC9F6CV2QQX7S5KVDPTPZ8QQ3KV3PJZ09Q5"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 65}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3GQP1V1E9Z4H2SA5QFE5PCSBTR07RAMDSGR9GW7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 66}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN19N4M19FZYZAAY2TC1DASKGAVPPCBXN1N64Z2HQ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 67}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2WJ90NTWFE8JZ4GG43KVHN5G51XAMAQGW9YCFYF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 68}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN258WG743EMD6VPA49S9C8CPXN1WEKXK7PQD9YAH"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 69}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2AMR01NE8AKKGW8AAP4Q82CDE9CJY9Y3CZ4BCBZ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 70}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN37CGYEDQKR2Z8Y72XTJZ991E0PMQ782GAC1CMPW"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 71}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3FKK9V6M7YGPP4SJCQAXA5KYVAKEY74YME7ZZ5R"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 72}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN38R5VTRA3K15V0ZD45QXX24XF6APEF9X4BSGGME"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 73}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3TKXA8BBCV93ZYMRCWTB9B7HPY2JWT78JQ8M6EV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 74}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3FJD9GBMEH2CENDNXRHZMCVH231TW6EW3H5MHH3"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 75}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1811XEFECGZN9K567Z63D9QZHQ7VN921Q6N1SBN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 
13888888889}, "operation_identifier": {"index": 76}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2KCEP88YKFJ6TW1245C4GDRVKSC06SXJKS5JV6M"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 77}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNX51SYDER1MYNCG1TGS8Q53QRTGGA5CKY0P6R4Y"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 78}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1PTP8Q5M5FEXQ8NQ6V3QV21HJNC8P754EMP27M7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 79}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3PJJD7PNC00HQ2G5QP5NEVNMZ112XCNGT86MWTE"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 80}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3QCD3EZD98VXHVSSBVTA245V7XQ2YY57HCPNN4C"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 81}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2Y14Y0NQYSPY6JAMNNHGDSWQ432KHWCJD36V0BW"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 82}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNHVKNMAKPVT2JV2Z3FTD4KKGB84P6T8CZ114MN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 83}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN18XYT8V0948TZXQMTGS0K77KNKSHRRYPAK069JJ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 84}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3Z4MMRJ29FVZB38FGYPE94N1D8ZGF55R7YWH00A"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 85}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1NXXGQJ5MRH5Z4DBHQ22VNCK37852EJG73JYWBP"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 86}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3Z4TZZKR44VZYABNNDCQBZ15KF6PB6XCKRR5SKV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 87}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNAMB8AJ6ACSPJ4ZHREMDD6M9KSJ1DMNM205GYD4"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 88}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN23QQVYTWX4AQN8ANSHGCT6ZMJQMKACSYBJTM703"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 89}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNQJGW4SXH4K5EK7X7KZ2SSKGZWN4PY2QTS72AWA"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 90}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1Y2RTKFVTXE798X7F9ECF31F7TAX18Y7EV4WT6N"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 91}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": 
"SN1SR5331QG1W4CNVV0WWXJG7QA4AS9JDSK2J8DSG"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 92}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN22N70KQZH6QX140F48N6MCX2MBQ7CXG1V2QKJ0"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 93}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1X8CHD89R2ER9MVV7V7HJYCZS00CCF7KPJ83XZV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 94}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1DT32505EN0VG8J1T09KQDP4RV08AW9VWSMBHNF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 95}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN16988WYZA10B9XR398BA7B0ESZ31QQY3A8N8AJA"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 96}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1RM13C4SJ1Y5K13DE89P3HKXRKNZF8M61P2A4S4"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 97}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1ZG3AZ6SJR70HMNAK8T1EHM6KE0CSJHAQM94618"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 98}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNRH38HQ4K4C1MJCKV9BR03J6F3TH3XH4CYWZBAZ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 99}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN20P0CCF6MXCRY5W8SX2RW05PJSWQTY6181C9JJZ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 100}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2B4HTY4A6DZB97ZE2A97Z0MH3M6882MYHDV5B2G"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 101}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1TED7Y8VW1STC7ZWN35R5RFT1K3YEXCFZ6MR1P"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 102}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN33DHH8YV724ECVCB9MV79RVDY3FBB0YGVB3P3NK"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 103}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNY83XRZ7EEPP30XN06HKGMY01H43A72S1GE63Q0"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 104}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNRAM6QXPQ6TY6NY81X9APCZ21CE6RKYE4WT3WH6"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 105}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNX0RJKG39T1B1KV9WTFND5PQ3QCP9P2G8Y6S5MN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 106}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNXF7W3ZGR215RVAJGC3KY2ZXSATPVZQBXNZDZQ6"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 
13888888889}, "operation_identifier": {"index": 107}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1655NH9E4YQQGM8JYQHE6JEZJWG7RV27K7EW075"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 108}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNXJK5EQCZ5MDW500TZY774P4Q6M1Q7DNC1521AF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 109}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2E881PWWVDGQZ0H3YTWSM01HTX7VGYKFZVQ4D1V"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 110}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNK0SQ2EQ5G90PABM9NMY5Z67Z43RVVFN3PTDJ5R"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 111}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2W1ZXAH2RQVPYBE0PF14SB6RZYTBJR8HMPC4AVX"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 112}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3K7BVTKT0SGA9XM0QXN526N65VVP1MDGKK0MXWV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 113}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNWDN85MGJRP8PG3WA8P8N1XNAHFPTR0DJDV4QKP"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 114}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3N393B6YPDG11TM939D9J6XW506V2MPK2P6TAE3"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 115}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1F0NPNTYZNB0H2RXF9RK8KA84NBF2NJB8Z4VZKF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 116}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1BHCZK4TQM5AJNJ4J731KQ5EQN74QP0782NXCN2"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 117}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3G22GCA9XAA4KBV4BSP8Q1XDZV0JYQDS0BE0F3Z"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 118}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN15ZMWSXHWMKXBNV0E5ADVKP3GNCAR1GGTP4B7XG"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 119}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN39MDEJA89NBVT8DBB02S0EPHGF9X4FY92KA9QDV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 120}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3SKWPJFF898QKYS3TNF3R9GZ28T3XS7JKK2HNXP"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 121}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN21Z84320P7975TAJ892Z3WCRHGVWQJF9JCSABT2"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 122}, "status": "SUCCESS", "type": "CREDIT"}, {"account": 
{"address": "SN3YJ0419DWYW0JFSYP012XNE2Z162B9B258NG6DC"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 123}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3KVQRVBSDSJK7Q4MZN08K39VCJF0ZYW7EN3ABAR"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 124}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNE833SYZZDN8KAR2JGPFXHHZM4T9158HR0FEGTC"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 125}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2DDW0PRAQHXA87YM9QEMBCWYK472DPVJ1SDQ8XK"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 126}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2T25TMD0T42KFZS5SVB92W3GH2SND2E7PT8649G"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 127}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2YNCDAFVHVYVDETH4FYW7SK3HQB3Q9VNDE15FFX"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 128}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN392SPKHT0FPF79JY1RAWC0TPQ18TZX3JZEKX011"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 129}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2ZNZ818AENAF2G6DJV0XDSZNMNPBQN7RY7GEB8F"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 130}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3P7Z5YDNDHA9BSG1V0HAT2ER1YTJ39FPBDDHD8F"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 131}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN249H77RDXJ48W7QJ4HSASMM126C4MRXX4MS7AHJ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 132}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2CRH16XWJX9WHZDYRANTWDBGEHVM53B4RKRKTS9"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 133}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2FEC3P0G7EHZJGR0842W5SFMMGY113WGVS3H3XQ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 134}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2VQK578D964GF7TWPB663P70452KG67BJZPR3NG"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 135}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNHGXHX0ZBK76FQVC1P5KBM7KWGS353KTQC9TKZ7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 136}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3AZ0ETGMVTW5SMAVFSXCD0HHRQN2KCASMKSBRS"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 137}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNGYRJJTTFXK15RAZZ8P76N33Z50QKBFZYSSEB3M"}, "amount": {"currency": {"decimals": 6, "symbol": 
"STX"}, "value": 13888888889}, "operation_identifier": {"index": 138}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1HBDPD5XWXMAF977QBG9205SFE0J8ADT82V1A3A"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 139}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3Y80TWZ5ZGDD8CP0P1GB7YRB75PE3X160FSBMAZ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 140}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3Q333YEXECPPTCDYV6MMFCYVQCXWXHTN0GZBH4X"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 141}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2EZKAJ1P98VCSK2TJF45B770HVD1TKTQPFQ7D8B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 142}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1RAEKJK8C5TN3X1C76S91QXWYXSAGE20D6Y3B5F"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 143}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2C7MR08SAYE0ZQCBKEDTWS0BQYKYMZKYHQQGCA6"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 144}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN195HAZQZNY4HSS4MJKZG803WW9RVX4J2EE8ZA3S"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 145}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN22M5VNBKZ8R8V3NBV37THNP27M5QDQ32NHCC9YF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 146}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2B1A805S5TX2XYAFW80F4KY5FMEYZHNBEY0RW6Z"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 147}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2HFHDGCMCYBH7SEZ82JSQ5ME87PQ8FRN21YF5WV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 148}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2S8TBDQKMAEQ9DNFFKPWJXH2QYK92487WGEVWMY"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 149}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3KHTES01XH7RYD2CZX5HGJ0DFRYQ7C8X6CA7WYV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 150}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN30JN9TN414XQEGKVDGHFV1MBJCGQPNMT6MDD0HT"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 151}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN263J2J32B0WKRKY5GVMXSVWDPHHBF3GVM8MCB0"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 152}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNPE609M949ERBAATR32V701XYZ3J09CRMCJVWGE"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 153}, "status": "SUCCESS", "type": 
"CREDIT"}, {"account": {"address": "SN3Q7A7FQ1SK1437NZ1XBNA5TXWGW6GE3N2WG996R"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 154}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN67J44P364T2M385FYN39B628JBWM9S5AVXVBD6"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 155}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN10JM5Z6YV9VFWKWD95Q34QPDTAJ83H0RG9EENWT"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 156}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN216XEF6SQSZ4H6KQKPEA99G6N8A06Y7JTV9PV2S"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 157}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3004YRJ5WSXAVETG9ZM2DJ6JY7GE48BQK7GMJ54"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 158}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNN2AMTE6QZT8R5ZSPYAQXFN957XTSW21VBB4V7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 159}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNSG0D3Y4DZH3R0MBVD74T3MT95N17A22Z6ZE9CP"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 160}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN38XHH81W66GT6RTPZ9YHRTVEQN0BCECV4YBK4TB"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 161}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2WBD2JGE9GZ4P6MXNYK5NBH5ZVWW9V5M669YVA7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 162}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN13KQ31KQ4QGTXTC3CP709YENW2VAE5KSVGBAFYW"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 163}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1DJBHS7FDJ3G4B9WH9992F4GKNV81V94NA6Y108"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 164}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3DWPBD979T9WTWAKSHZ0YW6NQ8M9QWC99853XP0"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 165}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3SPJWQP1TAZHV68XDGHDX9D810HNGV7X194YCFD"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 166}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN12WSR4RS455TJRT7RGNWRG4Y7QXEH8Y9G72T2N"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 167}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNR2YHFR9T25NW30R2K49R8TNPWBDFFS98NP31WQ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 168}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNY6EVGGEESW110SC37PSJDMWTMAWNQVR7M0AE68"}, "amount": {"currency": 
{"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 169}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN0FBQM0B0M1NTF8PE96FGPSXECDRD0YY0BB4R6M"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 170}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1MY43X1H2P1EH45E25B60RT52ARCVDHB24KR576"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 171}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN184FZMGFK3VHWHDCXA7KVENARZQEC7PXA9SX8JQ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 172}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1Y5R2K2V07F3ZJW3JY0SDQ3XXTDB8K5FS026413"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 173}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1YNX52QH5EF07751DNH3QJKJY8NVAPAMG9D92EW"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 174}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNFX2RF4E11TMJW27AFM0K90C2P54JEC4RGVGHVG"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 175}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNWR4Z6JHXDSJ4Y0WWZZAP3K5XX9H4XN0793G5N9"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 176}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3NW1ETDJH9ZDKKEQ05V4TYK5JY2PEA6YS3MWRE1"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 177}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN15WA9YNEXQ4BAD3CJ4CWS2WXSHH8X7TAFH3RBJQ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 178}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN19FAXPBN7XG07E5FMMHMCMZ5DT4T5NS1RGSQ838"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 179}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3SMSXG9DXXAQYFV7XDQ99KB43HZB2572B75RCE7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 180}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1A29CMAQTG2M73Z0XKQMD47X8V2PX46Y9W2M41Q"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 181}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2DB00PT5PS33YX5PRRF9STHMGE9QKPSB0F2P01W"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 182}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN7Y95JQT7EYZN7Z4BYM1M23VH05E3HM531W3D0J"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 183}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1MV1JYJJ71F195YB2N76N8E9F37RV1WJB88CN4F"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 184}, "status": 
"SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2GK2FSP1N60C71PBKJFYCFHNYKKM326D8MDY1RY"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 185}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2BJBZX8EREHZRET5AY512MS42W7WSFA8ZSP8ND5"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 186}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1GKGCH2QJJTTX0MNPEQ834J7Z4Z0YH45J7TTE0V"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 187}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2CMAX76MDDG9YZ2ZTATW1H2HMA5NVVBEPD5QJBK"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 188}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN11M6VMKSSW7TCERW2WERT52A5VF95PSK3J15XZ5"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 189}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1DC2AV2ZAKEYZ9PAQD4PS8YBCA543Z23ZF60AJH"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 190}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN716VHXD8KFVW9B59WF8VCSX43TQWD470C87RHJ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 191}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1CFGWHNWJQ0BB653GTDSD5PPG3DFAR3NV12TZ0H"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 192}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1DNJXB0D07SR09XXY22HZCNKM0843GM4ZZ9GQS2"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 193}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN02ENSM1ZD4EKE6D3AB0JXTJMH7N4DPK7QGRA06"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 194}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3VMAKTANXRB2JGVAH1WFVV79Y14AG32R2J8CM94"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 195}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN336PPYX6SYPS7FB4NGVCK5Q26F5EK5P7C0JY2DP"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 196}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2TQD42M5EPVH950BABYB57HP70VKQAZPT7F4C2D"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 197}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1HM73Y32CK85WKFY6C1QBG5WZJD5FXH8RBTXBZ0"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 198}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2M46XMP4PVR77W7WRSEP9PBT0NRRWT3A1G3XB4D"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 199}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3FCN36TP0K7XH14F1P6FCH481FX3ZRFFR6WWFTD"}, 
"amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 200}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1D3GW93JBQ6D3XNQW4A8NFTG0HRT6KE6Q84XPJ5"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 201}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1E6B4PWAFTCPYAR0M8RM4Y8D103NGCAH50ZYCDQ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 202}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3M2JD8QSNMS9D2AZYD7K4SKDTTDZDXV0D3FRHFA"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 203}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3R4M5E15SXSWY02P695EGQE9XS0JV3BJ8C7MN1N"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 204}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3VJRQ8JV8JH15DFAQR3EA6NWXB991J0E4EDDHNN"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 205}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN28ESNQ84MAW9QWQ1NR5C7CM9JFJZD7472NFD2F8"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 206}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3JE58XS2B2AFZD8K4XJQEAX2KWDG22DMVTFSE6A"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 207}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1MGBDESSRDSXGB1Q195WS3JJSCVGTZS0NHXKASK"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 208}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3CJGVFC88TS0SFXGTBB9YQP4XACRAAMMD85ERHG"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 209}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2TJFAMJQNH1WAZB55DDTPF2YAQ11YZVG40Y80RG"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 210}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNW8XQZAJJMSJKG988QBM9AF0V3AM5TG8JQN02FJ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 211}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2RKY47JZFYR7YM1DSTN8RJDZ15PXBKS307EYK51"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 212}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3BVA28SFJSHKB064N8F1PW8E5HCJ1TN6W4GZNXJ"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 213}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN1C4P3RVJK9ZQCMZ3D8SFNG3QTHQXQ4RKB1HEH3Z"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 214}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN2DXA6AGPR6G9TS7R2MBDJKQS9QEZKZSKQQJ7F6X"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": 
{"index": 215}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN3Y7SBVHA7PW333T9S3QMJ0GP8HX8NBYA8AGVN2K"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 216}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN132WBPXH7WX6YXN5RY9AZKNX8WSYZN79XGWNP31"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 217}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SNDW0TZYWSFEVZHFE4NCFARHY8CNPRQPA0KYG8AV"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 218}, "status": "SUCCESS", "type": "CREDIT"}, {"account": {"address": "SN11WG8D1KF3K6S7RK6Y3RET38A2ERS3PYHD279E7"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 13888888889}, "operation_identifier": {"index": 219}, "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xd5aab1e2b34534723c58afa7f20f543dcbda37594393e9d0617385b2f47d7aab"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192408.json b/examples/chainhooks/chainhook_20250227_192408.json deleted file mode 100644 index e059ca8c..00000000 --- a/examples/chainhooks/chainhook_20250227_192408.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x6ba7f3153f0f97c1fb3ffcd96578231aa3b6780b5876e7f2687067548140b94d", "index": 222014}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x3e7804a435b1a5147c986d2acc86c7a4e44f6119383d4c1a969102b6bce24a09", "index": 19756}, "block_time": 1740712621, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 15, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["010612baead0ec165892db1eab9c9f22d8d9e2720ec2f8b127dbdc328cf558898d7989b3e90d939b795a0555ae1221629b8b72ce0435812ca1fe4920285aed7eec", "014a79a2693338a6e2c65575817be3c1ea086fa60750e44f28030f07195f96b28c03fdfc0bc458444764ea294b61a05c69c9a3f4366f370053696c09f88b00c71f", "01f047313c497d2139289c9eedf6612b86b76fc75fc135c6a4f45ae01ba1a19a6c4d31ce6f9bb88e1e67c2ef19b9a50adc317ba3607cf1ff81c1f87b34058c1db7"], "stacks_block_hash": "0x4ba961377bbf075e34c3b25797fabfc44b752e575e145197c563933274c1b50f", "tenure_height": 16952}, "parent_block_identifier": {"hash": "0xd2af5977981bf331bb3b8094b5b0f67d9ef022df10c4a7a03a67a5dad52e5210", "index": 222013}, "timestamp": 1740712542, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF to ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67696, "position": {"index": 0}, "raw_tx": 
"0x80800000000400c2963cbcd9cf3f60311c34087ceb45c8abdbc4130000000000010870000000000000012c0000b74c775bb8dc3c918b9f860a531a83276fc4934a854f7b327349421131b997e83226d43c468e6b1723527860bded60ccafb0f80b706bf6017ca350fffcf9fa1603020000000000051a841f9b4dd30ae02fd816cc225c59d1b00b9c3a3a00000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "sender": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xb77f938235440933b197443968b6b220d6e498f87fdf97b8fe03099d3154703b"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192409.json b/examples/chainhooks/chainhook_20250227_192409.json deleted file mode 100644 index 205dd8d0..00000000 --- a/examples/chainhooks/chainhook_20250227_192409.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x1973929448ad499f4b3bfbf344741cffa4a5474b5db374af39c6dfcae3999fea", "index": 222016}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x3e7804a435b1a5147c986d2acc86c7a4e44f6119383d4c1a969102b6bce24a09", "index": 19756}, "block_time": 1740712685, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 15, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["0041b224964114d1cf6a8013a52581a3c5487a9bef271856c83687cae33279a8d2189f84dcd5036995069e9ac956ac028e9464ac40e80993cc3695a01a92bf4034", "01b2529349a44a67da799a8da69259a271765a1144e0355c43d63e0cedad9998a72838a3425b1f82042ee4c6b066a4fef953830524802eaebe03c2b905516f453a", "019a4f9374fd0a44cd9dbc9f9a9e90f83e71602b7add530709d92e8c23fa9833387eaece038ac0a6a49b8d34e2d03b53735172457f68c7ea62d5dbc1b705fd507d"], "stacks_block_hash": "0x83ffcdbf76c8acaa16315008292522609f99a1e917d63fcddfc9e6004121fe84", "tenure_height": 16952}, "parent_block_identifier": {"hash": "0x24b437445703d848ae372623642ce739e60c572266a25f91962256ab21dd98d0", "index": 222015}, "timestamp": 1740712542, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B to 
ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67696, "position": {"index": 0}, "raw_tx": "0x80800000000400b4fdae98b64b9cd6c9436f3b965558966afe890b0000000000010870000000000000012c000020ff168f089a0f4b822aed24928c40c0cf51586843913f25a1235e4d77b49a8549988411753d423c4b956e509747f7468b0c0fb29b47700e661091e2933be34503020000000000051ac2963cbcd9cf3f60311c34087ceb45c8abdbc41300000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0x3222f011a6ae8353fe402e81c768b9756685d79b355efd18d16fbaf68b6d356d"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192410.json b/examples/chainhooks/chainhook_20250227_192410.json deleted file mode 100644 index e0107ab5..00000000 --- a/examples/chainhooks/chainhook_20250227_192410.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0xe06216c9bfdc82c36e4dc9b37c3dfa72be24c4f6edc98459991501fad7d1e04c", "index": 222018}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x3e7804a435b1a5147c986d2acc86c7a4e44f6119383d4c1a969102b6bce24a09", "index": 19756}, "block_time": 1740712744, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 15, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["003e30104547d8df2b1a0d6772a274146061b34008f61ff8b0b38cbd527c489b562c2188e2838c37f725d41d0fabdc87e05e7774a22c74cab971384743ca3a0b0d", "01abed22ab8d5731417bf7638853be25fa10a8da2811cf0ec06477e86533a3d7123da4c3d292df310d279f438b3cc46549ef813c9362f7ed11446d0aa74812292c", "00caee9658db07786a7b4242bb51b11290824ad6b7ce943779764b162e6c030ee53043a200852996b81c90d3605eff007769cac02acd5b976fe9c1bfa87acb34ca"], "stacks_block_hash": "0x6201a1c666bc3fc36b3ee92f464ad01052325b9cc30c39d0e2ca7712e5ffc7a9", "tenure_height": 16952}, "parent_block_identifier": {"hash": 
"0x38834b3839d60dfa5bfa42c9d670089ec492bdccbe40fdf890c254a2890bd4da", "index": 222017}, "timestamp": 1740712542, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H to ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67697, "position": {"index": 0}, "raw_tx": "0x80800000000400841f9b4dd30ae02fd816cc225c59d1b00b9c3a3a0000000000010871000000000000012c00000e1726aa9215c38993787dd9810a40f4545207d770bb97f60f44ae07e27cf62d796afe84d0aa9d5983c017c628e5f1c2fd0c76d35c432f960a08d4cd87121a0803020000000000051ab4fdae98b64b9cd6c9436f3b965558966afe890b00000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sender": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xf7c9a786867929650c50f02da0be569e4b34fd90dbeb462187018037d7aa4714"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192411.json b/examples/chainhooks/chainhook_20250227_192411.json deleted file mode 100644 index 9e44e9ea..00000000 --- a/examples/chainhooks/chainhook_20250227_192411.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x16483b75ed9a428134a6726cb874e77dc300fb5e4ec72b431e7e1e10b97aefee", "index": 222019}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x3e7804a435b1a5147c986d2acc86c7a4e44f6119383d4c1a969102b6bce24a09", "index": 19756}, "block_time": 1740712779, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 15, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["0060b51988324e5b6fc3b666d978e60d62a3a9d8e841292098e1b42af4e81d1f340c43afbecb317245c8ef4c3d811a1f3653316a2cbd727472931d3d21f5e0868a", "01d293c2371498e3f29c2e81aab3b7b79a89a9e3cf59fd592d5e7319b944c9e93c13a5f76891136bf48a2a11813d541bcc2a7101437e6e82e889c5053a3c405195", 
"01c141fcecdca8b2426aeb7a59132602e3c36e9c83f81c12a2784d08e997bdae5677ec2eba0cec989d0099a54f5661868c7258e2ceb7d5059f60430eb768c89395"], "stacks_block_hash": "0x4806f7b15159d366b05992c14b0c65886998c5d83230043548b801814728f98f", "tenure_height": 16952}, "parent_block_identifier": {"hash": "0xe06216c9bfdc82c36e4dc9b37c3dfa72be24c4f6edc98459991501fad7d1e04c", "index": 222018}, "timestamp": 1740712542, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B to ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67697, "position": {"index": 0}, "raw_tx": "0x80800000000400b4fdae98b64b9cd6c9436f3b965558966afe890b0000000000010871000000000000012c00004a88bd888193d2f2f50a04c626ba90e085ee4fb09f36909684e3101694a812a76f492091a466e09970a5776431cd30245dc740a5e90bdad939db96105467ffff03020000000000051ac2963cbcd9cf3f60311c34087ceb45c8abdbc41300000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xd4ab9a3ce35a01ff2d441efdfb22ae877271294a205503e8ea280e4423afe0fc"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192412.json b/examples/chainhooks/chainhook_20250227_192412.json deleted file mode 100644 index 15d6b225..00000000 --- a/examples/chainhooks/chainhook_20250227_192412.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x2ef386b35f11d2f0e99bd057d28d41d5488a9673c30aca94f3f5c19fa711bb90", "index": 222020}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x1d9e703b5bff59bcc7589b2c13f40fdcf919a95770203c7afab71971b68b35af", "index": 19757}, "block_time": 1740712827, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 16, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": 
["01add4bd52080f9ca8f7edf09a30227d2d0be08a7f868f2848b1f3d9d777e6406a18b192481c9fe3b4952b1852add1437a843a5fb01544421714258a81e969b78c", "005a3ec9d89689663b6f7e452559e2e5fc7b18087c593c7c9e29f1af26aa4e59f72f476946417768f473bd5daee0c4fe3a92fea21bc2cae3dce2498d828b86da09"], "stacks_block_hash": "0x8b7c5c7d93a04a1df807923f3b1ce40d1baa0960192e5d4ec9bc41c93d3a1b21", "tenure_height": 16953}, "parent_block_identifier": {"hash": "0x16483b75ed9a428134a6726cb874e77dc300fb5e4ec72b431e7e1e10b97aefee", "index": 222019}, "timestamp": 1740712542, "transactions": [{"metadata": {"description": "tenure change", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 0, "kind": {"type": "TenureChange"}, "nonce": 38248, "position": {"index": 0}, "raw_tx": "0x808000000004002d58c278d1c9e1a8a26236ac81b543193c10e3470000000000009568000000000000000000001709dcd9bc9bef4dbf9047a58907e1da8e739633505d63ee6d350b40557ad7584434757fa4b2f64c6ec8ade788511052ff9b4197a42a6406348aa911535c5ee7010200000000077d5bdf9538521bd7e75d85f99efa43ee1ab01d3994c1e3466b8c49845b8147fcf4fe8dabf128437c7d5bdf9538521bd7e75d85f99efa43ee1ab01d3916483b75ed9a428134a6726cb874e77dc300fb5e4ec72b431e7e1e10b97aefee00000008002d58c278d1c9e1a8a26236ac81b543193c10e347", "receipt": {"contract_calls_stack": [], "events": [], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "STPNHGKRT74Y3A52C8VAS0DN8CCKR4738YBAQ3E1", "sponsor": null, "success": true}, "operations": [], "transaction_identifier": {"hash": "0x0daa7d7f1028dc0ad28d023f4604b066787a8703cf9d225b71561ed54abfda7b"}}, {"metadata": {"description": "coinbase", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 0, "kind": {"type": "Coinbase"}, "nonce": 38249, "position": {"index": 1}, "raw_tx": "0x808000000004002d58c278d1c9e1a8a26236ac81b543193c10e34700000000000095690000000000000000000106d31f70f7764a32fee4abc4816e510ceebb3cb1f8af695fd12cd593c75851913a4e3d29c9c78ef28cb8010610331eb4f7fe38acca0d40704419492518e718c4010200000000080000000000000000000000000000000000000000000000000000000000000000096a821176ad01a601a6c09dfb68fccddd71508babf45855c5247faf2601139ad0e04a10f86d8049bdff0de597edf347df0e36a1fcd593a6b7dcc875b28eecf87b42c4964ad38ffd51b49239eec788f809", "receipt": {"contract_calls_stack": [], "events": [], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "STPNHGKRT74Y3A52C8VAS0DN8CCKR4738YBAQ3E1", "sponsor": null, "success": true}, "operations": [], "transaction_identifier": {"hash": "0x2121c53d2fa2ce59a3c5a6af95a0ad5aeecd454a150420c9bf95ddf3d341d6a2"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192413.json b/examples/chainhooks/chainhook_20250227_192413.json deleted file mode 100644 index e10eed59..00000000 --- a/examples/chainhooks/chainhook_20250227_192413.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x77da4d1283cc634d8fe2187016d439d878c79b456d5d37e25d724aeafd9bfc21", "index": 222022}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x1d9e703b5bff59bcc7589b2c13f40fdcf919a95770203c7afab71971b68b35af", "index": 19757}, "block_time": 1740712888, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 
20, "pox_cycle_position": 16, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["00bcfce2a63cccc040628e7df5dc7395177d2b6290a08866f823462d0790ebca553b908d3d140e4e8e46af937e84d197700a109f67a75561e2fae127bea476ba19", "0188b0606f165f345e2508519cb1e311cf22a8d5ec339c40bceafd7b69ef31031818f59b6fd136a981238c068811dd9bdf07fd179db39aa5df4de27cc730a72bb9", "00862072ff2d3b89333d3375bc4a056909cc10bc564ae5f45094726ea68b49a1f16b162fc8bfe49e6b15e295ae1c66aa5be687afa2cc191174cb9ba1abd42bd7af"], "stacks_block_hash": "0xb55f2d4def995f66f20daedbccf537581da952be60f51d6300fb979297e7978a", "tenure_height": 16953}, "parent_block_identifier": {"hash": "0xbbcaa89e4461a99490799aee98e30ac0af60ed7847e9158a37fd2ad795fef931", "index": 222021}, "timestamp": 1740712814, "transactions": [{"metadata": {"description": "invoked: STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13::buy-tokens(u10)", "execution_cost": {"read_count": 11, "read_length": 4309, "runtime": 30777, "write_count": 3, "write_length": 114}, "fee": 3000, "kind": {"data": {"args": ["u10"], "contract_identifier": "STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13", "method": "buy-tokens"}, "type": "ContractCall"}, "nonce": 31, "position": {"index": 0}, "raw_tx": "0x80800000000400a3ab3da9bb7bffc2e27a89aece2305ba3f473a04000000000000001f0000000000000bb80000a0b49c750096a64cb78147025544dec30625080b70cf3c56e3de50f262f4d9061497b5e5d029761f5ae4765acc58349568a737d780df0f79ee19379108224844030100000000021a29540d38a1e0a81d3e494e4d6bf474e339e0d8af0f6361742d70726573616c652d7431330a6275792d746f6b656e7300000001010000000000000000000000000000000a", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "10000000", "recipient": "STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13", "sender": "ST2HTPFD9QDXZZGQ2FA4TXKH30PX3YHST0H4JFE0E"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2HTPFD9QDXZZGQ2FA4TXKH30PX3YHST0H4JFE0E", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2HTPFD9QDXZZGQ2FA4TXKH30PX3YHST0H4JFE0E"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 10000000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "STMN839RM7GAG79Y9574TTZMEKHKKR6RNZC71CDR.cat-presale-t13"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 10000000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0x59b66a36986fce34f77e05557f94fc29d4e5eb01839f332c82a8eba78d480012"}}, {"metadata": {"description": "transfered: 1000 \u00b5STX from ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H to ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67698, "position": {"index": 1}, "raw_tx": 
"0x80800000000400841f9b4dd30ae02fd816cc225c59d1b00b9c3a3a0000000000010872000000000000012c0001b972f189ed8e89a6559ea9f5116aa5d7f3876967d946c82cbafc68d93ca267675d0e604f1516937b85eb91c39c2822900a683fef34c1ce5a2e75fa224e9982e403020000000000051ac2963cbcd9cf3f60311c34087ceb45c8abdbc41300000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sender": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "position": {"index": 1}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xf1c21c357af61c02b8c947dca74902e45ba69c4352b689317e9b3edf75050999"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192414.json b/examples/chainhooks/chainhook_20250227_192414.json deleted file mode 100644 index f92c4db3..00000000 --- a/examples/chainhooks/chainhook_20250227_192414.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x737ae297dd62b97388c43f6ca591474b88c0bcd2d9d689cb6de90c8b37c6f37b", "index": 222023}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x1d9e703b5bff59bcc7589b2c13f40fdcf919a95770203c7afab71971b68b35af", "index": 19757}, "block_time": 1740712918, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 16, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["00f04e1be8ffa7787eba7e9c0649033f0df5d39b03c2328e5c801e68a291a7cc5e76f9914582b88bfdbbb5b298f24e2c40ea3c955c37878b8e1d3da670585e6243", "00a82c724186e5b98482452b3b10cdebcb42b3432a7774e19e11a8bfdd24ed81fe2eef3d44fbc832785c521770fcc2d2fa0d0498b76b64a86a3caf1c2cba2afef3", "00d0422467268e7a579a8fe0ad6c0d2c77ca0b6fbe7bc560fc17c4f0ac3b134c3b67f9fc6da7c34471ed854cf4b7dd8e57bc3dfe26470e2c140f7db6d7dffce759"], "stacks_block_hash": "0x0394f343864194761faf908a499d3f7517a0bd7e454eefcb7ec5964e9b33d62e", "tenure_height": 16953}, "parent_block_identifier": {"hash": "0x77da4d1283cc634d8fe2187016d439d878c79b456d5d37e25d724aeafd9bfc21", "index": 222022}, "timestamp": 1740712814, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF to 
ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67699, "position": {"index": 0}, "raw_tx": "0x80800000000400c2963cbcd9cf3f60311c34087ceb45c8abdbc4130000000000010873000000000000012c000003beaeacc410976d2ba6a3b00aff7eb4829f29c7fff5217afb932ec534a248be002c58845e2d0229044e5eabf471fa6a39c9664edcc6b4436d725caf89b40f5803020000000000051a841f9b4dd30ae02fd816cc225c59d1b00b9c3a3a00000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H", "sender": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0x7024ab0ede46e9b87d19fe12d159505527e0593809f64940b2ab3f9f64d21feb"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/chainhooks/chainhook_20250227_192415.json b/examples/chainhooks/chainhook_20250227_192415.json deleted file mode 100644 index d7be8f50..00000000 --- a/examples/chainhooks/chainhook_20250227_192415.json +++ /dev/null @@ -1 +0,0 @@ -{"apply": [{"block_identifier": {"hash": "0x9c5d7d28aed25ebc4ed6447f67b81d2be71553ced3a784ae780df2bf0dabb0e9", "index": 222025}, "metadata": {"bitcoin_anchor_block_identifier": {"hash": "0x1d9e703b5bff59bcc7589b2c13f40fdcf919a95770203c7afab71971b68b35af", "index": 19757}, "block_time": 1740712985, "confirm_microblock_identifier": null, "cycle_number": null, "pox_cycle_index": 982, "pox_cycle_length": 20, "pox_cycle_position": 16, "reward_set": null, "signer_bitvec": "013500000027ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1f", "signer_public_keys": ["0x02e8620935d58ebffa23c260f6917cbd0915ea17d7a46df17e131540237d335504", "0x03161dd42571db5ca2599ef32774ec0c6c9ef125cbb8f826a63fe527489bb91400", "0x036a44f61d85efa844b42475f107b106dc8fb209ae27813893c3269c59821e0333"], "signer_signature": ["008fbe7dd472ecd2f9ebb29d81949e8ad712925a9382c46c6e3acf49b16abde85b5c7ee477ffd019661cdd2198ad64065b93d09948e4f4ac500e12f67f0b8e952a", "01df3904e50aba97ba7688189fa78d65eca20fdbc7b407260620043f37a3fcf69108d5222a5ad138eb535a565ff0f92c084f656400386e8ac9ba93003d693f20b0", "00391510d00403a46fbb0acd5bf8771ca5b1a611354ecabb69d83e216a3d666b1e7a50c9faae29cb8fe8e1ba9700b15814291fc6a0be48ca14bb3d658abde5b354"], "stacks_block_hash": "0x812b65b5deff1a531d482547f51c471009e55c07a1cb670fe0cccec0140b41d3", "tenure_height": 16953}, "parent_block_identifier": {"hash": 
"0x52272152395cc9d3930e70c5a3f014bfd39744b9da3aeef7addf57ea1f0ad0cf", "index": 222024}, "timestamp": 1740712814, "transactions": [{"metadata": {"description": "transfered: 1000 \u00b5STX from ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B to ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "execution_cost": {"read_count": 0, "read_length": 0, "runtime": 0, "write_count": 0, "write_length": 0}, "fee": 300, "kind": {"type": "NativeTokenTransfer"}, "nonce": 67699, "position": {"index": 0}, "raw_tx": "0x80800000000400b4fdae98b64b9cd6c9436f3b965558966afe890b0000000000010873000000000000012c0001df094606aed75863e00491383252a183f6c3a2031292f61f0dcf6af608d6ee61653294ef8e13e13d2e22cb9a8da768f23e222982ecfd8bd7ffa6931ba6c39e8b03020000000000051ac2963cbcd9cf3f60311c34087ceb45c8abdbc41300000000000003e800000000000000000000000000000000000000000000000000000000000000000000", "receipt": {"contract_calls_stack": [], "events": [{"data": {"amount": "1000", "recipient": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "position": {"index": 0}, "type": "STXTransferEvent"}], "mutated_assets_radius": [], "mutated_contracts_radius": []}, "result": "(ok true)", "sender": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B", "sponsor": null, "success": true}, "operations": [{"account": {"address": "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 0}, "related_operations": [{"index": 1}], "status": "SUCCESS", "type": "DEBIT"}, {"account": {"address": "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF"}, "amount": {"currency": {"decimals": 6, "symbol": "STX"}, "value": 1000}, "operation_identifier": {"index": 1}, "related_operations": [{"index": 0}], "status": "SUCCESS", "type": "CREDIT"}], "transaction_identifier": {"hash": "0xaba63a75c20d5b5539e9d4fa44f2f6a2cd488287f2354ea07e68e06c97fa516e"}}]}], "chainhook": {"is_streaming_blocks": false, "predicate": {"higher_than": 221993, "scope": "block_height"}, "uuid": "598a3345-cd57-4228-925f-d7c93450c94e"}, "events": [], "rollback": []} \ No newline at end of file diff --git a/examples/daos/example.json b/examples/daos/example.json deleted file mode 100644 index 0b54bc00..00000000 --- a/examples/daos/example.json +++ /dev/null @@ -1,249 +0,0 @@ -{ - "name": "GTC DAO", - "mission": "Our mission statement", - "description": "Detailed description of the DAO", - "extensions": [ - { - "name": "getc-pre-faktory", - "type": "TOKEN", - "subtype": "PRELAUNCH", - "source": "\n;; e2c78b6648a515a61c19863f10b0bc2af6a92f24cb1df5dd5de25bcf8cf29872\n;; aibtc.com DAO faktory.fun PRE @version 1.0\n;; Pre-launch contract for token distribution\n;; Dynamic allocation: 1-7 seats per user in Period 1\n;; Each seat = 0.00020000 BTC, targ", - "hash": "e2c78b6648a515a61c19863f10b0bc2af6a92f24cb1df5dd5de25bcf8cf29872", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "b6dea933ca28046f08d2ca4b26601777b6a0f4db117baddabae96d672ecbe5a5", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-pre-faktory" - }, - { - "name": "getc-faktory", - "type": "TOKEN", - "subtype": "DAO", - "source": "\n;; ec46c7d8892f53911847a96a79c68e2789734076b302bad7973ca6a38af455b3\n;; getc Powered By Faktory.fun v1.0 \n\n(impl-trait 'STTWD9SPRQVD3P733V89SV0P8RZRZNQADG034F0A.faktory-trait-v1.sip-010-trait)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.ai", - "hash": "ec46c7d8892f53911847a96a79c68e2789734076b302bad7973ca6a38af455b3", - "sender": 
"ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "3dac45a64cdc495c7d5f459d389bb62f091b09e1307b1de06b2264db17201a82", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-faktory" - }, - { - "name": "xyk-pool-stx-getc-v-1-1", - "type": "TOKEN", - "subtype": "POOL", - "source": ";; Implement XYK pool trait and use SIP 010 trait\n (impl-trait 'ST3VXT52QEQPZ5246A16RFNMR1PRJ96JK6YYX37N8.xyk-pool-trait-v-1-2.xyk-pool-trait)\n (use-trait sip-010-trait 'ST3VXT52QEQPZ5246A16RFNMR1PRJ96JK6YYX37N8.sip-010-trait-ft-standard.sip-010-tr", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "c7152903ded8db7bcaf2afe8befda6fc22e156316e0358a89674845c66f8b849", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.xyk-pool-stx-getc-v-1-1" - }, - { - "name": "getc-faktory-dex", - "type": "TOKEN", - "subtype": "DEX", - "source": "\n ;; f7197551533e781d7349d2258035a6ed6a7167d91eb0aafe84bedd405569ea5b\n ;; aibtc.dev DAO faktory.fun DEX @version 1.0\n \n (impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.faktory-dex)\n (impl-trait 'STTWD9SPRQVD3P7", - "hash": "f7197551533e781d7349d2258035a6ed6a7167d91eb0aafe84bedd405569ea5b", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "bc73e55a13387b112b86e6e9f6ab45f7dade6cbc0d629bf4e793b4c7aa92d810", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-faktory-dex" - }, - { - "name": "getc-base-dao", - "type": "BASE", - "subtype": "DAO", - "source": ";; title: aibtc-dao\n;; version: 1.0.0\n;; summary: An ExecutorDAO implementation for aibtcdev\n\n;; traits\n;;\n\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-v2.aibtc-base-dao)\n(use-trait proposal-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ", - "hash": "b682c7849ec7c022eaff973d2ceb8f83885a01df20b2d4d5d23e2b5a3c0b7e95", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "55efa226e4a491c00ac5e07c5e781e6b855f348f2ce24bc0ef922eeecde79994", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-base-dao" - }, - { - "name": "getc-action-proposals-v2", - "type": "EXTENSIONS", - "subtype": "ACTION_PROPOSALS", - "source": ";; title: aibtc-action-proposals-v2\n;; version: 2.0.0\n;; summary: An extension that manages voting on predefined actions using a SIP-010 Stacks token.\n;; description: This contract allows voting on specific extension actions with a lower threshold th", - "hash": "2e8f4b6f6efa1bd4b60f6dd1b2e4d58f9396d1195de5f56013a2d6e0dccc870e", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "a95cbbfb59ba3d06728df183b978b9b6929501f225940b091040a148aded165f", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-proposals-v2" - }, - { - "name": "getc-bank-account", - "type": "EXTENSIONS", - "subtype": "BANK_ACCOUNT", - "source": ";; title: aibtc-bank-account\n;; version: 1.0.0\n;; summary: An extension that allows a principal to withdraw STX from the contract with given rules.\n\n;; traits\n;;\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(i", - "hash": "b135cb33e2107d9f61918b8bc829734795b1070436b830e42f4a02010812e129", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "a65726dd373dc7c34a34ef593e8ac10ec03b0cc76b9eb20f8fd4c40216a42e50", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-bank-account" - }, - { - "name": "getc-core-proposals-v2", - "type": "EXTENSIONS", - "subtype": "CORE_PROPOSALS", - 
"source": ";; title: aibtc-core-proposals-v2\n;; version: 2.0.0\n;; summary: An extension that manages voting on proposals to execute Clarity code using a SIP-010 Stacks token.\n;; description: This contract can make changes to core DAO functionality with a high v", - "hash": "73936e4a1f87234a19eefbb05f1eb363b1caa058bbfff68e7e659a583c495aca", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "8e2a4d396ef3433e5beb5933503464d2d3af2d898534fe9473d96f39ead48e5a", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-core-proposals-v2" - }, - { - "name": "getc-dao-charter", - "type": "EXTENSIONS", - "subtype": "CHARTER", - "source": ";; title: aibtc-dao-charter\n;; version: 1.0.0\n;; summary: An extension that manages the DAO charter and records the DAO's mission and values on-chain.\n;; description: This contract allows the DAO to define its mission and values on-chain, which can b", - "hash": "fe2ddf4b3fa13a9f088770101a5a15426fa160f236a101158694e709b7082538", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "f2088778303b3cb8ea1ae7e2171543c08603d1274a0b92cd1e843f93da46aae5", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-dao-charter" - }, - { - "name": "getc-onchain-messaging", - "type": "EXTENSIONS", - "subtype": "MESSAGING", - "source": ";; title: aibtc-onchain-messaging\n;; version: 1.0.0\n;; summary: An extension to send messages on-chain to anyone listening to this contract.\n\n;; traits\n;;\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-tra", - "hash": "4fb92c568534c5fd0ee1a55503b7865565ba0545812590dcab1c1cd06fcb570a", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "c62a69594fbc77aa71e1c2664b99e39081f1671444f460205c96d3476aac5ed7", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-onchain-messaging" - }, - { - "name": "getc-payments-invoices", - "type": "EXTENSIONS", - "subtype": "PAYMENTS", - "source": ";; title: aibtc-payments\n;; version: 1.0.0\n;; summary: An extension that provides payment processing for DAO services.\n\n;; traits\n;;\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59", - "hash": "03f2db3ce6cf8986489b6107242b98132978bdca3b67bc98f776e175bc4ee155", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "c8c3d1309994e63b6b7f60ab912a514cde0ec39791957b0de235b90ece74c749", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-payments-invoices" - }, - { - "name": "getc-token-owner", - "type": "EXTENSIONS", - "subtype": "TOKEN_OWNER", - "source": ";; title: aibtc-token-owner\n;; version: 1.0.0\n;; summary: An extension that provides management functions for the dao token\n\n;; traits\n;;\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T", - "hash": "3c03a85ff53a5c6f8403cc40c9ad53ea0380b8bc0f9a541639da2093d4fafce6", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "d94a49848de2edf59f9159f83a4960fdb858d4c09f8d8d18a7f5f916b3fa30ee", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-token-owner" - }, - { - "name": "getc-treasury", - "type": "EXTENSIONS", - "subtype": "TREASURY", - "source": ";; title: aibtc-treasury\n;; version: 1.0.0\n;; summary: An extension that manages STX, SIP-009 NFTs, and SIP-010 FTs.\n\n;; traits\n;;\n(impl-trait 
'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2", - "hash": "f364c92ddd077cf2682c501c690d7e1f9c8c8fa3cc1742fdf1672b5fb13ac6e9", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "9ab4b6abd2c5eae70b3e82a03aa9f9e0cef68b94df8cceba8d1a1f50c1a3f652", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-treasury" - }, - { - "name": "getc-action-add-resource", - "type": "ACTIONS", - "subtype": "PAYMENTS_INVOICES_ADD_RESOURCE", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.action)\n\n(define-constant ERR_UNAUTHORIZED (err u10001))\n(define-constant ERR_INVALID_PAR", - "hash": "c6884121d1d82aebde4e4952d86bb13bbed318bac9ad54f4b30f67d89e0f6b05", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "674a184657e1dded8d5682d4273c6aec855fc859b969f1ffd9c3ce95fbd2bd09", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-add-resource" - }, - { - "name": "getc-action-allow-asset", - "type": "ACTIONS", - "subtype": "TREASURY_ALLOW_ASSET", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.action)\n\n(define-constant ERR_UNAUTHORIZED (err u10001))\n(define-constant ERR_INVALID_PAR", - "hash": "25b328a57126b0a156fac3fb18abf171277e7a8e97200521798bb86f460bd195", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "dcc8ed87cc3c936dd3afbc65d4d5ba8850c3ee99ecf438fb7281080069c296b3", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-allow-asset" - }, - { - "name": "getc-action-send-message", - "type": "ACTIONS", - "subtype": "MESSAGING_SEND_MESSAGE", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.action)\n\n(define-constant ERR_UNAUTHORIZED (err u10001))\n(define-constant ERR_INVALID_PAR", - "hash": "af565cb1202d773dc3f2cfc77a7342a7408e4116889e6e12df5d7705f66c3617", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "77aa497c46fb1066407d954883b01ebe64dd93bb7d2e9443222864564edd88cb", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-send-message" - }, - { - "name": "getc-action-set-account-holder", - "type": "ACTIONS", - "subtype": "BANK_ACCOUNT_SET_ACCOUNT_HOLDER", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.action)\n\n(define-constant ERR_UNAUTHORIZED (err u10001))\n(define-constant ERR_INVALID_PAR", - "hash": "8bffc54f1d8a9b43158fb64f8a38253af2aa8f80f795d3d84a21a62a4a7cb44c", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "6b063cc8ba2f7fc214a5979e8f89e27369681f1c878d8520bfeddb59e022fc09", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-set-account-holder" - }, - { - "name": "getc-action-set-withdrawal-amount", - "type": "ACTIONS", - "subtype": "BANK_ACCOUNT_SET_WITHDRAWAL_AMOUNT", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.action)\n\n(define-constant ERR_UNAUTHORIZED 
(err u10001))\n(define-constant ERR_INVALID_PAR", - "hash": "67887733991ec39a722c96c2c9b258de05204a2a6371b66d439647604c281c7f", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "3927ec0901e7e656d134598b04df2b350ae8573f7472fb3927ca0a3360ffa266", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-set-withdrawal-amount" - }, - { - "name": "getc-action-set-withdrawal-period", - "type": "ACTIONS", - "subtype": "BANK_ACCOUNT_SET_WITHDRAWAL_PERIOD", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.action)\n\n(define-constant ERR_UNAUTHORIZED (err u10001))\n(define-constant ERR_INVALID_PAR", - "hash": "483867f1f2230a858ff1d6df36df4be44c5c848eb64c9d6172320e529a507daa", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "7545216901fae3f6589b606cb842ba6173321a701666a2e2446d297823f5941d", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-set-withdrawal-period" - }, - { - "name": "getc-action-toggle-resource", - "type": "ACTIONS", - "subtype": "PAYMENTS_INVOICES_TOGGLE_RESOURCE", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.extension)\n(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.action)\n\n(define-constant ERR_UNAUTHORIZED (err u10001))\n(define-constant ERR_INVALID_PAR", - "hash": "bcd3c0e56e0a19387212e0bd77a74b2e8401f18e475b2f13b30306ff72b25eb6", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "0aa53b7ef132b22f244d06dacace091ea1a0ace62974b48c93637bc90e6fa81d", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-action-toggle-resource" - }, - { - "name": "getc-base-bootstrap-initialization-v2", - "type": "PROPOSALS", - "subtype": "BOOTSTRAP_INIT", - "source": "(impl-trait 'ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.aibtc-dao-traits-v2.proposal)\n\n(define-constant CFG_DAO_MANIFEST_TEXT \"All I do is win win win\")\n(define-constant CFG_DAO_MANIFEST_INSCRIPTION_ID \"inscription id\")\n\n(define-public (execute (sende", - "hash": "0582850900adf8b2527ed96944170e0cdc9cae800bce1fd3fb0062dba1a85b13", - "sender": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ", - "success": true, - "txId": "e7e3869853e3fc03f44ea8cde3097562a92c19f4f72ce1d704dfdfc4528b3850", - "address": "ST1TZE9ZY61FYR7YM9BR0543XKX9YG5TR9017R4WJ.getc-base-bootstrap-initialization-v2" - } - ], - "token": { - "name": "GTC DAO Token", - "symbol": "GTC", - "decimals": 6, - "description": "Token description", - "max_supply": "1000000", - "uri": "https://example.com/token.json", - "image_url": "https://example.com/image.png", - "x_url": "https://x.com/mydao", - "telegram_url": "https://t.me/mydao", - "website_url": "https://mydao.com" - } -} diff --git a/examples/daos/test.json b/examples/daos/test.json deleted file mode 100644 index f4ff7628..00000000 --- a/examples/daos/test.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "name": "My DAO", - "mission": "Our mission statement", - "description": "Detailed description of the DAO", - "extensions": [ - { - "type": "lfg4-base-bootstrap-initialization-v2", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-base-bootstrap-initialization-v2", - "tx_id": "0x8b9ec33b1d7ee5b119aa84470b3baee422c4f48f7321b9e10c1ddd281bade4f5" - }, - { - "type": "lfg4-action-proposals-v2", - "contract_principal": 
"ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-proposals-v2", - "tx_id": "0x078c43e7e0247b0d597d7aeb0b73c30742f43d55c02a1d481776057da9c05eaf" - }, - { - "type": "lfg4-bank-account", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-bank-account", - "tx_id": "0xa831692d89239ce6cde5a73a8e8bfe80c1144d2a527b64b0a3584c92ef37480a" - }, - { - "type": "lfg4-core-proposals-v2", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-core-proposals-v2", - "tx_id": "0x4ce3c1cebaa0721d6703496a7eec5ace595b4e00bb832036de3e9b0383ab7708" - }, - { - "type": "lfg4-dao-charter", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-dao-charter", - "tx_id": "0x0a2a280a6fff6efc3e3fdd381832d65b20960f527d71bf8beeb4160cd9225e2d" - }, - { - "type": "lfg4-onchain-messaging", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-onchain-messaging", - "tx_id": "0x4324f78ca944d5444abd08126c4411f79b5135a3f16fee923233456f0f9813b9" - }, - { - "type": "lfg4-payments-invoices", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-payments-invoices", - "tx_id": "0xd5e6f968b879896577d3a2211bd161d2acfd3e4c5734e599e8c0bfe74efd64c0" - }, - { - "type": "lfg4-token-owner", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-token-owner", - "tx_id": "0xff0e0436b523554c3fba0e149cfbf16a8a2eddb4e320f1ccebe47ea16ec5f82c" - }, - { - "type": "lfg4-treasury", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-treasury", - "tx_id": "0xbed23879e81c7f2d344227a825aac0dc7ece6105e5ce3c261b533021807c10ba" - }, - { - "type": "lfg4-action-add-resource", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-add-resource", - "tx_id": "0x8cf3b3e3ae094a7afa3376d80673ddada92c1717ea0eadecb94bf03bd62a9278" - }, - { - "type": "lfg4-action-allow-asset", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-allow-asset", - "tx_id": "0x1905d7f07be15134536f9c7756fb6196288b0027b394cb211bb62e1dddf8f04b" - }, - { - "type": "lfg4-action-send-message", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-send-message", - "tx_id": "0xda3d2813c7dd62df4b84f1333129d58e6d97663e1e66d83fd88621eea58f63db" - }, - { - "type": "lfg4-action-set-account-holder", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-set-account-holder", - "tx_id": "0x76749d5a71f1a7245f38f65daa5c44246ee5696d1fded8be4b2bfbd86e8a1394" - }, - { - "type": "lfg4-action-set-withdrawal-amount", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-set-withdrawal-amount", - "tx_id": "0xbfb07c19c37f6f8a53dbaf39b5f281a97fc8b693e452ab0614f5dc8bc4a6380d" - }, - { - "type": "lfg4-action-set-withdrawal-period", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-set-withdrawal-period", - "tx_id": "0x2b72c26b38e4faef96ffca6945f2f9750cafbe0d2e066f855f33fb4f2a790832" - }, - { - "type": "lfg4-action-toggle-resource", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-action-toggle-resource", - "tx_id": "0x85e54a94ad885d4a427706e2e87e1b05a2f41cf312beabb97b6305b849d6a620" - } - ], - "token": { - "name": "GoTimeTest", - "symbol": "LFG4", - "decimals": 6, - "description": "Token description", - "max_supply": "1000000000000000", - "uri": "https://aibtc.dev", - "tx_id": "optional_transaction_id", - "contract_principal": "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.lfg4-faktory", - "image_url": 
"https://example.com/image.png", - "x_url": "https://x.com/mydao", - "telegram_url": "https://t.me/mydao", - "website_url": "https://mydao.com" - } -} diff --git a/lib/alex.py b/lib/alex.py deleted file mode 100644 index 20340955..00000000 --- a/lib/alex.py +++ /dev/null @@ -1,93 +0,0 @@ -import requests - -from config import config - - -class AlexApi: - def __init__(self): - """Initialize the Alex API client.""" - self.base_url = config.api.alex_base_url - self.limits = 500 - - def _get(self, endpoint: str, params: dict = {}): - """Send a GET request to the Alex API endpoint.""" - try: - url = self.base_url + endpoint - headers = {"Accept": "application/json"} - response = requests.get(url, headers=headers, params=params) - response.raise_for_status() - return response.json() - except Exception as e: - raise Exception(f"Alex API GET request error: {str(e)}") - - def get_pairs(self): - """Retrieve a list of available trading pairs.""" - try: - return self._get("v1/public/pairs")["data"] - except Exception as e: - raise Exception(f"Failed to get token pairs: {str(e)}") - - def get_price_history(self, token_address: str): - """Retrieve historical price data for a token address.""" - try: - prices = self._get(f"v1/price_history/{token_address}?limit={self.limits}")[ - "prices" - ] - return [ - {"price": price["avg_price_usd"], "block": price["block_height"]} - for price in prices - ] - except Exception as e: - raise Exception(f"Failed to get token price history: {str(e)}") - - def get_all_swaps(self): - """Retrieve all swap data from the Alex API.""" - try: - return self._get("v1/allswaps") - except Exception as e: - raise Exception(f"Failed to get all swaps: {str(e)}") - - def get_token_pool_volume(self, pool_token_id: str): - """Retrieve pool volume data for a specified pool token ID.""" - try: - return self._get(f"v1/pool_volume/{pool_token_id}?limit={self.limits}")[ - "volume_values" - ] - except Exception as e: - raise Exception(f"Failed to get pool volume: {str(e)}") - - def get_token_pool_agg_history(self, token_address: str, pool_token_id: str): - """Retrieve aggregated price and volume history for a token and pool.""" - try: - prices = self._get(f"v1/price_history/{token_address}?limit={self.limits}")[ - "prices" - ] - volume = self._get(f"v1/pool_volume/{pool_token_id}?limit={self.limits}")[ - "volume_values" - ] - volume_dict = {v["block_height"]: v["volume_24h"] for v in volume} - combined_data = [ - { - "price": price["avg_price_usd"], - "block": price["block_height"], - "volume_24h": volume_dict.get(price["block_height"], None), - } - for price in prices - ] - return combined_data - except Exception as e: - raise Exception(f"Failed to get token price history: {str(e)}") - - def get_token_pool_price(self, pool_token_id: str): - """Retrieve current pool price for a specified pool token ID.""" - try: - return self._get(f"v1/pool_token_price/{pool_token_id}?limit={self.limits}") - except Exception as e: - raise Exception(f"Failed to get pool price: {str(e)}") - - def get_token_tvl(self, pool_token_id: str): - """Retrieve total value locked data for a specified token.""" - try: - return self._get(f"/v1/stats/tvl/{pool_token_id}?limit={self.limits}") - except Exception as e: - raise Exception(f"Failed to get pool volume: {str(e)}") diff --git a/lib/hiro.py b/lib/hiro.py deleted file mode 100644 index 82817bcc..00000000 --- a/lib/hiro.py +++ /dev/null @@ -1,871 +0,0 @@ -import time -from dataclasses import dataclass -from enum import Enum -from functools import wraps -from typing 
import Any, Dict, List, Optional, TypedDict - -import aiohttp -import requests -from cachetools import TTLCache, cached - -from config import config -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -class HiroApiError(Exception): - """Base exception for Hiro API errors.""" - - pass - - -class HiroApiRateLimitError(HiroApiError): - """Exception for rate limit errors.""" - - pass - - -class HiroApiTimeoutError(HiroApiError): - """Exception for timeout errors.""" - - pass - - -class ChainType(str, Enum): - """Supported blockchain types for chainhooks.""" - - STACKS = "stacks" - BITCOIN = "bitcoin" - - -class EventScope(str, Enum): - """Supported event scopes for chainhooks.""" - - TXID = "txid" - CONTRACT_CALL = "contract_call" - PRINT_EVENT = "print_event" - FT_EVENT = "ft_event" - NFT_EVENT = "nft_event" - STX_EVENT = "stx_event" - - -@dataclass -class WebhookConfig: - """Configuration for webhook endpoints.""" - - url: str - auth_header: str - retry_count: int = 3 - timeout: int = 10 - events: List[str] = None - - def to_dict(self) -> Dict[str, Any]: - """Convert webhook config to dictionary format.""" - return { - "url": self.url, - "authorization_header": self.auth_header, - "retry_count": self.retry_count, - "timeout": self.timeout, - "events": self.events, - } - - -class ChainHookPredicate(TypedDict): - """Type definition for chainhook predicates.""" - - name: str - chain: str - version: int - networks: Dict[str, Any] - - -class ChainHookBuilder: - """Builder class for creating chainhook predicates.""" - - def __init__( - self, - name: str, - chain_type: ChainType = ChainType.STACKS, - network: str = "testnet", - version: int = 1, - ): - """Initialize the chainhook builder. - - Args: - name: Name of the chainhook - chain_type: Type of blockchain to monitor - network: Network to monitor (testnet/mainnet) - version: API version - """ - self.name = name - self.chain_type = chain_type - self.network = network - self.version = version - self.conditions: Dict[str, Any] = {} - self.start_block: Optional[int] = None - self.end_block: Optional[int] = None - self.decode_clarity_values: bool = True - self.expire_after_occurrence: Optional[int] = None - self.webhook: Optional[WebhookConfig] = None - - def with_transaction_filter(self, txid: str) -> "ChainHookBuilder": - """Add transaction ID filter.""" - self.conditions = {"scope": EventScope.TXID, "equals": txid} - return self - - def with_contract_call_filter( - self, - contract_identifier: str, - method: str, - ) -> "ChainHookBuilder": - """Add contract call filter.""" - self.conditions = { - "scope": EventScope.CONTRACT_CALL, - "method": method, - "contract_identifier": contract_identifier, - } - return self - - def with_print_event_filter( - self, - contract_identifier: str, - topic: str, - ) -> "ChainHookBuilder": - """Add print event filter.""" - self.conditions = { - "scope": EventScope.PRINT_EVENT, - "contract_identifier": contract_identifier, - "topic": topic, - } - return self - - def with_ft_event_filter( - self, - asset_identifier: str, - actions: List[str], - ) -> "ChainHookBuilder": - """Add fungible token event filter.""" - self.conditions = { - "scope": EventScope.FT_EVENT, - "asset_identifier": asset_identifier, - "actions": actions, - } - return self - - def with_nft_event_filter( - self, - asset_identifier: str, - actions: List[str], - ) -> "ChainHookBuilder": - """Add non-fungible token event filter.""" - self.conditions = { - "scope": EventScope.NFT_EVENT, - "asset_identifier": 
asset_identifier, - "actions": actions, - } - return self - - def with_stx_event_filter( - self, - actions: List[str], - ) -> "ChainHookBuilder": - """Add STX event filter.""" - self.conditions = { - "scope": EventScope.STX_EVENT, - "actions": actions, - } - return self - - def with_blocks( - self, - start_block: Optional[int] = None, - end_block: Optional[int] = None, - ) -> "ChainHookBuilder": - """Set block range.""" - self.start_block = start_block - self.end_block = end_block - return self - - def with_webhook(self, webhook: WebhookConfig) -> "ChainHookBuilder": - """Set webhook configuration.""" - self.webhook = webhook - return self - - def with_expiration(self, occurrences: int) -> "ChainHookBuilder": - """Set expiration after number of occurrences.""" - self.expire_after_occurrence = occurrences - return self - - def build(self) -> ChainHookPredicate: - """Build the chainhook predicate.""" - if not self.conditions: - raise ValueError("No conditions set for chainhook") - if not self.webhook: - raise ValueError("No webhook configured for chainhook") - - network_config = { - "if_this": self.conditions, - "then_that": {"http_post": self.webhook.to_dict()}, - "decode_clarity_values": self.decode_clarity_values, - } - - if self.start_block is not None: - network_config["start_block"] = self.start_block - if self.end_block is not None: - network_config["end_block"] = self.end_block - if self.expire_after_occurrence is not None: - network_config["expire_after_occurrence"] = self.expire_after_occurrence - - return { - "name": self.name, - "chain": self.chain_type, - "version": self.version, - "networks": {self.network: network_config}, - } - - -class BaseHiroApi: - """Base class for Hiro API clients with shared functionality.""" - - # Rate limiting settings - RATE_LIMIT = 100 # requests per minute - RATE_LIMIT_WINDOW = 60 # seconds - - # Retry settings - MAX_RETRIES = 3 - RETRY_DELAY = 1 # seconds - - def __init__(self, base_url: str): - """Initialize the base API client. 
- - Args: - base_url: The base URL for the API - """ - self.base_url = base_url - self.api_key = config.api.hiro_api_key - if not self.api_key: - raise ValueError("HIRO_API_KEY environment variable is required") - - self._request_times: List[float] = [] - self._cache = TTLCache(maxsize=100, ttl=300) # Cache with 5-minute TTL - self._session: Optional[aiohttp.ClientSession] = None - logger.debug("Initialized API client with base URL: %s", self.base_url) - - def _rate_limit(self) -> None: - """Implement rate limiting.""" - current_time = time.time() - self._request_times = [ - t for t in self._request_times if current_time - t < self.RATE_LIMIT_WINDOW - ] - - if len(self._request_times) >= self.RATE_LIMIT: - sleep_time = self._request_times[0] + self.RATE_LIMIT_WINDOW - current_time - if sleep_time > 0: - logger.warning( - "Rate limit reached, sleeping for %.2f seconds", sleep_time - ) - time.sleep(sleep_time) - - self._request_times.append(current_time) - - def _retry_on_error(func): - """Decorator to retry API calls on transient errors.""" - - @wraps(func) - def wrapper(self, *args, **kwargs): - for attempt in range(self.MAX_RETRIES): - try: - return func(self, *args, **kwargs) - except ( - requests.exceptions.Timeout, - requests.exceptions.ConnectionError, - ) as e: - if attempt == self.MAX_RETRIES - 1: - logger.error( - "Max retries reached for %s: %s", func.__name__, str(e) - ) - raise HiroApiTimeoutError(f"Max retries reached: {str(e)}") - logger.warning( - "Retry attempt %d for %s: %s", - attempt + 1, - func.__name__, - str(e), - ) - time.sleep(self.RETRY_DELAY * (attempt + 1)) - return None - - return wrapper - - @_retry_on_error - def _make_request( - self, - method: str, - endpoint: str, - headers: Optional[Dict[str, str]] = None, - params: Optional[Dict[str, Any]] = None, - json: Optional[Dict[str, Any]] = None, - ) -> Dict[str, Any]: - """Make an HTTP request with retries and rate limiting. - - Args: - method: HTTP method (GET, POST, etc.) 
- endpoint: API endpoint - headers: Optional request headers - params: Optional query parameters - json: Optional JSON body - - Returns: - Dict containing the response data - """ - try: - self._rate_limit() - url = f"{self.base_url}{endpoint}" - headers = headers or {"Accept": "application/json"} - - logger.debug("Making %s request to %s", method, url) - response = requests.request( - method, url, headers=headers, params=params, json=json - ) - response.raise_for_status() - return response.json() - except requests.exceptions.HTTPError as e: - if e.response.status_code == 429: - logger.error("Rate limit exceeded: %s", str(e)) - raise HiroApiRateLimitError(f"Rate limit exceeded: {str(e)}") - logger.error("HTTP error occurred: %s", str(e)) - raise HiroApiError(f"HTTP error occurred: {str(e)}") - except Exception as e: - logger.error("Unexpected error in request: %s", str(e)) - raise HiroApiError(f"Unexpected error: {str(e)}") - - async def _amake_request( - self, - method: str, - endpoint: str, - headers: Optional[Dict[str, str]] = None, - params: Optional[Dict[str, Any]] = None, - json: Optional[Dict[str, Any]] = None, - ) -> Dict[str, Any]: - """Async version of _make_request.""" - if self._session is None: - self._session = aiohttp.ClientSession() - - try: - self._rate_limit() - url = f"{self.base_url}{endpoint}" - headers = headers or {"Accept": "application/json"} - - logger.debug("Making async %s request to %s", method, url) - async with self._session.request( - method, url, headers=headers, params=params, json=json - ) as response: - response.raise_for_status() - return await response.json() - except aiohttp.ClientError as e: - logger.error("Async request error: %s", str(e)) - raise HiroApiError(f"Async request error: {str(e)}") - - async def close(self) -> None: - """Close the async session.""" - if self._session: - await self._session.close() - self._session = None - - -class PlatformApi(BaseHiroApi): - """Client for interacting with the Hiro Platform API.""" - - def __init__(self): - """Initialize the Platform API client.""" - super().__init__(config.api.platform_base_url) - self.default_webhook = WebhookConfig( - url=config.api.webhook_url, auth_header=config.api.webhook_auth - ) - - def create_chainhook(self, predicate: ChainHookPredicate) -> Dict[str, Any]: - """Create a new chainhook. 
- - Args: - predicate: The chainhook predicate configuration - - Returns: - Dict containing the response from the API - """ - return self._make_request( - "POST", - f"/v1/ext/{self.api_key}/chainhooks", - headers={"Content-Type": "application/json"}, - json=predicate, - ) - - async def acreate_chainhook(self, predicate: ChainHookPredicate) -> Dict[str, Any]: - """Async version of create_chainhook.""" - return await self._amake_request( - "POST", - f"/v1/ext/{self.api_key}/chainhooks", - headers={"Content-Type": "application/json"}, - json=predicate, - ) - - def create_transaction_hook( - self, - txid: str, - name: str = "tx-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - expire_after_occurrence: int = 1, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring specific transactions.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_transaction_filter(txid) - .with_blocks(start_block) - .with_webhook(webhook or self.default_webhook) - .with_expiration(expire_after_occurrence) - .build() - ) - return self.create_chainhook(predicate) - - def create_contract_call_hook( - self, - contract_identifier: str, - method: str, - name: str = "contract-call-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - end_block: Optional[int] = None, - expire_after_occurrence: Optional[int] = None, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring contract calls.""" - builder = ( - ChainHookBuilder(name, network=network) - .with_contract_call_filter(contract_identifier, method) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - ) - - if expire_after_occurrence is not None: - builder.with_expiration(expire_after_occurrence) - - return self.create_chainhook(builder.build()) - - def create_ft_event_hook( - self, - asset_identifier: str, - actions: List[str], - name: str = "ft-event-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - end_block: Optional[int] = None, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring fungible token events.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_ft_event_filter(asset_identifier, actions) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - .build() - ) - return self.create_chainhook(predicate) - - def create_nft_event_hook( - self, - asset_identifier: str, - actions: List[str], - name: str = "nft-event-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - end_block: Optional[int] = None, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring non-fungible token events.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_nft_event_filter(asset_identifier, actions) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - .build() - ) - return self.create_chainhook(predicate) - - def create_stx_event_hook( - self, - actions: List[str], - name: str = "stx-event-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - end_block: Optional[int] = None, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring STX events.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_stx_event_filter(actions) - .with_blocks(start_block, end_block) 
- .with_webhook(webhook or self.default_webhook) - .build() - ) - return self.create_chainhook(predicate) - - def create_print_event_hook( - self, - contract_identifier: str, - topic: str, - name: str = "print-event-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - end_block: Optional[int] = None, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring print events.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_print_event_filter(contract_identifier, topic) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - .build() - ) - return self.create_chainhook(predicate) - - def create_contract_deployment_hook( - self, - txid: str, - name: str = "contract-deployment-monitor", - start_block: Optional[int] = 75996, - network: str = "testnet", - end_block: Optional[int] = None, - expire_after_occurrence: int = 1, - webhook: Optional[WebhookConfig] = None, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring contract deployments.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_transaction_filter(txid) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - .with_expiration(expire_after_occurrence) - .build() - ) - return self.create_chainhook(predicate) - - def create_dao_x_linkage_hook( - self, - contract_identifier: str, - method: str = "send", - name: str = "dao-x-linkage", - start_block: int = 601924, - network: str = "mainnet", - end_block: Optional[int] = None, - webhook: Optional[WebhookConfig] = None, - ) -> Dict[str, Any]: - """Create a chainhook for monitoring DAO X linkage.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_contract_call_filter(contract_identifier, method) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - .build() - ) - return self.create_chainhook(predicate) - - # Async versions of the hook creation methods - async def acreate_transaction_hook( - self, - txid: str, - name: str = "tx-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - expire_after_occurrence: int = 1, - ) -> Dict[str, Any]: - """Async version of create_transaction_hook.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_transaction_filter(txid) - .with_blocks(start_block) - .with_webhook(webhook or self.default_webhook) - .with_expiration(expire_after_occurrence) - .build() - ) - return await self.acreate_chainhook(predicate) - - async def acreate_contract_call_hook( - self, - contract_identifier: str, - method: str, - name: str = "contract-call-monitor", - start_block: Optional[int] = None, - network: str = "testnet", - webhook: Optional[WebhookConfig] = None, - end_block: Optional[int] = None, - expire_after_occurrence: Optional[int] = None, - ) -> Dict[str, Any]: - """Async version of create_contract_call_hook.""" - builder = ( - ChainHookBuilder(name, network=network) - .with_contract_call_filter(contract_identifier, method) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - ) - - if expire_after_occurrence is not None: - builder.with_expiration(expire_after_occurrence) - - return await self.acreate_chainhook(builder.build()) - - async def acreate_dao_x_linkage_hook( - self, - contract_identifier: str, - method: str = "send", - name: str = "dao-x-linkage", - start_block: int = 601924, - network: str = "mainnet", - end_block: 
Optional[int] = None, - webhook: Optional[WebhookConfig] = None, - ) -> Dict[str, Any]: - """Async version of create_dao_x_linkage_hook.""" - predicate = ( - ChainHookBuilder(name, network=network) - .with_contract_call_filter(contract_identifier, method) - .with_blocks(start_block, end_block) - .with_webhook(webhook or self.default_webhook) - .build() - ) - return await self.acreate_chainhook(predicate) - - -class HiroApi(BaseHiroApi): - """Client for interacting with the Hiro API. - - This client provides methods to interact with various Hiro API endpoints, - organized by category (transactions, blocks, addresses, etc.). - It includes features like rate limiting, retries, caching, and async support. - """ - - # API endpoint categories - ENDPOINTS = { - "transactions": "/extended/v1/tx", - "blocks": "/extended/v1/block", - "addresses": "/extended/v1/address", - "tokens": "/extended/v1/tokens", - "contracts": "/extended/v1/contract", - "burnchain": "/extended/v1/burnchain", - "search": "/extended/v1/search", - "fee_rate": "/extended/v1/fee_rate", - "stx_supply": "/extended/v1/stx_supply", - } - - def __init__(self): - """Initialize the Hiro API client.""" - super().__init__(config.api.hiro_api_url) - - @cached(lambda self: self._cache) - def get_token_holders(self, token: str) -> Dict[str, Any]: - """Retrieve a list of token holders with caching.""" - logger.debug("Retrieving token holders for %s", token) - return self._make_request( - "GET", f"{self.ENDPOINTS['tokens']}/ft/{token}/holders" - ) - - def get_address_balance(self, addr: str) -> Dict[str, Any]: - """Retrieve wallet balance for an address.""" - logger.debug("Retrieving balance for address %s", addr) - return self._make_request( - "GET", f"{self.ENDPOINTS['addresses']}/{addr}/balances" - ) - - # Transaction related endpoints - def get_transaction(self, tx_id: str) -> Dict[str, Any]: - """Get transaction details.""" - return self._make_request("GET", f"/extended/v1/tx/{tx_id}") - - def get_raw_transaction(self, tx_id: str) -> Dict[str, Any]: - """Get raw transaction details.""" - return self._make_request("GET", f"/extended/v1/tx/{tx_id}/raw") - - def get_transactions_by_block(self, block_hash: str) -> Dict[str, Any]: - """Get transactions in a block.""" - return self._make_request("GET", f"/extended/v1/tx/block/{block_hash}") - - def get_transactions_by_block_height(self, height: int) -> Dict[str, Any]: - """Get transactions in a block by height.""" - return self._make_request("GET", f"/extended/v1/tx/block_height/{height}") - - def get_mempool_transactions(self) -> Dict[str, Any]: - """Get pending transactions.""" - return self._make_request("GET", "/extended/v1/tx/mempool") - - def get_dropped_mempool_transactions(self) -> Dict[str, Any]: - """Get dropped transactions.""" - return self._make_request("GET", "/extended/v1/tx/mempool/dropped") - - def get_mempool_stats(self) -> Dict[str, Any]: - """Get mempool statistics.""" - return self._make_request("GET", "/extended/v1/tx/mempool/stats") - - # Block related endpoints - def get_blocks(self) -> Dict[str, Any]: - """Get recent blocks.""" - return self._make_request("GET", "/extended/v1/block") - - def get_block_by_height(self, height: int) -> Dict[str, Any]: - """Get block by height.""" - return self._make_request("GET", f"/extended/v1/block/by_height/{height}") - - def get_block_by_hash(self, block_hash: str) -> Dict[str, Any]: - """Get block by hash.""" - return self._make_request("GET", f"/extended/v1/block/{block_hash}") - - def get_block_by_burn_block_height(self, 
burn_block_height: int) -> Dict[str, Any]: - """Get block by burn block height.""" - return self._make_request( - "GET", f"/extended/v1/block/by_burn_block_height/{burn_block_height}" - ) - - # Address related endpoints - def get_address_stx_balance(self, principal: str) -> Dict[str, Any]: - """Get STX balance.""" - return self._make_request("GET", f"/extended/v1/address/{principal}/stx") - - def get_address_transactions(self, principal: str) -> Dict[str, Any]: - """Get transactions for an address.""" - return self._make_request( - "GET", f"/extended/v1/address/{principal}/transactions" - ) - - def get_address_transactions_with_transfers(self, principal: str) -> Dict[str, Any]: - """Get transactions with transfers.""" - return self._make_request( - "GET", f"/extended/v1/address/{principal}/transactions_with_transfers" - ) - - def get_address_assets(self, principal: str) -> Dict[str, Any]: - """Get assets owned.""" - return self._make_request("GET", f"/extended/v1/address/{principal}/assets") - - def get_address_mempool(self, principal: str) -> Dict[str, Any]: - """Get mempool transactions.""" - return self._make_request("GET", f"/extended/v1/address/{principal}/mempool") - - def get_address_nonces(self, principal: str) -> Dict[str, Any]: - """Get nonce information.""" - return self._make_request("GET", f"/extended/v1/address/{principal}/nonces") - - # Token related endpoints - def get_nft_holdings(self, **params) -> Dict[str, Any]: - """Get NFT holdings.""" - return self._make_request( - "GET", "/extended/v1/tokens/nft/holdings", params=params - ) - - def get_nft_history(self, **params) -> Dict[str, Any]: - """Get NFT history.""" - return self._make_request( - "GET", "/extended/v1/tokens/nft/history", params=params - ) - - def get_nft_mints(self, **params) -> Dict[str, Any]: - """Get NFT mints.""" - return self._make_request("GET", "/extended/v1/tokens/nft/mints", params=params) - - # Contract related endpoints - def get_contract_by_id(self, contract_id: str) -> Dict[str, Any]: - """Get contract details.""" - return self._make_request("GET", f"/extended/v1/contract/{contract_id}") - - def get_contract_events(self, contract_id: str) -> Dict[str, Any]: - """Get contract events.""" - return self._make_request("GET", f"/extended/v1/contract/{contract_id}/events") - - def get_contract_source( - self, contract_address: str, contract_name: str - ) -> Dict[str, Any]: - """Get the source code of a contract. 
- - Args: - contract_address: The contract's address - contract_name: The name of the contract - - Returns: - Dict containing the contract source code and metadata - """ - return self._make_request( - "GET", f"/v2/contracts/source/{contract_address}/{contract_name}" - ) - - # Burnchain related endpoints - def get_burnchain_rewards(self) -> Dict[str, Any]: - """Get burnchain rewards.""" - return self._make_request("GET", "/extended/v1/burnchain/rewards") - - def get_address_burnchain_rewards(self, address: str) -> Dict[str, Any]: - """Get burnchain rewards for an address.""" - return self._make_request("GET", f"/extended/v1/burnchain/rewards/{address}") - - def get_address_total_burnchain_rewards(self, address: str) -> Dict[str, Any]: - """Get total burnchain rewards.""" - return self._make_request( - "GET", f"/extended/v1/burnchain/rewards/{address}/total" - ) - - # Utility endpoints - @cached(lambda self: self._cache) - def get_fee_rate(self) -> Dict[str, Any]: - """Get current fee rate with caching.""" - logger.debug("Retrieving current fee rate") - return self._make_request("GET", "/extended/v1/fee_rate") - - @cached(lambda self: self._cache) - def get_stx_supply(self) -> Dict[str, Any]: - """Get STX supply with caching.""" - logger.debug("Retrieving STX supply") - return self._make_request("GET", "/extended/v1/stx_supply") - - @cached(lambda self: self._cache) - def get_stx_price(self) -> float: - """Get the current STX price with caching.""" - logger.debug("Retrieving current STX price") - response = requests.get( - "https://explorer.hiro.so/stxPrice", params={"blockBurnTime": "current"} - ) - response.raise_for_status() - return response.json()["price"] - - # @cached(lambda self: self._cache) - def get_current_block_height(self) -> int: - """Get the current block height""" - logger.debug("Retrieving current block height") - logger.debug(f"Endpoint: {self.ENDPOINTS['blocks']}") - response = self._make_request( - method="GET", - endpoint=self.ENDPOINTS["blocks"], - params={"limit": 1, "offset": 0}, - ) - logger.debug(f"Response: {response}") - return response["results"][0]["height"] - - def search(self, query_id: str) -> Dict[str, Any]: - """Search for blocks, transactions, contracts, or addresses.""" - logger.debug("Performing search for query: %s", query_id) - return self._make_request("GET", f"{self.ENDPOINTS['search']}/{query_id}") - - # Async versions of selected methods - async def aget_token_holders(self, token: str) -> Dict[str, Any]: - """Async version of get_token_holders.""" - logger.debug("Async retrieving token holders for %s", token) - return await self._amake_request( - "GET", f"{self.ENDPOINTS['tokens']}/ft/{token}/holders" - ) - - async def aget_address_balance(self, addr: str) -> Dict[str, Any]: - """Async version of get_address_balance.""" - logger.debug("Async retrieving balance for address %s", addr) - return await self._amake_request( - "GET", f"{self.ENDPOINTS['addresses']}/{addr}/balances" - ) - - # ... add async versions of other methods as needed ... 
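Note (illustrative, not part of this diff): with lib/hiro.py deleted above, any remaining caller of the old HiroApi client needs a replacement. In line with the requests-to-httpx migration shown in the lib/images.py and lib/lunarcrush.py hunks below, a minimal sketch of one such call is given here, reusing the endpoint path from the removed HiroApi.get_address_balance; the config attribute name and the timeout value are assumptions, and this is only a sketch of the direction, not the project's actual replacement code.

import httpx

from config import config  # assumed to still expose config.api.hiro_api_url


def get_address_balance(principal: str) -> dict:
    """Fetch account balances for an address directly from the Hiro API.

    Mirrors the deleted HiroApi.get_address_balance, which called
    GET {base_url}/extended/v1/address/{principal}/balances.
    """
    url = f"{config.api.hiro_api_url}/extended/v1/address/{principal}/balances"
    response = httpx.get(url, headers={"Accept": "application/json"}, timeout=10.0)
    response.raise_for_status()
    return response.json()

Callers that previously wrapped errors in HiroApiError would instead catch httpx.HTTPStatusError / httpx.RequestError, as the updated lib/lunarcrush.py below does.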
diff --git a/lib/images.py b/lib/images.py index bc2d8055..2fae2d40 100644 --- a/lib/images.py +++ b/lib/images.py @@ -1,10 +1,8 @@ import openai -import requests +import httpx from config import config -openai.api_key = config.api.openai_api_key - class ImageGenerationError(Exception): """Raised when image generation fails""" @@ -25,7 +23,10 @@ def generate_image(prompt: str) -> str: ImageGenerationError: If image generation fails """ try: - client = openai.OpenAI() + client = openai.OpenAI( + api_key=config.chat_llm.api_key, + base_url=config.chat_llm.api_base if config.chat_llm.api_base else None, + ) response = client.images.generate( model="dall-e-3", quality="hd", prompt=prompt, n=1, size="1024x1024" ) @@ -75,11 +76,8 @@ def generate_token_image(name: str, symbol: str, description: str) -> bytes: if not image_url: raise ImageGenerationError("Failed to get image URL") - response = requests.get(image_url) - if response.status_code != 200: - raise ImageGenerationError( - f"Failed to download image: HTTP {response.status_code}" - ) + response = httpx.get(image_url) + response.raise_for_status() if not response.content: raise ImageGenerationError("Downloaded image is empty") @@ -88,6 +86,12 @@ def generate_token_image(name: str, symbol: str, description: str) -> bytes: except ImageGenerationError: raise # Re-raise ImageGenerationError as is + except httpx.HTTPStatusError as e: + raise ImageGenerationError( + f"Failed to download image: HTTP {e.response.status_code}" + ) from e + except httpx.RequestError as e: + raise ImageGenerationError(f"Failed to download image: {str(e)}") from e except Exception as e: raise ImageGenerationError( f"Unexpected error generating token image: {str(e)}" diff --git a/lib/lunarcrush.py b/lib/lunarcrush.py index 3ce9117d..2d4d7f7b 100644 --- a/lib/lunarcrush.py +++ b/lib/lunarcrush.py @@ -1,4 +1,4 @@ -import requests +import httpx from config import config @@ -20,13 +20,21 @@ def _get(self, endpoint: str, params: dict = None) -> dict: "Authorization": f"Bearer {self.api_key}", } # Make the GET request - response = requests.get(url, headers=headers, params=params) + response = httpx.get(url, headers=headers, params=params) # Check for HTTP errors response.raise_for_status() # Return the JSON response data return response.json() + except httpx.HTTPStatusError as e: + # Raise an exception with a custom error message + raise Exception( + f"Lunarcrush API GET request error: HTTP {e.response.status_code} - {str(e)}" + ) + except httpx.RequestError as e: + # Raise an exception with a custom error message + raise Exception(f"Lunarcrush API GET request error: {str(e)}") except Exception as e: # Raise an exception with a custom error message raise Exception(f"Lunarcrush API GET request error: {str(e)}") diff --git a/lib/persona.py b/lib/persona.py index fc48ba39..62a8d5e6 100644 --- a/lib/persona.py +++ b/lib/persona.py @@ -1,23 +1,8 @@ -from backend.models import Agent - - -def generate_persona(agent: Agent): - persona = f""" +def generate_persona(): + persona = """ You are a helpful financial assistant with a light-hearted tone and a positive attitude. You appreciate humor and enjoy making friendly jokes, especially related to finance and technology. No emojis are allowed in responses. No markdown is allowed in responses. - - Your name is {agent.name}. 
- - Backstory: - {agent.backstory} - - Role: - {agent.role} - - Goal: - {agent.goal} - Knowledge: - Specialize in Stacks blockchain wallet management - Proficient in STX transactions, Clarity smart contracts, and NFT minting diff --git a/lib/twitter.py b/lib/twitter.py deleted file mode 100644 index c5e39069..00000000 --- a/lib/twitter.py +++ /dev/null @@ -1,312 +0,0 @@ -from typing import List, Optional - -from pytwitter import Api -from pytwitter.models import Tweet, User - -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -class TwitterService: - def __init__( - self, - consumer_key: str, - consumer_secret: str, - access_token: str, - access_secret: str, - client_id: str, - client_secret: str, - ): - """Initialize the Twitter service with API credentials.""" - self.consumer_key = consumer_key - self.consumer_secret = consumer_secret - self.access_token = access_token - self.access_secret = access_secret - self.client_id = client_id - self.client_secret = client_secret - self.client = None - - async def _ainitialize(self) -> None: - self.initialize() - - def initialize(self) -> None: - """Initialize the Twitter client.""" - try: - self.client = Api( - client_id=self.client_id, - client_secret=self.client_secret, - consumer_key=self.consumer_key, - consumer_secret=self.consumer_secret, - access_token=self.access_token, - access_secret=self.access_secret, - application_only_auth=False, - ) - logger.info("Twitter client initialized successfully") - except Exception as e: - logger.error(f"Failed to initialize Twitter client: {str(e)}") - raise - - async def _apost_tweet( - self, text: str, reply_in_reply_to_tweet_id: Optional[str] = None - ) -> Optional[Tweet]: - """ - Post a new tweet or reply to an existing tweet. - - Args: - text: The content of the tweet - reply_in_reply_to_tweet_id: Optional ID of tweet to reply to - - Returns: - Tweet data if successful, None if failed - """ - return self.post_tweet(text, reply_in_reply_to_tweet_id) - - def post_tweet( - self, text: str, reply_in_reply_to_tweet_id: Optional[str] = None - ) -> Optional[Tweet]: - """ - Post a new tweet or reply to an existing tweet. - - Args: - text: The content of the tweet - reply_in_reply_to_tweet_id: Optional ID of tweet to reply to - - Returns: - Tweet data if successful, None if failed - """ - try: - if self.client is None: - raise Exception("Twitter client is not initialized") - response = self.client.create_tweet( - text=text, reply_in_reply_to_tweet_id=reply_in_reply_to_tweet_id - ) - logger.info(f"Successfully posted tweet: {text[:20]}...") - if isinstance(response, Tweet): - return response - except Exception as e: - logger.error(f"Failed to post tweet: {str(e)}") - return None - - async def get_user_by_username(self, username: str) -> Optional[User]: - """ - Get user information by username. - - Args: - username: Twitter username without @ symbol - - Returns: - User data if found, None if not found or error - """ - try: - if self.client is None: - raise Exception("Twitter client is not initialized") - response = self.client.get_user(username=username) - if isinstance(response, User): - return response - except Exception as e: - logger.error(f"Failed to get user info for {username}: {str(e)}") - return None - - async def get_user_by_user_id(self, user_id: str) -> Optional[User]: - """ - Get user information by user ID. 
- - Args: - username: Twitter username without @ symbol - - Returns: - User data if found, None if not found or error - """ - try: - if self.client is None: - raise Exception("Twitter client is not initialized") - response = self.client.get_user(user_id=user_id) - if isinstance(response, User): - return response - except Exception as e: - logger.error(f"Failed to get user info for {user_id}: {str(e)}") - return None - - async def get_mentions_by_user_id( - self, user_id: str, max_results: int = 100 - ) -> List[Tweet]: - """ - Get mentions for a specific user. - - Args: - user_id: Twitter user ID to get mentions for - max_results: Maximum number of mentions to return (default 100) - - Returns: - List of mention data - """ - try: - if self.client is None: - raise Exception("Twitter client is not initialized") - response = self.client.get_mentions( - user_id=user_id, - max_results=max_results, - tweet_fields=[ - "id", - "text", - "created_at", - "author_id", - "conversation_id", - "in_reply_to_user_id", - "referenced_tweets", - "public_metrics", - "entities", - "attachments", - "context_annotations", - "withheld", - "reply_settings", - "lang", - ], - expansions=[ - "author_id", - "referenced_tweets.id", - "referenced_tweets.id.author_id", - "entities.mentions.username", - "attachments.media_keys", - "attachments.poll_ids", - "in_reply_to_user_id", - "geo.place_id", - ], - user_fields=[ - "id", - "name", - "username", - "created_at", - "description", - "entities", - "location", - "pinned_tweet_id", - "profile_image_url", - "protected", - "public_metrics", - "url", - "verified", - "withheld", - ], - media_fields=[ - "duration_ms", - "height", - "media_key", - "preview_image_url", - "type", - "url", - "width", - "public_metrics", - "alt_text", - ], - place_fields=[ - "contained_within", - "country", - "country_code", - "full_name", - "geo", - "id", - "name", - "place_type", - ], - poll_fields=[ - "duration_minutes", - "end_datetime", - "id", - "options", - "voting_status", - ], - ) - logger.info(f"Successfully retrieved {len(response.data)} mentions") - return response.data - - except Exception as e: - logger.error(f"Failed to get mentions: {str(e)}") - return [] - - async def get_me(self) -> Optional[User]: - """ - Get information about the authenticated user. - - Returns: - User data if successful, None if failed - """ - try: - if self.client is None: - raise Exception("Twitter client is not initialized") - response = self.client.get_me() - if isinstance(response, User): - return response - return None - except Exception as e: - logger.error(f"Failed to get authenticated user info: {str(e)}") - return None - - async def follow_user(self, target_username: str) -> bool: - """ - Follow a user using their username. Uses the authenticated user as the follower. 
- - Args: - target_username: Username of the account to follow (without @ symbol) - - Returns: - True if successful, False if failed - """ - try: - if self.client is None: - raise Exception("Twitter client is not initialized") - - # Get authenticated user's ID - me = await self.get_me() - if not me: - raise Exception("Failed to get authenticated user info") - - # Get target user's ID - target_user = await self.get_user_by_username(target_username) - if not target_user: - raise Exception(f"Failed to get user info for {target_username}") - - # Follow the user - response = self.client.follow_user( - user_id=me.id, target_user_id=target_user.id - ) - logger.info(f"Successfully followed user: {target_username}") - return True - except Exception as e: - logger.error(f"Failed to follow user {target_username}: {str(e)}") - return False - - async def unfollow_user(self, target_username: str) -> bool: - """ - Unfollow a user using their username. Uses the authenticated user as the unfollower. - - Args: - target_username: Username of the account to unfollow (without @ symbol) - - Returns: - True if successful, False if failed - """ - try: - if self.client is None: - raise Exception("Twitter client is not initialized") - - # Get authenticated user's ID - me = await self.get_me() - if not me: - raise Exception("Failed to get authenticated user info") - - # Get target user's ID - target_user = await self.get_user_by_username(target_username) - if not target_user: - raise Exception(f"Failed to get user info for {target_username}") - - # Unfollow the user - response = self.client.unfollow_user( - user_id=me.id, target_user_id=target_user.id - ) - logger.info(f"Successfully unfollowed user: {target_username}") - return True - except Exception as e: - logger.error(f"Failed to unfollow user {target_username}: {str(e)}") - return False diff --git a/lib/utils.py b/lib/utils.py new file mode 100644 index 00000000..4abcd199 --- /dev/null +++ b/lib/utils.py @@ -0,0 +1,423 @@ +"""Workflow utility functions.""" + +import binascii +import logging +import re +from typing import Dict, List, Optional + +import httpx + +logger = logging.getLogger(__name__) + + +def split_text_into_chunks(text: str, limit: int = 280) -> List[str]: + """Split text into chunks not exceeding the limit without cutting words. + + Args: + text: The text to split into chunks + limit: Maximum character limit per chunk (default 280 for Twitter) + + Returns: + List of text chunks, each under the limit + """ + if not text or not text.strip(): + return [] + + words = text.split() + chunks = [] + current = "" + + for word in words: + # Check if adding this word would exceed the limit + test_length = len(current) + len(word) + (1 if current else 0) + if test_length <= limit: + current = f"{current} {word}".strip() + else: + # Current chunk is full, start a new one + if current: + chunks.append(current) + current = word + + # Add the final chunk if it exists + if current: + chunks.append(current) + + return chunks + + +def create_message_chunks( + main_message: str, + follow_up_message: Optional[str] = None, + limit: int = 280, + add_indices: bool = True, + append_to_each: bool = False, +) -> List[str]: + """Create an array of message chunks from main message and optional follow-up. 
+ + Args: + main_message: The primary message content + follow_up_message: Optional follow-up message to append + limit: Maximum character limit per chunk (default 280 for Twitter) + add_indices: Whether to add thread indices like "(1/4)" to each chunk + append_to_each: If True, append follow_up_message to each chunk instead of creating separate chunks + + Returns: + List of chunked messages ready for sequential posting + """ + chunks = [] + + # Chunk the main message + if main_message and main_message.strip(): + main_chunks = split_text_into_chunks(main_message.strip(), limit) + chunks.extend(main_chunks) + + # Handle follow-up message based on append_to_each flag + if follow_up_message and follow_up_message.strip(): + if append_to_each and chunks: + # When appending to each chunk, we need to optimize space usage + separator = "\n\n" + follow_up_with_separator = f"{separator}{follow_up_message.strip()}" + + # Start with initial chunks and iteratively optimize + optimized_chunks = [] + main_text = main_message.strip() + words = main_text.split() + word_index = 0 + chunk_number = 1 + + while word_index < len(words): + # Estimate the index text for this chunk (we'll refine this) + estimated_total_chunks = max(len(chunks), chunk_number) + temp_index_text = ( + f"({chunk_number}/{estimated_total_chunks}) " if add_indices else "" + ) + + # Calculate available space for main content in this chunk + reserved_space = len(follow_up_with_separator) + len(temp_index_text) + available_main_space = limit - reserved_space + + # Build the chunk by adding words until we approach the limit + current_chunk = "" + chunk_words = [] + + while word_index < len(words): + word = words[word_index] + test_chunk = ( + f"{current_chunk} {word}".strip() if current_chunk else word + ) + + if len(test_chunk) <= available_main_space: + current_chunk = test_chunk + chunk_words.append(word) + word_index += 1 + else: + break + + # If we couldn't fit any words, force at least one word to prevent infinite loop + if not chunk_words and word_index < len(words): + current_chunk = words[word_index] + word_index += 1 + + optimized_chunks.append(current_chunk) + chunk_number += 1 + + # Now we know the exact number of chunks, create final versions with correct indices + total_chunks = len(optimized_chunks) + final_chunks = [] + + for i, chunk in enumerate(optimized_chunks, 1): + # Calculate exact index text at the beginning + index_text = ( + f"({i}/{total_chunks}) " if add_indices and total_chunks > 1 else "" + ) + + # Calculate exact available space for this specific chunk + reserved_space = len(follow_up_with_separator) + len(index_text) + available_main_space = limit - reserved_space + + # Trim chunk if needed to fit exactly (shouldn't happen often with our optimization) + if len(chunk) > available_main_space: + words = chunk.split() + trimmed_chunk = "" + + for word in words: + test_length = ( + len(trimmed_chunk) + len(word) + (1 if trimmed_chunk else 0) + ) + if test_length <= available_main_space: + trimmed_chunk = f"{trimmed_chunk} {word}".strip() + else: + break + + chunk = trimmed_chunk + + # Create the final chunk with index at the beginning - should be as close to 280 as possible + final_chunk = f"{index_text}{chunk}{follow_up_with_separator}" + + # Verify we didn't exceed the limit (safety check) + if len(final_chunk) > limit: + # This shouldn't happen, but if it does, we need to trim more aggressively + excess = len(final_chunk) - limit + words = chunk.split() + while words and excess > 0: + removed_word = words.pop() 
+ excess -= len(removed_word) + 1 # +1 for space + + chunk = " ".join(words) + final_chunk = f"{index_text}{chunk}{follow_up_with_separator}" + + final_chunks.append(final_chunk) + + return final_chunks + else: + # Add follow-up as separate chunks (original behavior) + follow_up_chunks = split_text_into_chunks(follow_up_message.strip(), limit) + chunks.extend(follow_up_chunks) + + # Add thread indices if requested and we have multiple chunks (for non-append_to_each case) + if add_indices and len(chunks) > 1 and not (append_to_each and follow_up_message): + indexed_chunks = [] + total_chunks = len(chunks) + + for i, chunk in enumerate(chunks, 1): + # Calculate space needed for index at the beginning like "(3/4) " + index_text = f"({i}/{total_chunks}) " + index_length = len(index_text) + + # If adding the index would exceed the limit, trim the chunk + if len(chunk) + index_length > limit: + # Trim the chunk to make room for the index, ensuring we don't cut words + available_space = limit - index_length + words = chunk.split() + trimmed_chunk = "" + + for word in words: + test_length = ( + len(trimmed_chunk) + len(word) + (1 if trimmed_chunk else 0) + ) + if test_length <= available_space: + trimmed_chunk = f"{trimmed_chunk} {word}".strip() + else: + break + + chunk = trimmed_chunk + + # Add the index to the beginning of the chunk + indexed_chunk = f"{index_text}{chunk}" + indexed_chunks.append(indexed_chunk) + + return indexed_chunks + + return chunks + + +def extract_image_urls(text: str) -> List[str]: + """ + Extracts image URLs from a string by making HEAD requests to verify Content-Type. + + Args: + text: The input string to search for URLs. + + Returns: + A list of verified image URLs found in the string. + """ + # Find all https URLs in the text + url_pattern = re.compile(r'https://[^\s<>"\'()]+', re.IGNORECASE) + urls = re.findall(url_pattern, text) + + if not urls: + return [] + + image_urls = [] + + # Common image MIME types to check for + image_mime_types = { + "image/jpeg", + "image/jpg", + "image/png", + "image/gif", + "image/webp", + "image/bmp", + "image/svg+xml", + "image/tiff", + "image/ico", + "image/x-icon", + } + + # Use httpx for better async support and modern HTTP handling + try: + with httpx.Client( + timeout=httpx.Timeout(5.0, connect=2.0), # 5s total, 2s connect + follow_redirects=True, + headers={"User-Agent": "Mozilla/5.0 (compatible; ImageBot/1.0)"}, + ) as client: + for url in urls: + try: + # Make HEAD request to check Content-Type without downloading the file + response = client.head(url) + + if response.status_code == 200: + content_type = ( + response.headers.get("Content-Type", "") + .lower() + .split(";")[0] + .strip() + ) + + # Check if it's an image type + if content_type in image_mime_types: + image_urls.append(url) + logger.debug( + f"Found image URL: {url} (Content-Type: {content_type})" + ) + else: + logger.debug( + f"Skipped non-image URL: {url} (Content-Type: {content_type})" + ) + else: + logger.debug( + f"Failed to access URL: {url} (Status: {response.status_code})" + ) + + except httpx.TimeoutException: + logger.debug(f"Timeout checking URL: {url}") + except httpx.RequestError as e: + logger.debug(f"Request error checking URL {url}: {str(e)}") + except Exception as e: + logger.debug(f"Unexpected error checking URL {url}: {str(e)}") + + except Exception as e: + logger.error(f"Failed to initialize HTTP client: {str(e)}") + return [] + + logger.info(f"Found {len(image_urls)} image URLs out of {len(urls)} total URLs") + return image_urls + + +def 
strip_metadata_section(text: str) -> str: + """Remove metadata section starting with '--- Metadata ---' to the end of the text. + + Args: + text: The input text that may contain a metadata section + + Returns: + The text with the metadata section removed + """ + metadata_pattern = r"--- Metadata ---.*$" + # Remove from '--- Metadata ---' to the end, including the marker + return re.sub(metadata_pattern, "", text, flags=re.DOTALL).rstrip() + + +def decode_hex_parameters(hex_string: Optional[str]) -> Optional[str]: + """Decodes a hexadecimal-encoded string if valid. + + Args: + hex_string: The hexadecimal string to decode. + + Returns: + The decoded string, or None if decoding fails. + """ + if not hex_string: + return None + if hex_string.startswith("0x"): + hex_string = hex_string[2:] # Remove "0x" prefix + try: + decoded_bytes = binascii.unhexlify(hex_string) + + # Handle Clarity hex format which often includes length prefixes + # First 5 bytes typically contain: 4-byte length + 1-byte type indicator + if len(decoded_bytes) > 5 and decoded_bytes[0] == 0x0D: # Length byte check + # Skip the 4-byte length prefix and any potential type indicator + decoded_bytes = decoded_bytes[5:] + + decoded_string = decoded_bytes.decode("utf-8", errors="ignore") + logger.debug(f"Successfully decoded hex string: {hex_string[:20]}...") + return decoded_string + except (binascii.Error, UnicodeDecodeError) as e: + logger.warning(f"Failed to decode hex string: {str(e)}") + return None # Return None if decoding fails + + +# Model pricing data (move this to a config or constants file later if needed) +MODEL_PRICES = { + "gpt-4o": { + "input": 2.50, # $2.50 per million input tokens + "output": 10.00, # $10.00 per million output tokens + }, + "gpt-4.1": { + "input": 2.00, # $2.00 per million input tokens + "output": 8.00, # $8.00 per million output tokens + }, + "gpt-4.1-mini": { + "input": 0.40, # $0.40 per million input tokens + "output": 1.60, # $1.60 per million output tokens + }, + "gpt-4.1-nano": { + "input": 0.10, # $0.10 per million input tokens + "output": 0.40, # $0.40 per million output tokens + }, + # Default to gpt-4.1 pricing if model not found + "default": { + "input": 2.00, + "output": 8.00, + }, +} + + +def calculate_token_cost( + token_usage: Dict[str, int], model_name: str +) -> Dict[str, float]: + """Calculate the cost of token usage based on current pricing. 
+ + Args: + token_usage: Dictionary containing input_tokens and output_tokens + model_name: Name of the model used + + Returns: + Dictionary containing cost breakdown and total cost + """ + # Get pricing for the model, default to gpt-4.1 pricing if not found + model_prices = MODEL_PRICES.get(model_name.lower(), MODEL_PRICES["default"]) + + # Extract token counts, ensuring we get integers and handle None values + try: + input_tokens = int(token_usage.get("input_tokens", 0)) + output_tokens = int(token_usage.get("output_tokens", 0)) + except (TypeError, ValueError) as e: + logger.error(f"Error converting token counts to integers: {str(e)}") + input_tokens = 0 + output_tokens = 0 + + # Calculate costs with more precision + input_cost = (input_tokens / 1_000_000.0) * model_prices["input"] + output_cost = (output_tokens / 1_000_000.0) * model_prices["output"] + total_cost = input_cost + output_cost + + # Create detailed token usage breakdown + token_details = { + "input_tokens": input_tokens, + "output_tokens": output_tokens, + "total_tokens": input_tokens + output_tokens, + "model_name": model_name, + "input_price_per_million": model_prices["input"], + "output_price_per_million": model_prices["output"], + } + + # Add token details if available + if "input_token_details" in token_usage: + token_details["input_token_details"] = token_usage["input_token_details"] + if "output_token_details" in token_usage: + token_details["output_token_details"] = token_usage["output_token_details"] + + # Debug logging with more detail + logger.debug( + f"Cost calculation details: Model={model_name} | Input={input_tokens} tokens * ${model_prices['input']}/1M = ${input_cost:.6f} | Output={output_tokens} tokens * ${model_prices['output']}/1M = ${output_cost:.6f} | Total=${total_cost:.6f} | Token details={token_details}" + ) + + return { + "input_cost": round(input_cost, 6), + "output_cost": round(output_cost, 6), + "total_cost": round(total_cost, 6), + "currency": "USD", + "details": token_details, + } diff --git a/lib/velar.py b/lib/velar.py deleted file mode 100644 index 5fe13304..00000000 --- a/lib/velar.py +++ /dev/null @@ -1,119 +0,0 @@ -import requests - -from config import config - - -class VelarApi: - def __init__(self): - self.base_url = config.api.velar_base_url - - def _get(self, endpoint: str, params: dict = None) -> dict: - """Make a GET request to the Velar API.""" - try: - url = self.base_url + endpoint - headers = {"Accept": "application/json"} - response = requests.get(url, headers=headers, params=params) - - response.raise_for_status() - - return response.json() - except Exception as e: - raise Exception(f"Velar API GET request error: {str(e)}") - - def get_tokens(self) -> str: - """Retrieve a list of tokens from the Velar API.""" - try: - return self._get("swapapp/swap/tokens")["message"] - except Exception as e: - raise Exception(f"Swap data retrieval error: {str(e)}") - - def get_pools(self) -> str: - """Retrieve a list of pools from the Velar API.""" - try: - return self._get("watcherapp/pool")["message"] - except Exception as e: - raise Exception(f"Swap data retrieval error: {str(e)}") - - def get_token_pools(self, token: str) -> str: - """Retrieve pools containing a specific token.""" - try: - pools = self.get_pools() - results = [ - x - for x in pools - if x["token0Symbol"] == token or x["token1Symbol"] == token - ] - return results - except Exception as e: - raise Exception(f"Swap data retrieval error: {str(e)}") - - def get_token_stx_pools(self, token: str) -> str: - """Retrieve pools 
containing a specific token paired with STX.""" - try: - pools = self.get_pools() - results = [ - x - for x in pools - if (x["token0Symbol"] == token and x["token1Symbol"] == "STX") - or (x["token0Symbol"] == "STX" and x["token1Symbol"] == token) - ] - return results - except Exception as e: - raise Exception(f"Swap data retrieval error: {str(e)}") - - def get_token_price_history(self, token: str, interval: str = "month") -> str: - """Retrieve the price history of a specific token.""" - try: - return self._get( - f"watcherapp/stats/{token}/?type=price&interval={interval}" - ) - except Exception as e: - raise Exception(f"Token stats retrieval error: {str(e)}") - - def get_token_stats(self, token: str) -> str: - """Retrieve statistics for a specific token.""" - try: - return self._get(f"watcherapp/pool/{token}") - except Exception as e: - raise Exception(f"Token pool stats retrieval error: {str(e)}") - - def get_pool_stats_history( - self, poolId: str, type: str, interval: str = "month" - ) -> str: - """Retrieve historical statistics for a specific pool.""" - try: - return self._get( - f"watcherapp/stats/{poolId}?type={type}&interval={interval}" - ) - except Exception as e: - raise Exception(f"Token pool stats history retrieval error: {str(e)}") - - def get_pool_stats_history_agg(self, poolId: str, interval: str = "month") -> str: - """Retrieve and aggregate historical statistics for a specific pool.""" - try: - tvl_data = self._get( - f"watcherapp/stats/{poolId}?type=tvl&interval={interval}" - ) - volume_data = self._get( - f"watcherapp/stats/{poolId}?type=volume&interval={interval}" - ) - price_data = self._get( - f"watcherapp/stats/{poolId}?type=price&interval={interval}" - ) - - aggregated_data = [] - for price, tvl, volume in zip( - price_data["data"], tvl_data["data"], volume_data["data"] - ): - aggregated_data.append( - { - "price": price["value"], - "tvl": tvl["value"], - "datetime": price["datetime"], - "volume": volume["value"], - } - ) - - return aggregated_data - except Exception as e: - raise Exception(f"Token pool stats history retrieval error: {str(e)}") diff --git a/main.py b/main.py index ea33db14..a0c52440 100644 --- a/main.py +++ b/main.py @@ -3,16 +3,16 @@ from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware -import api from api import chat, tools, webhooks from config import config from lib.logger import configure_logger -from services import startup -from services.websocket import websocket_manager +from services.communication.websocket_service import websocket_manager # Configure module logger logger = configure_logger(__name__) +_ = config + # Define app app = FastAPI( title="AI BTC Dev Backend", @@ -23,16 +23,7 @@ # Configure CORS app.add_middleware( CORSMiddleware, - allow_origins=[ - "https://sprint.aibtc.dev", - "https://sprint-faster.aibtc.dev", - "https://*.aibtcdev-frontend.pages.dev", # Cloudflare preview deployments - "http://localhost:3000", # Local development - "https://staging.aibtc.chat", - "https://app.aibtc.dev", - "https://aibtc.dev", - "https://app-staging.aibtc.dev", - ], + allow_origin_regex=r"^(https://((sprint|sprint-faster|app|app-staging)\.aibtc\.dev|aibtc\.dev|staging\.aibtc\.chat|[^.]+\.aibtcdev-frontend(-staging)?\.pages\.dev)|http://localhost:3000)$", allow_credentials=True, allow_methods=["*"], allow_headers=["*"], @@ -47,25 +38,25 @@ async def health_check(): # Load API routes -app.include_router(api.tools.router) -app.include_router(api.chat.router) -app.include_router(api.webhooks.router) 
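The consolidated allow_origin_regex above replaces the explicit origin list, including the wildcard Cloudflare preview domains. A quick standalone check (not part of the diff) that the pattern admits the previously listed origins and rejects others:

import re

ALLOWED = re.compile(
    r"^(https://((sprint|sprint-faster|app|app-staging)\.aibtc\.dev|aibtc\.dev"
    r"|staging\.aibtc\.chat|[^.]+\.aibtcdev-frontend(-staging)?\.pages\.dev)"
    r"|http://localhost:3000)$"
)

# Origins that should be accepted
for origin in (
    "https://sprint.aibtc.dev",
    "https://app-staging.aibtc.dev",
    "https://aibtc.dev",
    "https://staging.aibtc.chat",
    "https://preview-123.aibtcdev-frontend.pages.dev",
    "https://preview-123.aibtcdev-frontend-staging.pages.dev",
    "http://localhost:3000",
):
    assert ALLOWED.match(origin), origin

# Origins that should be rejected
for origin in (
    "https://evil.example.com",
    "http://aibtc.dev",
    "https://sub.domain.aibtcdev-frontend.pages.dev",
):
    assert not ALLOWED.match(origin), origin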
+app.include_router(tools.router) +app.include_router(chat.router) +app.include_router(webhooks.router) @app.on_event("startup") async def startup_event(): - """Run startup tasks.""" - # Start the WebSocket manager's cleanup task - # Note: This is now redundant as startup.run() will also start the WebSocket manager - # but we'll keep it for clarity and to ensure it's started early + """Run web server startup tasks.""" + logger.info("Starting FastAPI web server...") + # Only start WebSocket manager for web server connections + # Background services (job runners, bot, etc.) are handled by worker.py asyncio.create_task(websocket_manager.start_cleanup_task()) - - # Run other startup tasks - await startup.run() + logger.info("Web server startup complete") @app.on_event("shutdown") async def shutdown_event(): - """Run shutdown tasks.""" - logger.info("Shutting down FastAPI application") - await startup.shutdown() + """Run web server shutdown tasks.""" + logger.info("Shutting down FastAPI web server...") + # Only handle web server specific cleanup + # Background services shutdown is handled by worker.py + logger.info("Web server shutdown complete") diff --git a/proposal_evaluation_test.py b/proposal_evaluation_test.py deleted file mode 100644 index 449b063f..00000000 --- a/proposal_evaluation_test.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Functional test script for the proposal evaluation workflow. - -This script demonstrates the usage of the proposal evaluation workflow -with real-world scenarios. It's not a unit test but rather a functional -test to see the workflow in action. -""" - -import asyncio -from typing import Dict, Optional -from uuid import UUID - -from backend.factory import backend -from backend.models import ( - ProposalCreate, - ProposalType, - QueueMessageCreate, - QueueMessageType, -) -from services.workflows.proposal_evaluation import ( - evaluate_and_vote_on_proposal, - evaluate_proposal_only, -) - - -async def create_test_proposal(dao_id: UUID) -> UUID: - """Create a test proposal for evaluation. 
- - Args: - dao_id: The ID of the DAO to create the proposal for - - Returns: - The ID of the created proposal - """ - # Create test parameters as a JSON object - parameters = { - "action": "test_action", - "amount": 1000, - "description": "Test proposal for evaluation", - "recipient": "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - } - - # Create a test proposal - proposal_data = ProposalCreate( - dao_id=dao_id, - type=ProposalType.ACTION, - parameters=str(parameters), # Convert parameters to string - action="test_action", - contract_principal="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.test-contract", - creator="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - created_at_block=1, - end_block=100, - start_block=1, - liquid_tokens="1000", # Keep as string since that's what the model expects - proposal_id=1, - ) - - try: - # # Create some test tweets for the DAO - # for i in range(3): - # tweet_message = { - # "text": f"Test tweet {i+1} for proposal evaluation", - # "created_at": "2024-03-06T00:00:00Z", - # } - # backend.create_queue_message( - # QueueMessageCreate( - # type=QueueMessageType.TWEET, - # dao_id=dao_id, - # message=tweet_message, - # is_processed=True, - # ) - # ) - # print(f"Created test tweet {i+1} for DAO {dao_id}") - - # Create the proposal - proposal = backend.create_proposal(proposal_data) - print(f"Created test proposal with ID: {proposal.id}") - return proposal.id - except Exception as e: - print(f"Failed to create test proposal: {e}") - raise - - -async def test_proposal_evaluation_workflow(): - """Test the proposal evaluation workflow with different scenarios.""" - try: - # # First, let's run the debug workflow to test basic functionality - # print("Running debug workflow test...") - # debug_result = await debug_proposal_evaluation_workflow() - # print(f"Debug workflow test result: {debug_result}") - - # Now let's test with a real proposal - # First, we need a DAO ID - you would replace this with a real DAO ID - dao_id = UUID( - "cffb355f-50c1-4ec5-8e2f-a0e65547c746" - ) # Replace with real DAO ID - - # Create a test proposal - proposal_id = await create_test_proposal(dao_id) - - # Test scenarios - scenarios = [ - { - "name": "Evaluation Only", - "auto_vote": False, - "confidence_threshold": 0.7, - "description": "Testing proposal evaluation without voting", - }, - { - "name": "Auto-vote Enabled", - "auto_vote": False, # Fixed: Changed to True for auto-vote scenario - "confidence_threshold": 0.7, - "description": "Testing proposal evaluation with auto-voting", - }, - { - "name": "Low Confidence Threshold", - "auto_vote": False, - "confidence_threshold": 0.3, - "description": "Testing with lower confidence threshold", - }, - ] - - # Run each scenario - for scenario in scenarios: - print(f"\nRunning scenario: {scenario['name']}") - print(f"Description: {scenario['description']}") - - try: - if scenario["auto_vote"]: - result = await evaluate_and_vote_on_proposal( - proposal_id=proposal_id, - auto_vote=scenario["auto_vote"], - confidence_threshold=scenario["confidence_threshold"], - dao_id=dao_id, - ) - else: - result = await evaluate_proposal_only( - proposal_id=proposal_id, - wallet_id=UUID("532fd36b-8a9d-4fdd-82d2-25ddcf007488"), - ) - - # Print the results - print("\nEvaluation Results:") - print(f"Success: {result['success']}") - if result["success"]: - print(f"Approval: {result['evaluation']['approve']}") - print(f"Confidence: {result['evaluation']['confidence_score']}") - print(f"Reasoning: {result['evaluation']['reasoning']}") - print(f"Token Usage: 
{result['token_usage']}") - print(f"Cost: ${result['token_costs']['total_cost']:.4f}") - - if scenario["auto_vote"]: - print(f"Auto-voted: {result['auto_voted']}") - if result["vote_result"]: - print(f"Vote Result: {result['vote_result']}") - if result.get("tx_id"): - print(f"Transaction ID: {result['tx_id']}") - else: - print(f"Error: {result.get('error', 'Unknown error')}") - - except Exception as e: - print(f"Error in scenario {scenario['name']}: {e}") - - except Exception as e: - print(f"Test failed: {e}") - raise - - -if __name__ == "__main__": - - # Run the tests - asyncio.run(test_proposal_evaluation_workflow()) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..81f69c46 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,41 @@ +[project] +name = "aibtcdev-backend" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.13" +dependencies = [ + "aiohttp==3.12.13", + "apscheduler==3.11.0", + "cachetools==6.1.0", + "fastapi==0.115.13", + "httpx>=0.25.0", + "langchain==0.3.25", + "langchain-community==0.3.25", + "langchain-core>=0.3.56,<1.0.0", + "langchain-openai==0.3.24", + "langchain-text-splitters==0.3.8", + "langgraph==0.4.8", + "openai==1.88.0", + "pgvector<=0.4.0", + "psycopg2-binary==2.9.10", + "pydantic==2.11.7", + "python-dotenv==1.1.0", + "python-magic==0.4.27", + "python-telegram-bot==22.1", + "sqlalchemy==2.0.41", + "starlette<=0.46.0", + "supabase==2.15.3", + "tiktoken==0.9.0", + "tweepy==4.15.0", + "uvicorn==0.34.3", + "vecs==0.4.5", +] + +[project.optional-dependencies] +testing = [ + "pytest==8.3.5", + "pytest-asyncio==0.26.0", + "pytest-mock==3.14.1", + "responses==0.25.7", +] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 5aaff28b..00000000 --- a/requirements.txt +++ /dev/null @@ -1,23 +0,0 @@ -APScheduler==3.11.0 -cachetools==5.5.2 -fastapi==0.115.12 -langchain==0.3.24 -langchain_core>=0.3.56,<1.0.0 -langchain_openai==0.3.12 -langchain_text_splitters==0.3.8 -langgraph==0.4.1 -openai==1.76.2 -pgvector==0.3.6 -psycopg2==2.9.10 -pydantic==2.11.3 -python-dotenv==1.1.0 -python-telegram-bot==21.11.1 -python-twitter-v2==0.9.2 -Requests==2.32.3 -SQLAlchemy==2.0.40 -starlette==0.46.2 -supabase==2.15.1 -tiktoken==0.9.0 -uvicorn==0.34.2 -vecs==0.4.5 -langchain_community==0.3.23 \ No newline at end of file diff --git a/examples/daos/dao.json b/services/ai/__init__.py similarity index 100% rename from examples/daos/dao.json rename to services/ai/__init__.py diff --git a/services/ai/embeddings/__init__.py b/services/ai/embeddings/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/services/ai/embeddings/embed_service.py b/services/ai/embeddings/embed_service.py new file mode 100644 index 00000000..e9ef5822 --- /dev/null +++ b/services/ai/embeddings/embed_service.py @@ -0,0 +1,117 @@ +"""Embedding service implementation.""" + +from typing import List, Optional + +from langchain_openai import OpenAIEmbeddings + +from config import config +from lib.logger import configure_logger + +logger = configure_logger(__name__) + + +class EmbedService: + """Service for generating text embeddings using OpenAI.""" + + def __init__(self, model_name: Optional[str] = None): + """Initialize the embedding service. + + Args: + model_name: The OpenAI embedding model to use. If None, uses configured default. 
+ """ + self.model_name = model_name or config.embedding.default_model + self._embeddings_client: Optional[OpenAIEmbeddings] = None + + @property + def embeddings_client(self) -> OpenAIEmbeddings: + """Get or create the OpenAI embeddings client.""" + if self._embeddings_client is None: + if not config.embedding.api_key: + raise ValueError("Embedding API key not configured") + + embedding_config = { + "model": self.model_name, + "api_key": config.embedding.api_key, + } + + # Add base_url if configured + if config.embedding.api_base: + embedding_config["base_url"] = config.embedding.api_base + + self._embeddings_client = OpenAIEmbeddings(**embedding_config) + return self._embeddings_client + + async def embed_text(self, text: str) -> Optional[List[float]]: + """Generate embedding for a single text. + + Args: + text: The text to embed + + Returns: + List of floats representing the embedding, or None if failed + """ + if not text or not text.strip(): + logger.warning("Empty text provided for embedding") + return None + + try: + logger.debug(f"Generating embedding for text (length: {len(text)})") + embedding = await self.embeddings_client.aembed_query(text) + logger.debug(f"Generated embedding with dimension: {len(embedding)}") + return embedding + except Exception as e: + logger.error(f"Failed to generate embedding: {str(e)}", exc_info=True) + return None + + async def embed_documents(self, texts: List[str]) -> Optional[List[List[float]]]: + """Generate embeddings for multiple texts. + + Args: + texts: List of texts to embed + + Returns: + List of embeddings, or None if failed + """ + if not texts: + logger.warning("Empty text list provided for embedding") + return None + + # Filter out empty texts + valid_texts = [text for text in texts if text and text.strip()] + if not valid_texts: + logger.warning("No valid texts found for embedding") + return None + + try: + logger.debug(f"Generating embeddings for {len(valid_texts)} texts") + embeddings = await self.embeddings_client.aembed_documents(valid_texts) + logger.debug(f"Generated {len(embeddings)} embeddings") + return embeddings + except Exception as e: + logger.error(f"Failed to generate embeddings: {str(e)}", exc_info=True) + return None + + def is_available(self) -> bool: + """Check if the embedding service is available. + + Returns: + True if the service is properly configured and available + """ + try: + return bool(config.embedding.api_key) + except Exception as e: + logger.error(f"Error checking embedding service availability: {str(e)}") + return False + + async def test_connection(self) -> bool: + """Test the embedding service connection. 
+ + Returns: + True if the service is working correctly + """ + try: + test_embedding = await self.embed_text("test") + return test_embedding is not None and len(test_embedding) > 0 + except Exception as e: + logger.error(f"Embedding service test failed: {str(e)}") + return False diff --git a/services/ai/workflows/__init__.py b/services/ai/workflows/__init__.py new file mode 100644 index 00000000..b6126203 --- /dev/null +++ b/services/ai/workflows/__init__.py @@ -0,0 +1,94 @@ +from services.ai.workflows.agents.proposal_recommendation import ( + ProposalRecommendationAgent, +) +from services.ai.workflows.base import ( + BaseWorkflow, + BaseWorkflowMixin, + ExecutionError, + LangGraphError, + MessageContent, + MessageProcessor, + StateType, + StreamingCallbackHandler, + StreamingError, + ValidationError, +) +from services.ai.workflows.chat import ( + ChatService, + ChatWorkflow, + execute_chat_stream, +) +from services.ai.workflows.mixins.planning_mixin import PlanningCapability +from services.ai.workflows.mixins.vector_mixin import ( + VectorRetrievalCapability, + add_documents_to_vectors, +) +from services.ai.workflows.mixins.web_search_mixin import WebSearchCapability +from services.ai.workflows.proposal_evaluation import ( + ProposalEvaluationWorkflow, +) +from services.ai.workflows.tweet_analysis import ( + TweetAnalysisWorkflow, + analyze_tweet, +) +from services.ai.workflows.tweet_generator import ( + TweetGeneratorWorkflow, + generate_dao_tweet, +) +from services.ai.workflows.workflow_service import ( + BaseWorkflowService, + WorkflowBuilder, + WorkflowFactory, + WorkflowService, + execute_workflow_stream, +) +from services.ai.workflows.utils.model_factory import ( + ModelConfig, + create_chat_openai, + create_planning_llm, + create_reasoning_llm, + get_default_model_name, + get_default_temperature, +) + +__all__ = [ + "BaseWorkflow", + "BaseWorkflowMixin", + "ExecutionError", + "LangGraphError", + "StateType", + "StreamingError", + "ValidationError", + "VectorRetrievalCapability", + "BaseWorkflowService", + "WorkflowBuilder", + "WorkflowFactory", + "WorkflowService", + "execute_workflow_stream", + "MessageContent", + "MessageProcessor", + "StreamingCallbackHandler", + "LangGraphService", + "ReactState", + "ReactWorkflow", + "execute_langgraph_stream", + "ProposalEvaluationWorkflow", + "ProposalRecommendationAgent", + "TweetAnalysisWorkflow", + "TweetGeneratorWorkflow", + "analyze_tweet", + "generate_dao_tweet", + "ChatService", + "ChatWorkflow", + "execute_chat_stream", + "PlanningCapability", + "WebSearchCapability", + "add_documents_to_vectors", + # Model factory exports + "ModelConfig", + "create_chat_openai", + "create_planning_llm", + "create_reasoning_llm", + "get_default_model_name", + "get_default_temperature", +] diff --git a/services/ai/workflows/agents/__init__.py b/services/ai/workflows/agents/__init__.py new file mode 100644 index 00000000..9543667a --- /dev/null +++ b/services/ai/workflows/agents/__init__.py @@ -0,0 +1,21 @@ +from .core_context import CoreContextAgent +from .financial_context import FinancialContextAgent +from .historical_context import HistoricalContextAgent +from .image_processing import ImageProcessingNode +from .proposal_metadata import ProposalMetadataAgent +from .proposal_recommendation import ProposalRecommendationAgent +from .reasoning import ReasoningAgent +from .social_context import SocialContextAgent +from .twitter_processing import TwitterProcessingNode + +__all__ = [ + "CoreContextAgent", + "FinancialContextAgent", + 
"HistoricalContextAgent", + "ImageProcessingNode", + "ProposalMetadataAgent", + "ProposalRecommendationAgent", + "ReasoningAgent", + "SocialContextAgent", + "TwitterProcessingNode", +] diff --git a/services/ai/workflows/agents/core_context.py b/services/ai/workflows/agents/core_context.py new file mode 100644 index 00000000..752e4d5a --- /dev/null +++ b/services/ai/workflows/agents/core_context.py @@ -0,0 +1,209 @@ +from typing import Any, Dict, List, Optional + +from langchain_core.prompts.chat import ChatPromptTemplate + +from backend.factory import backend +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.ai.workflows.mixins.vector_mixin import VectorRetrievalCapability +from services.ai.workflows.utils.models import AgentOutput +from services.ai.workflows.utils.state_reducers import update_state_with_agent_result +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +logger = configure_logger(__name__) + + +class CoreContextAgent( + BaseCapabilityMixin, VectorRetrievalCapability, TokenUsageMixin, PromptCapability +): + """Core Context Agent evaluates proposals against DAO mission and standards.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Core Context Agent. + + Args: + config: Optional configuration dictionary + """ + BaseCapabilityMixin.__init__(self, config=config, state_key="core_score") + VectorRetrievalCapability.__init__(self) + TokenUsageMixin.__init__(self) + PromptCapability.__init__(self) + self.initialize() + self._initialize_vector_capability() + + def _initialize_vector_capability(self): + """Initialize the vector retrieval capability if not already initialized.""" + if not hasattr(self, "retrieve_from_vector_store"): + self.retrieve_from_vector_store = ( + VectorRetrievalCapability.retrieve_from_vector_store.__get__( + self, self.__class__ + ) + ) + self.logger.info( + "Initialized vector retrieval capability for CoreContextAgent" + ) + + def _create_chat_messages( + self, + proposal_content: str, + dao_mission: str, + proposal_images: List[Dict[str, Any]] = None, + ) -> List: + """Create chat messages for core context evaluation. + + Args: + proposal_content: The proposal content to evaluate + dao_mission: The DAO mission statement + proposal_images: List of processed images + + Returns: + List of chat messages + """ + # System message with evaluation guidelines + system_content = """You are an expert DAO governance evaluator specializing in core context analysis. Your role is to evaluate proposals against the DAO's mission and fundamental standards. + +You must plan extensively before each evaluation, and reflect thoroughly on the alignment between the proposal and DAO mission. Do not rush through this process - take time to analyze thoroughly. + +**Image Evaluation**: If images are attached to this proposal, they are an integral part of the proposal content. You must carefully examine and evaluate any provided images, considering how they support, clarify, or relate to the written proposal. Images may contain diagrams, charts, screenshots, mockups, or other visual information that is essential to understanding the full scope and merit of the proposal. Include your analysis of the visual content in your overall evaluation. 
+ +Evaluation Criteria (weighted): +- Alignment with DAO mission (40% weight) +- Clarity of proposal (20% weight) +- Feasibility and practicality (20% weight) +- Community benefit (20% weight) + +Scoring Guide: +- 0-20: Not aligned, unclear, impractical, or no community benefit +- 21-50: Significant issues or missing details +- 51-70: Adequate but with some concerns or minor risks +- 71-90: Good alignment, clear, practical, and beneficial +- 91-100: Excellent alignment, clarity, feasibility, and community value + +Output Format: +Provide a JSON object with exactly these fields: +- score: A number from 0-100 +- flags: Array of any critical issues or red flags +- summary: Brief summary of your evaluation""" + + # User message with specific evaluation request + user_content = f"""Please evaluate the following proposal against the DAO's core mission and standards: + +DAO Mission: +{dao_mission} + +Proposal to Evaluate: +{proposal_content} + +Based on the evaluation criteria and scoring guide, provide your assessment of how well this proposal aligns with the DAO's mission and meets the core standards for clarity, feasibility, and community benefit.""" + + messages = [{"role": "system", "content": system_content}] + + # Create user message content - start with text + user_message_content = [{"type": "text", "text": user_content}] + + # Add images if available + if proposal_images: + for image in proposal_images: + if image.get("type") == "image_url": + # Add detail parameter if not present + image_with_detail = image.copy() + if "detail" not in image_with_detail.get("image_url", {}): + image_with_detail["image_url"]["detail"] = "auto" + user_message_content.append(image_with_detail) + + # Add the user message + messages.append({"role": "user", "content": user_message_content}) + + return messages + + async def process(self, state: Dict[str, Any]) -> Dict[str, Any]: + """Process the proposal against core DAO context. + + Args: + state: The current workflow state + + Returns: + Dictionary containing evaluation results + """ + self._initialize_vector_capability() + proposal_id = state.get("proposal_id", "unknown") + proposal_content = state.get("proposal_content", "") + dao_id = state.get("dao_id") + state.get("agent_id") + state.get("profile_id") + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Get DAO mission from database using dao_id + dao_mission_text = self.config.get("dao_mission", "") + if not dao_mission_text and dao_id: + try: + self.logger.debug( + f"[DEBUG:CoreAgent:{proposal_id}] Attempting to retrieve DAO mission from database for dao_id: {dao_id}" + ) + dao = backend.get_dao(dao_id) + if dao and dao.mission: + dao_mission_text = dao.mission + self.logger.debug( + f"[DEBUG:CoreAgent:{proposal_id}] Retrieved DAO mission: {dao_mission_text[:100]}..." 
+ ) + else: + self.logger.warning( + f"[DEBUG:CoreAgent:{proposal_id}] No DAO found or no mission field for dao_id: {dao_id}" + ) + dao_mission_text = "Elevate human potential through AI on Bitcoin" + except Exception as e: + self.logger.error( + f"[DEBUG:CoreAgent:{proposal_id}] Error retrieving DAO from database: {str(e)}" + ) + dao_mission_text = "Elevate human potential through AI on Bitcoin" + + # Fallback to default mission if still empty + if not dao_mission_text: + dao_mission_text = "Elevate human potential through AI on Bitcoin" + + # Get proposal images + proposal_images = state.get("proposal_images", []) + + try: + # Create chat messages + messages = self._create_chat_messages( + proposal_content=proposal_content, + dao_mission=dao_mission_text, + proposal_images=proposal_images, + ) + + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output(AgentOutput).ainvoke( + formatted_prompt + ) + result_dict = result.model_dump() + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["core_agent"] = token_usage_data + result_dict["token_usage"] = token_usage_data + result_dict["images_processed"] = len(proposal_images) + + # Update state with agent result + update_state_with_agent_result(state, result_dict, "core") + return result_dict + except Exception as e: + self.logger.error( + f"[DEBUG:CoreAgent:{proposal_id}] Error in core evaluation: {str(e)}" + ) + return { + "score": 50, + "flags": [f"Error: {str(e)}"], + "summary": "Evaluation failed due to error", + "images_processed": len(proposal_images) if proposal_images else 0, + } diff --git a/services/ai/workflows/agents/evaluator.py b/services/ai/workflows/agents/evaluator.py new file mode 100644 index 00000000..f7e77bef --- /dev/null +++ b/services/ai/workflows/agents/evaluator.py @@ -0,0 +1,698 @@ +from typing import Any, Dict, List, Optional +from uuid import UUID + +from langchain_core.prompts.chat import ChatPromptTemplate + +from backend.factory import backend +from backend.models import Proposal, ProposalFilter +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.ai.workflows.mixins.vector_mixin import VectorRetrievalCapability +from services.ai.workflows.utils.models import ( + ComprehensiveEvaluationOutput, + ComprehensiveEvaluatorAgentProcessOutput, +) +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +# Add these constants after the imports, before the class definition + +# Default prompts for comprehensive evaluation +DEFAULT_SYSTEM_PROMPT = """======================= +CONTRIBUTION EVALUATION +======================= + +ROLE AND TASK +You are an AI evaluation agent for a message-based AI DAO. Your job is to evaluate user-submitted proposals to determine if they qualify for reward. You must strictly follow the evaluation steps below, in order, without skipping or reordering. + +IMPORTANT: +Proposals must include (1) a valid, verifiable URL and (2) completed, public-facing work that adds value now. +Do not score or approve proposals that lack a URL or only describe future plans. + +------------------------ +STEP 0 — IMMEDIATE REJECTION CHECK +------------------------ + +Before continuing, check two basic conditions: + +1. 
Does the proposal include a **valid, verifiable URL** (e.g., an X.com post)? +2. Does the proposal showcase **completed work**, not a future plan or intent? + +IF EITHER IS FALSE: +- Immediately set `Final Score = 0` +- Mark the proposal as `REJECTED` +- Clearly list which requirement(s) failed +- **Do not proceed to scoring or synthesis** + +You must fail all proposals that are missing a valid URL or that only describe hypothetical, planned, or future work. + +------------------------ +STEP 1 — EVALUATE EACH CRITERION +------------------------ + +(Only proceed if both conditions above are satisfied.) + +Evaluate the proposal across 8 criteria. +Each score must be justified with a 150–200 word explanation (no bullet points): + +1. Brand Alignment (15%) +2. Contribution Value (15%) +3. Engagement Potential (15%) +4. Clarity (10%) +5. Timeliness (10%) +6. Credibility (10%) +7. Risk Assessment (10%) +8. Mission Alignment (15%) + +Scoring scale: +- 0–20: Critically flawed or harmful +- 21–50: Major gaps or low value +- 51–70: Adequate but limited or unclear +- 71–90: Strong, valuable, well-executed +- 91–100: Outstanding and highly aligned + +In each explanation: +- Reference actual content from the URL +- Weigh risks, ambiguity, and value +- Write complete, original reasoning (no templates) + +------------------------ +STEP 2 — FINAL SCORE CALCULATION +------------------------ + +Final Score = +(Brand × 0.15) + (Contribution × 0.15) + (Engagement × 0.15) + +(Clarity × 0.10) + (Timeliness × 0.10) + (Credibility × 0.10) + +(Risk × 0.10) + (Mission × 0.15) + +------------------------ +STEP 3 — APPROVAL CONDITIONS CHECK +------------------------ + +Approve the proposal ONLY IF **all** of the following are true: +- Final Score ≥ 70 +- Risk Assessment ≥ 40 +- Mission Alignment ≥ 50 +- Proposal includes a valid, verifiable URL +- Contribution is completed and demonstrates current value + +IF ANY CONDITION FAILS: +- Set Final Score to 0 +- Mark as `REJECTED` +- List which condition(s) failed + +------------------------ +STEP 4 — FINAL EXPLANATION (300–400 words) +------------------------ + +If the proposal passed evaluation and checks, write a synthesis: +- Summarize key insights from all 8 categories +- Show how scores reinforce or contradict each other +- Explain long-term value, DAO alignment, and risks +- Clearly justify the final decision +- Include your confidence level and why + +------------------------ +STEP 5 — OUTPUT FORMAT (JSON OBJECT) +------------------------ + +Return a JSON object that includes: +- Each of the 8 scores (0–100) and 150–200 word justifications +- Final Score and Final Explanation (300–400 words) +- Final decision: `"APPROVE"` or `"REJECT"` +- If rejected, list failed conditions (e.g., `"Missing URL"`, `"Future plan only"`) + +------------------------ +QUALITY STANDARD +------------------------ + +All reasoning must be specific, detailed, and grounded in the actual proposal content. +Never use vague, templated, or generic responses. +Strictly enforce all rejection criteria. 
Do not attempt to score or justify speculative or incomplete proposals.""" + +DEFAULT_USER_PROMPT_TEMPLATE = """Evaluate this proposal: + +**PROPOSAL:** +{proposal_content} + +**DAO MISSION:** +{dao_mission} + +**COMMUNITY INFO:** +{community_info} + +**PAST PROPOSALS:** +{past_proposals} + +Provide detailed reasoning for your evaluation and final decision.""" + +logger = configure_logger(__name__) + + +class ComprehensiveEvaluatorAgent( + BaseCapabilityMixin, VectorRetrievalCapability, TokenUsageMixin, PromptCapability +): + """Comprehensive Evaluator Agent that performs all evaluations in a single LLM pass.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Comprehensive Evaluator Agent. + + Args: + config: Optional configuration dictionary + """ + BaseCapabilityMixin.__init__( + self, config=config, state_key="comprehensive_evaluation" + ) + VectorRetrievalCapability.__init__(self) + TokenUsageMixin.__init__(self) + PromptCapability.__init__(self) + self.initialize() + self._initialize_vector_capability() + + def _initialize_vector_capability(self): + """Initialize the vector retrieval capability if not already initialized.""" + if not hasattr(self, "retrieve_from_vector_store"): + self.retrieve_from_vector_store = ( + VectorRetrievalCapability.retrieve_from_vector_store.__get__( + self, self.__class__ + ) + ) + self.logger.info( + "Initialized vector retrieval capability for ComprehensiveEvaluatorAgent" + ) + + async def _fetch_dao_proposals( + self, dao_id: UUID, exclude_proposal_id: Optional[str] = None + ) -> List[Proposal]: + """Fetch all proposals for a specific DAO from Supabase, excluding the current proposal. + + Args: + dao_id: The UUID of the DAO + exclude_proposal_id: Optional proposal ID to exclude from results + + Returns: + List of Proposal objects (excluding the current proposal if specified) + """ + try: + # Create filter to get all proposals for this DAO + filters = ProposalFilter(dao_id=dao_id) + + # Fetch proposals + proposals = backend.list_proposals(filters) + + # Filter out the current proposal if specified + if exclude_proposal_id: + proposals = [p for p in proposals if str(p.id) != exclude_proposal_id] + self.logger.debug( + f"Excluded current proposal {exclude_proposal_id} from historical context" + ) + + self.logger.debug( + f"Retrieved {len(proposals)} proposals for DAO {dao_id} (excluding current)" + ) + return proposals + except Exception as e: + self.logger.error(f"Error fetching proposals for DAO {dao_id}: {str(e)}") + return [] + + def _format_proposals_for_context(self, proposals: List[Proposal]) -> str: + """Format proposals for inclusion in the prompt. + + Args: + proposals: List of all proposals + + Returns: + Formatted text of past proposals + """ + if not proposals: + return "No past proposals available." 
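To make the arithmetic in STEP 2 and the gating in STEP 3 of the system prompt above concrete, here is a small self-contained sketch of the weighted score and approval conditions; the category names and weights come from the prompt, while the sample scores are invented purely for illustration:

WEIGHTS = {
    "brand_alignment": 0.15,
    "contribution_value": 0.15,
    "engagement_potential": 0.15,
    "clarity": 0.10,
    "timeliness": 0.10,
    "credibility": 0.10,
    "risk_assessment": 0.10,
    "mission_alignment": 0.15,
}


def final_score(scores: dict[str, float]) -> float:
    """Weighted sum defined in STEP 2 of the evaluation prompt."""
    return sum(scores[name] * weight for name, weight in WEIGHTS.items())


def passes(scores: dict[str, float], has_url: bool, work_completed: bool) -> bool:
    """Approval conditions from STEP 3: all must hold."""
    return (
        final_score(scores) >= 70
        and scores["risk_assessment"] >= 40
        and scores["mission_alignment"] >= 50
        and has_url
        and work_completed
    )


# Invented example scores, purely to show the computation
example = {
    "brand_alignment": 80,
    "contribution_value": 75,
    "engagement_potential": 70,
    "clarity": 85,
    "timeliness": 90,
    "credibility": 80,
    "risk_assessment": 60,
    "mission_alignment": 85,
}
print(round(final_score(example), 2))                       # 78.0
print(passes(example, has_url=True, work_completed=True))   # True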
+ + try: + # Sort proposals by creation date (newest first to prioritize recent history) + sorted_proposals = [] + for proposal in proposals: + try: + created_at = getattr(proposal, "created_at", None) + if created_at: + sorted_proposals.append((proposal, created_at)) + else: + sorted_proposals.append((proposal, None)) + except Exception as e: + self.logger.warning( + f"Error accessing created_at for proposal: {str(e)}" + ) + sorted_proposals.append((proposal, None)) + + # Sort by created_at, handling None values + sorted_proposals.sort( + key=lambda x: x[1] if x[1] is not None else 0, reverse=True + ) + except Exception as e: + self.logger.error(f"Error sorting proposals: {str(e)}") + sorted_proposals = [(proposal, None) for proposal in proposals] + + # Format individual proposals with all relevant details + formatted_proposals = [] + for i, (proposal, _) in enumerate( + sorted_proposals[:8] + ): # Limit to first 8 for context + try: + # Safely get proposal attributes with proper error handling + title = getattr(proposal, "title", None) or "Untitled" + summary = ( + getattr(proposal, "summary", None) + or getattr(proposal, "content", None) + or "No summary" + ) + status = getattr(proposal, "status", None) or "Unknown" + proposal_type = getattr(proposal, "type", None) or "Unknown" + passed = getattr(proposal, "passed", None) + action = getattr(proposal, "action", None) or "None" + creator = getattr(proposal, "creator", None) or "Unknown" + tags = getattr(proposal, "tags", None) or [] + executed = getattr(proposal, "executed", None) + votes_for = getattr(proposal, "votes_for", None) or 0 + votes_against = getattr(proposal, "votes_against", None) or 0 + met_quorum = getattr(proposal, "met_quorum", None) + met_threshold = getattr(proposal, "met_threshold", None) + + # Safely handle created_at date formatting + created_at = getattr(proposal, "created_at", None) + created_str = "Unknown" + if created_at: + try: + created_str = created_at.strftime("%Y-%m-%d") + except (AttributeError, ValueError): + created_str = str(created_at) + + # Safely convert summary to string and limit length + summary_str = str(summary)[:500] if summary else "No summary" + + # Ensure summary is treated as plain text and safe for prompt processing + summary_str = "".join( + char for char in summary_str if ord(char) >= 32 or char in "\n\r\t" + ) + + # Escape curly braces to prevent f-string/format interpretation issues + summary_str = summary_str.replace("{", "{{").replace("}", "}}") + + # Format tags as a comma-separated string + tags_str = ( + ", ".join( + str(tag) for tag in (tags if isinstance(tags, list) else []) + ) + if tags + else "None" + ) + + proposal_text = ( + f'\n' + f" {str(title)[:100]}\n" + f" {summary_str}\n" + f" {str(creator)}\n" + f" {str(status)}\n" + f" {str(proposal_type)}\n" + f" {created_str}\n" + f" {str(passed) if passed is not None else 'False'}\n" + f" {str(executed) if executed is not None else 'False'}\n" + f" {str(votes_for)}\n" + f" {str(votes_against)}\n" + f" {str(met_quorum) if met_quorum is not None else 'Unknown'}\n" + f" {str(met_threshold) if met_threshold is not None else 'Unknown'}\n" + f" {tags_str}\n" + f" {str(action)}\n" + f"" + ) + + formatted_proposals.append(proposal_text) + except Exception as e: + self.logger.error(f"Error formatting proposal {i}: {str(e)}") + # Add a fallback proposal entry + formatted_proposals.append( + f'\n' + f" Error loading proposal\n" + f" Could not load proposal data: {str(e)}\n" + f" Unknown\n" + f" Unknown\n" + f" Unknown\n" + f" Unknown\n" + 
f" Unknown\n" + f" Unknown\n" + f" 0\n" + f" 0\n" + f" Unknown\n" + f" Unknown\n" + f" None\n" + f" None\n" + f"" + ) + + return ( + "\n\n".join(formatted_proposals) + if formatted_proposals + else "No past proposals available." + ) + + def _create_chat_messages( + self, + proposal_content: str, + dao_mission: str, + community_info: str, + past_proposals: str, + proposal_images: List[Dict[str, Any]] = None, + tweet_content: Optional[str] = None, + dao_id: Optional[str] = None, + agent_id: Optional[str] = None, + profile_id: Optional[str] = None, + custom_system_prompt: Optional[str] = None, + custom_user_prompt: Optional[str] = None, + ) -> List: + """Create chat messages for comprehensive evaluation. + + Args: + proposal_content: The proposal content to evaluate + dao_mission: The DAO mission statement + community_info: Information about the DAO community + past_proposals: Formatted past proposals text + proposal_images: List of processed images + tweet_content: Optional tweet content from linked tweets + dao_id: Optional DAO ID for custom prompt injection + agent_id: Optional agent ID for custom prompt injection + profile_id: Optional profile ID for custom prompt injection + custom_system_prompt: Optional custom system prompt to override default + custom_user_prompt: Optional custom user prompt to override default + + Returns: + List of chat messages + """ + + # Use custom system prompt or default + if custom_system_prompt: + system_content = custom_system_prompt + else: + system_content = DEFAULT_SYSTEM_PROMPT + + # Use custom user prompt or default, format with data + if custom_user_prompt: + # Format custom user prompt with the same data + user_content = custom_user_prompt.format( + proposal_content=proposal_content, + dao_mission=dao_mission, + community_info=community_info, + past_proposals=past_proposals, + ) + else: + user_content = DEFAULT_USER_PROMPT_TEMPLATE.format( + proposal_content=proposal_content, + dao_mission=dao_mission, + community_info=community_info, + past_proposals=past_proposals, + ) + + messages = [{"role": "system", "content": system_content}] + + # Add tweet content as separate user message if available + if tweet_content and tweet_content.strip(): + messages.append( + { + "role": "user", + "content": f"Referenced tweets in this proposal:\n\n{tweet_content}", + } + ) + + # Create user message content - start with text + user_message_content = [{"type": "text", "text": user_content}] + + # Add images if available + if proposal_images: + for image in proposal_images: + if image.get("type") == "image_url": + # Add detail parameter if not present + image_with_detail = image.copy() + if "detail" not in image_with_detail.get("image_url", {}): + image_with_detail["image_url"]["detail"] = "auto" + user_message_content.append(image_with_detail) + + # Add the user message + messages.append({"role": "user", "content": user_message_content}) + + # Apply custom prompt injection if enabled + if dao_id or agent_id or profile_id: + try: + custom_prompt_template = self.create_chat_prompt_with_custom_injection( + default_system_message=system_content, + default_user_message=user_content, + dao_id=dao_id, + agent_id=agent_id, + profile_id=profile_id, + prompt_type="comprehensive_evaluation", + ) + # Return the ChatPromptTemplate directly + return custom_prompt_template + except Exception as e: + self.logger.warning( + f"Custom prompt injection failed, using default: {e}" + ) + + return messages + + async def process( + self, state: Dict[str, Any] + ) -> 
ComprehensiveEvaluatorAgentProcessOutput: + """Process the proposal with comprehensive evaluation. + + Args: + state: The current workflow state containing proposal data and optional custom prompts + + Returns: + Dictionary containing comprehensive evaluation results + """ + self._initialize_vector_capability() + proposal_id = state.get("proposal_id", "unknown") + proposal_content = state.get("proposal_content", "") + dao_id = state.get("dao_id") + agent_id = state.get("agent_id") + profile_id = state.get("profile_id") + custom_system_prompt = state.get("custom_system_prompt") + custom_user_prompt = state.get("custom_user_prompt") + + # Ensure proposal content is safely handled as plain text + if proposal_content: + proposal_content = str(proposal_content) + proposal_content = "".join( + char for char in proposal_content if ord(char) >= 32 or char in "\n\r\t" + ) + proposal_content = proposal_content.replace("{", "{{").replace("}", "}}") + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Get DAO mission from database using dao_id + dao_mission_text = self.config.get("dao_mission", "") + if not dao_mission_text and dao_id: + try: + self.logger.debug( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Attempting to retrieve DAO mission from database for dao_id: {dao_id}" + ) + dao = backend.get_dao(dao_id) + if dao and dao.mission: + dao_mission_text = dao.mission + self.logger.debug( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Retrieved DAO mission: {dao_mission_text[:100]}..." + ) + else: + self.logger.warning( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] No DAO found or no mission field for dao_id: {dao_id}" + ) + dao_mission_text = "Elevate human potential through AI on Bitcoin" + except Exception as e: + self.logger.error( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Error retrieving DAO from database: {str(e)}" + ) + dao_mission_text = "Elevate human potential through AI on Bitcoin" + + # Fallback to default mission if still empty + if not dao_mission_text: + dao_mission_text = "Elevate human potential through AI on Bitcoin" + + # Get community info from config + community_context = self.config.get("community_context", {}) + community_size = community_context.get("community_size", "Unknown") + active_members = community_context.get("active_members", "Unknown") + governance_participation = community_context.get( + "governance_participation", "Low" + ) + recent_sentiment = community_context.get("recent_sentiment", "Neutral") + + community_info = f""" +Community Size: {community_size} +Active Members: {active_members} +Governance Participation: {governance_participation} +Recent Community Sentiment: {recent_sentiment} +""" + + # Retrieve all proposals for this DAO from Supabase (excluding current proposal) + dao_proposals = [] + past_proposals_db_text = "" + try: + if dao_id: + dao_proposals = await self._fetch_dao_proposals( + dao_id, exclude_proposal_id=proposal_id + ) + past_proposals_db_text = self._format_proposals_for_context( + dao_proposals + ) + except Exception as e: + self.logger.error( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Error fetching/formatting DAO proposals: {str(e)}" + ) + past_proposals_db_text = ( + "No past proposals available due to error." 
+ ) + + # Retrieve similar past proposals from vector store if possible + past_proposals_vector_text = "" + try: + self.logger.debug( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Retrieving similar past proposals from vector store" + ) + similar_proposals = await self.retrieve_from_vector_store( + query=proposal_content[ + :1000 + ], # Use first 1000 chars of proposal as query + collection_name=self.config.get( + "proposals_collection", "past_proposals" + ), + limit=3, + ) + past_proposals_vector_text = "\n\n".join( + [ + f'\n{doc.page_content}\n' + for i, doc in enumerate(similar_proposals) + ] + ) + except Exception as e: + self.logger.error( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Error retrieving similar proposals from vector store: {str(e)}" + ) + past_proposals_vector_text = "No similar past proposals available in vector store." + + # Combine both sources of past proposals + past_proposals_text = past_proposals_db_text + if past_proposals_vector_text: + past_proposals_text += ( + "\n\n" + past_proposals_vector_text + if past_proposals_text + else past_proposals_vector_text + ) + + # Get proposal images + proposal_images = state.get("proposal_images", []) + + try: + # Get tweet content from state + tweet_content = state.get("tweet_content", "") + + # Create chat messages or get custom prompt template + messages_or_template = self._create_chat_messages( + proposal_content=proposal_content, + dao_mission=dao_mission_text, + community_info=community_info, + past_proposals=past_proposals_text + or "No past proposals available for comparison.", + proposal_images=proposal_images, + tweet_content=tweet_content, + dao_id=dao_id, + agent_id=agent_id, + profile_id=profile_id, + custom_system_prompt=custom_system_prompt, + custom_user_prompt=custom_user_prompt, + ) + + # Handle both cases: list of messages or ChatPromptTemplate + if isinstance(messages_or_template, ChatPromptTemplate): + # Custom prompt injection returned a ChatPromptTemplate + prompt = messages_or_template + formatted_prompt = prompt.format() + else: + # Default case: list of messages + prompt = ChatPromptTemplate.from_messages(messages_or_template) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output( + ComprehensiveEvaluationOutput + ).ainvoke(formatted_prompt) + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["comprehensive_evaluator"] = token_usage_data + + # Update state with comprehensive result for backward compatibility + # Create backward compatibility fields for legacy code + if result.categories: + for category_result in result.categories: + category_name = category_result.category + category_key = category_name.lower().replace(" ", "_") + + # Update individual score fields for compatibility + state[f"{category_key}_score"] = {"score": category_result.score} + + # Update summaries + if "summaries" not in state: + state["summaries"] = {} + state["summaries"][f"{category_key}_score"] = " ".join( + category_result.reasoning + ) + + # Set final score and decision + state["final_score"] = { + "score": result.final_score, + "decision": result.decision, + "explanation": result.explanation, + } + + # Update flags + state["flags"] = result.flags + + # Update workflow step + state["workflow_step"] = "comprehensive_evaluation_complete" + if "completed_steps" not in state: + state["completed_steps"] = set() + state["completed_steps"].add("comprehensive_evaluation") + + 
self.logger.info( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Successfully completed comprehensive evaluation" + ) + self.logger.info( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Decision: {'Approve' if result.decision else 'Reject'}, Final Score: {result.final_score}" + ) + + # Return the typed model + return ComprehensiveEvaluatorAgentProcessOutput( + categories=result.categories, + final_score=result.final_score, + decision=result.decision, + explanation=result.explanation, + flags=result.flags, + summary=result.summary, + token_usage=token_usage_data, + images_processed=len(proposal_images), + ) + except Exception as e: + self.logger.error( + f"[DEBUG:ComprehensiveEvaluator:{proposal_id}] Error in comprehensive evaluation: {str(e)}" + ) + return ComprehensiveEvaluatorAgentProcessOutput( + categories=[], + final_score=30, + decision=False, + explanation=f"Comprehensive evaluation failed due to error: {str(e)}", + flags=[f"Critical Error: {str(e)}"], + summary="Evaluation failed due to error", + token_usage={}, + images_processed=len(proposal_images) if proposal_images else 0, + ) diff --git a/services/ai/workflows/agents/financial_context.py b/services/ai/workflows/agents/financial_context.py new file mode 100644 index 00000000..69219ee2 --- /dev/null +++ b/services/ai/workflows/agents/financial_context.py @@ -0,0 +1,174 @@ +from typing import Any, Dict, List, Optional + +from langchain_core.prompts.chat import ChatPromptTemplate + +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.ai.workflows.utils.models import AgentOutput +from services.ai.workflows.utils.state_reducers import update_state_with_agent_result +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +logger = configure_logger(__name__) + + +class FinancialContextAgent(BaseCapabilityMixin, TokenUsageMixin, PromptCapability): + """Financial Context Agent evaluates financial aspects of proposals.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Financial Context Agent. + + Args: + config: Optional configuration dictionary + """ + BaseCapabilityMixin.__init__(self, config=config, state_key="financial_score") + TokenUsageMixin.__init__(self) + PromptCapability.__init__(self) + self.initialize() + + def _create_chat_messages( + self, + proposal_content: str, + proposal_images: List[Dict[str, Any]] = None, + ) -> List: + """Create chat messages for financial context evaluation. + + Args: + proposal_content: The proposal content to evaluate + proposal_images: List of processed images + + Returns: + List of chat messages + """ + # System message with financial evaluation guidelines + system_content = """You are an expert financial analyst specializing in DAO treasury management and proposal evaluation. Your role is to assess the financial aspects of proposals to ensure responsible resource allocation. + +You must plan extensively before each evaluation and reflect thoroughly on the financial implications. Consider both immediate costs and long-term financial sustainability. + +**Image Evaluation**: If images are attached to this proposal, they are an integral part of the proposal content. You must carefully examine and evaluate any provided images, considering how they support, clarify, or relate to the written proposal. 
Images may contain budget breakdowns, financial charts, cost projections, timeline diagrams, or other visual information that is essential to understanding the financial aspects and merit of the proposal. Include your analysis of the visual content in your overall financial evaluation. + +**Default Financial Context**: +- If this proposal passes, it will automatically distribute 1000 tokens from the treasury to the proposer +- Beyond this default payout, evaluate any additional financial requests, promises, or money/crypto-related aspects mentioned in the proposal + +Evaluation Criteria (weighted): +- Cost-effectiveness and value for money (40% weight) +- Reasonableness of any additional funding requests (25% weight) +- Financial feasibility of promises or commitments (20% weight) +- Overall financial risk assessment (15% weight) + +Key Considerations: +- Are any additional funding requests beyond the 1000 tokens reasonable and well-justified? +- Are there any promises or commitments in the proposal that involve money, crypto, or treasury resources? +- What are the financial risks or implications of the proposal? +- Are costs (if any) clearly itemized and realistic? +- Does the proposal represent good value for the default 1000 token investment? +- Are there any hidden financial commitments or ongoing costs? + +Scoring Guide: +- 0-20: Very poor financial value, unreasonable requests, or high financial risk +- 21-50: Significant financial concerns, unclear costs, or questionable value +- 51-70: Adequate financial merit with some minor concerns +- 71-90: Good financial value, reasonable requests, clear justification +- 91-100: Excellent financial merit, outstanding value, no financial concerns + +Output Format: +Provide a JSON object with exactly these fields: +- score: A number from 0-100 +- flags: Array of any critical financial issues or red flags +- summary: Brief summary of your financial evaluation""" + + # User message with evaluation request + user_content = f"""Please evaluate the financial aspects of the following proposal: + +**Important Context**: This proposal, if passed, will automatically receive 1000 tokens from the treasury. Your evaluation should focus on: +1. Whether the proposal provides good value for these 1000 tokens +2. Any additional funding requests beyond the 1000 tokens +3. Any financial commitments, promises, or money/crypto-related aspects mentioned in the proposal +4. Overall financial risk and feasibility + +Proposal to Evaluate: +{proposal_content} + +Based on the evaluation criteria, provide your assessment of the proposal's financial merit, focusing on the value provided for the 1000 token investment and any additional financial aspects.""" + + messages = [{"role": "system", "content": system_content}] + + # Create user message content - start with text + user_message_content = [{"type": "text", "text": user_content}] + + # Add images if available + if proposal_images: + for image in proposal_images: + if image.get("type") == "image_url": + # Add detail parameter if not present + image_with_detail = image.copy() + if "detail" not in image_with_detail.get("image_url", {}): + image_with_detail["image_url"]["detail"] = "auto" + user_message_content.append(image_with_detail) + + # Add the user message + messages.append({"role": "user", "content": user_message_content}) + + return messages + + async def process(self, state: Dict[str, Any]) -> Dict[str, Any]: + """Process the proposal's financial aspects. 
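These agents obtain their scores by binding a Pydantic schema to the model via with_structured_output, as in CoreContextAgent above and FinancialContextAgent's process() below. The real AgentOutput model lives in services/ai/workflows/utils/models.py and is not shown in this diff; the shape sketched here is an assumption based on the score/flags/summary fields the prompts request, and the model name is illustrative:

from typing import List

from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class AgentOutputSketch(BaseModel):
    """Assumed shape of the agents' structured output (score / flags / summary)."""

    score: int = Field(ge=0, le=100, description="Score from 0-100")
    flags: List[str] = Field(default_factory=list, description="Critical issues or red flags")
    summary: str = Field(description="Brief summary of the evaluation")


async def evaluate(formatted_prompt: str) -> AgentOutputSketch:
    # Model name and temperature are placeholders; the real values come from the
    # workflow model factory and configuration.
    llm = ChatOpenAI(model="gpt-4.1-mini", temperature=0)
    return await llm.with_structured_output(AgentOutputSketch).ainvoke(formatted_prompt)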
+ + Args: + state: The current workflow state + + Returns: + Dictionary containing financial evaluation results + """ + proposal_id = state.get("proposal_id", "unknown") + proposal_content = state.get("proposal_content", "") + state.get("dao_id") + state.get("agent_id") + state.get("profile_id") + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Get proposal images + proposal_images = state.get("proposal_images", []) + + try: + # Create chat messages + messages = self._create_chat_messages( + proposal_content=proposal_content, + proposal_images=proposal_images, + ) + + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output(AgentOutput).ainvoke( + formatted_prompt + ) + result_dict = result.model_dump() + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["financial_agent"] = token_usage_data + result_dict["token_usage"] = token_usage_data + result_dict["images_processed"] = len(proposal_images) + + # Update state with agent result + update_state_with_agent_result(state, result_dict, "financial") + return result_dict + except Exception as e: + self.logger.error( + f"[DEBUG:FinancialAgent:{proposal_id}] Error in financial evaluation: {str(e)}" + ) + return { + "score": 50, + "flags": [f"Error: {str(e)}"], + "summary": "Financial evaluation failed due to error", + "images_processed": len(proposal_images) if proposal_images else 0, + } diff --git a/services/ai/workflows/agents/historical_context.py b/services/ai/workflows/agents/historical_context.py new file mode 100644 index 00000000..0d6bca91 --- /dev/null +++ b/services/ai/workflows/agents/historical_context.py @@ -0,0 +1,439 @@ +from typing import Any, Dict, List, Optional +from uuid import UUID + +from langchain_core.prompts.chat import ChatPromptTemplate + +from backend.factory import backend +from backend.models import Proposal, ProposalFilter +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.ai.workflows.mixins.vector_mixin import VectorRetrievalCapability +from services.ai.workflows.utils.models import AgentOutput +from services.ai.workflows.utils.state_reducers import update_state_with_agent_result +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +logger = configure_logger(__name__) + + +class HistoricalContextAgent( + BaseCapabilityMixin, VectorRetrievalCapability, TokenUsageMixin, PromptCapability +): + """Historical Context Agent evaluates proposals against DAO historical context and past decisions.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Historical Context Agent. 
+ + Args: + config: Optional configuration dictionary + """ + BaseCapabilityMixin.__init__(self, config=config, state_key="historical_score") + VectorRetrievalCapability.__init__(self) + TokenUsageMixin.__init__(self) + PromptCapability.__init__(self) + self.initialize() + self._initialize_vector_capability() + + def _initialize_vector_capability(self): + """Initialize the vector retrieval capability if not already initialized.""" + if not hasattr(self, "retrieve_from_vector_store"): + self.retrieve_from_vector_store = ( + VectorRetrievalCapability.retrieve_from_vector_store.__get__( + self, self.__class__ + ) + ) + self.logger.info( + "Initialized vector retrieval capability for HistoricalContextAgent" + ) + + async def _fetch_dao_proposals( + self, dao_id: UUID, exclude_proposal_id: Optional[str] = None + ) -> List[Proposal]: + """Fetch all proposals for a specific DAO from Supabase, excluding the current proposal. + + Args: + dao_id: The UUID of the DAO + exclude_proposal_id: Optional proposal ID to exclude from results + + Returns: + List of Proposal objects (excluding the current proposal if specified) + """ + try: + # Create filter to get all proposals for this DAO + filters = ProposalFilter(dao_id=dao_id) + + # Fetch proposals + proposals = backend.list_proposals(filters) + + # Filter out the current proposal if specified + if exclude_proposal_id: + proposals = [p for p in proposals if str(p.id) != exclude_proposal_id] + self.logger.debug( + f"Excluded current proposal {exclude_proposal_id} from historical context" + ) + + self.logger.debug( + f"Retrieved {len(proposals)} proposals for DAO {dao_id} (excluding current)" + ) + return proposals + except Exception as e: + self.logger.error(f"Error fetching proposals for DAO {dao_id}: {str(e)}") + return [] + + def _format_proposals_for_context(self, proposals: List[Proposal]) -> str: + """Format proposals for inclusion in the prompt. + + Args: + proposals: List of all proposals + + Returns: + Formatted text of past proposals + """ + if not proposals: + return "No past proposals available." 
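`_initialize_vector_capability` rebinds the mixin's `retrieve_from_vector_store` function onto the instance through the descriptor protocol. The standalone snippet below illustrates what `function.__get__(obj, type(obj))` does; the names are invented for the example.

```python
# Calling __get__ on a plain function binds it to an instance, producing the
# same kind of bound method that normal attribute lookup would give you.
class Greeter:
    def __init__(self, name: str) -> None:
        self.name = name


def greet(self) -> str:
    return f"Hello from {self.name}"


bound = greet.__get__(Greeter("vector-capability"), Greeter)
print(bound())  # -> "Hello from vector-capability"
```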
+ + try: + # Sort proposals by creation date (newest first to prioritize recent history) + # Use safe sorting with error handling + sorted_proposals = [] + for proposal in proposals: + try: + created_at = getattr(proposal, "created_at", None) + if created_at: + sorted_proposals.append((proposal, created_at)) + else: + sorted_proposals.append((proposal, None)) + except Exception as e: + self.logger.warning( + f"Error accessing created_at for proposal: {str(e)}" + ) + sorted_proposals.append((proposal, None)) + + # Sort by created_at, handling None values + sorted_proposals.sort( + key=lambda x: x[1] if x[1] is not None else 0, reverse=True + ) + except Exception as e: + self.logger.error(f"Error sorting proposals: {str(e)}") + sorted_proposals = [(proposal, None) for proposal in proposals] + + # Format individual proposals with all relevant details + formatted_proposals = [] + for i, (proposal, _) in enumerate( + sorted_proposals[:8] + ): # Limit to first 8 for context + try: + # Safely get proposal attributes with proper error handling + title = getattr(proposal, "title", None) or "Untitled" + summary = ( + getattr(proposal, "summary", None) + or getattr(proposal, "content", None) + or "No summary" + ) + status = getattr(proposal, "status", None) or "Unknown" + proposal_type = getattr(proposal, "type", None) or "Unknown" + passed = getattr(proposal, "passed", None) + action = getattr(proposal, "action", None) or "None" + creator = getattr(proposal, "creator", None) or "Unknown" + tags = getattr(proposal, "tags", None) or [] + executed = getattr(proposal, "executed", None) + votes_for = getattr(proposal, "votes_for", None) or 0 + votes_against = getattr(proposal, "votes_against", None) or 0 + met_quorum = getattr(proposal, "met_quorum", None) + met_threshold = getattr(proposal, "met_threshold", None) + + # Safely handle created_at date formatting + created_at = getattr(proposal, "created_at", None) + created_str = "Unknown" + if created_at: + try: + created_str = created_at.strftime("%Y-%m-%d") + except (AttributeError, ValueError): + created_str = str(created_at) + + # Safely convert summary to string and limit length + summary_str = str(summary)[:500] if summary else "No summary" + + # Ensure summary is treated as plain text and safe for prompt processing + # Remove any control characters that might cause parsing issues + summary_str = "".join( + char for char in summary_str if ord(char) >= 32 or char in "\n\r\t" + ) + + # Escape curly braces to prevent f-string/format interpretation issues + summary_str = summary_str.replace("{", "{{").replace("}", "}}") + + # Format tags as a comma-separated string + tags_str = ( + ", ".join( + str(tag) for tag in (tags if isinstance(tags, list) else []) + ) + if tags + else "None" + ) + + proposal_text = ( + f'\n' + f" {str(title)[:100]}\n" + f" {summary_str}\n" + f" {str(creator)}\n" + f" {str(status)}\n" + f" {str(proposal_type)}\n" + f" {created_str}\n" + f" {str(passed) if passed is not None else 'False'}\n" + f" {str(executed) if executed is not None else 'False'}\n" + f" {str(votes_for)}\n" + f" {str(votes_against)}\n" + f" {str(met_quorum) if met_quorum is not None else 'Unknown'}\n" + f" {str(met_threshold) if met_threshold is not None else 'Unknown'}\n" + f" {tags_str}\n" + f" {str(action)}\n" + f"" + ) + + formatted_proposals.append(proposal_text) + except Exception as e: + self.logger.error(f"Error formatting proposal {i}: {str(e)}") + # Add a fallback proposal entry + formatted_proposals.append( + f'\n' + f" Error loading proposal\n" + f" 
Could not load proposal data: {str(e)}\n" + f" Unknown\n" + f" Unknown\n" + f" Unknown\n" + f" Unknown\n" + f" Unknown\n" + f" Unknown\n" + f" 0\n" + f" 0\n" + f" Unknown\n" + f" Unknown\n" + f" None\n" + f" None\n" + f"" + ) + + return ( + "\n\n".join(formatted_proposals) + if formatted_proposals + else "No past proposals available." + ) + + def _create_chat_messages( + self, + proposal_content: str, + past_proposals: str, + proposal_images: List[Dict[str, Any]] = None, + ) -> List: + """Create chat messages for historical context evaluation. + + Args: + proposal_content: The current proposal content to evaluate + past_proposals: Formatted past proposals text + proposal_images: List of processed images + + Returns: + List of chat messages + """ + # System message with historical evaluation guidelines + system_content = """You are an expert DAO governance historian specializing in proposal analysis and pattern recognition. Your role is to evaluate new proposals against historical context to identify duplicates, sequences, and potential gaming attempts. + +You must plan extensively before each evaluation and reflect thoroughly on historical patterns. The DAO has a 1000 token payout limit per proposal, and submitters might try to game this by splitting large requests across multiple proposals. + +**Image Evaluation**: If images are attached to this proposal, they are an integral part of the proposal content. You must carefully examine and evaluate any provided images, considering how they support, clarify, or relate to the written proposal. Images may contain diagrams, charts, screenshots, mockups, or other visual information that is essential to understanding the full scope and merit of the proposal. When comparing against historical proposals, also consider visual similarities and whether images reveal patterns that might indicate duplicate or sequential proposals. Include your analysis of the visual content in your overall historical evaluation. + +Evaluation Process: +1. First, analyze proposals to identify sequences or relationships: + - Look for proposals with similar titles, themes, or goals + - Identify proposals that might be parts of a multi-stage initiative + - Detect potential attempts to circumvent the 1000 token limit by splitting requests + - Consider chronological relationships between proposals + +2. Then evaluate the current proposal based on: + - Is it a duplicate of past proposals? (25% weight) + - Has it addressed issues raised in similar past proposals? (20% weight) + - Shows consistency with past approved proposals? (25% weight) + - Is potentially part of a sequence to exceed limits? 
(30% weight) + +Key Red Flags: +- Exact duplicates of previous proposals +- Similar requesters, recipients, or incremental funding for the same project +- Proposals that contradict previous decisions +- Suspicious sequence patterns attempting to game token limits + +Scoring Guide: +- 0-20: Exact duplicate, contradicts previous decisions, or appears to be gaming token limits +- 21-50: Significant overlap without addressing past concerns or suspicious sequence pattern +- 51-70: Similar to past proposals but with improvements and reasonable sequence relationship +- 71-90: Builds well on past work with few concerns and transparent relationships +- 91-100: Unique proposal or excellent improvement with clear, legitimate purpose + +Output Format: +Provide a JSON object with exactly these fields: +- score: A number from 0-100 +- flags: Array of any critical issues or red flags +- summary: Brief summary of your evaluation +- sequence_analysis: Identify any proposal sequences and explain relationships""" + + # User message with specific historical context and evaluation request + user_content = f"""Please evaluate the following proposal against the DAO's historical context and past proposals: + +Current Proposal to Evaluate: +{proposal_content} + +Past DAO Proposals: +{past_proposals} + +Analyze this proposal for duplicates, sequences, and potential gaming attempts. Pay special attention to whether this might be part of a sequence of proposals designed to exceed the 1000 token payout limit. Provide your assessment based on the evaluation criteria.""" + + messages = [{"role": "system", "content": system_content}] + + # Create user message content - start with text + user_message_content = [{"type": "text", "text": user_content}] + + # Add images if available + if proposal_images: + for image in proposal_images: + if image.get("type") == "image_url": + # Add detail parameter if not present + image_with_detail = image.copy() + if "detail" not in image_with_detail.get("image_url", {}): + image_with_detail["image_url"]["detail"] = "auto" + user_message_content.append(image_with_detail) + + # Add the user message + messages.append({"role": "user", "content": user_message_content}) + + return messages + + async def process(self, state: Dict[str, Any]) -> Dict[str, Any]: + """Process the proposal against historical context. 
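Unlike the other agents, the historical evaluation adds a `sequence_analysis` field on top of the usual score, flags, and summary. A hypothetical response matching that output format (all values invented for illustration):

```python
# Invented example of the JSON object the historical agent is asked to return.
example_historical_result = {
    "score": 42,
    "flags": ["Same recipient as two recent proposals requesting incremental funding"],
    "summary": "Scope overlaps heavily with recently approved work; prior concerns are not addressed.",
    "sequence_analysis": "Appears to be the third part of a split request that would exceed the 1000 token limit.",
}
```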
+ + Args: + state: The current workflow state + + Returns: + Dictionary containing evaluation results + """ + self._initialize_vector_capability() + proposal_id = state.get("proposal_id", "unknown") + proposal_content = state.get("proposal_content", "") + + # Ensure proposal content is safely handled as plain text + if proposal_content: + # Convert to string and ensure it's treated as plain text + proposal_content = str(proposal_content) + # Remove any null bytes or other control characters that might cause parsing issues + proposal_content = "".join( + char for char in proposal_content if ord(char) >= 32 or char in "\n\r\t" + ) + # Escape curly braces to prevent f-string/format interpretation issues + proposal_content = proposal_content.replace("{", "{{").replace("}", "}}") + dao_id = state.get("dao_id") + state.get("agent_id") + state.get("profile_id") + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Retrieve all proposals for this DAO from Supabase (excluding current proposal) + dao_proposals = [] + past_proposals_db_text = "" + try: + if dao_id: + dao_proposals = await self._fetch_dao_proposals( + dao_id, exclude_proposal_id=proposal_id + ) + past_proposals_db_text = self._format_proposals_for_context( + dao_proposals + ) + except Exception as e: + self.logger.error( + f"[DEBUG:HistoricalAgent:{proposal_id}] Error fetching/formatting DAO proposals: {str(e)}" + ) + past_proposals_db_text = ( + "No past proposals available due to error." + ) + + # Retrieve similar past proposals from vector store if possible + past_proposals_vector_text = "" + try: + self.logger.debug( + f"[DEBUG:HistoricalAgent:{proposal_id}] Retrieving similar past proposals from vector store" + ) + similar_proposals = await self.retrieve_from_vector_store( + query=proposal_content[ + :1000 + ], # Use first 1000 chars of proposal as query + collection_name=self.config.get( + "proposals_collection", "past_proposals" + ), + limit=3, + ) + past_proposals_vector_text = "\n\n".join( + [ + f'\n{doc.page_content}\n' + for i, doc in enumerate(similar_proposals) + ] + ) + except Exception as e: + self.logger.error( + f"[DEBUG:HistoricalAgent:{proposal_id}] Error retrieving similar proposals from vector store: {str(e)}" + ) + past_proposals_vector_text = "No similar past proposals available in vector store." 
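The doubled curly braces above are not cosmetic: the assembled messages are later run through `ChatPromptTemplate.format()`, which treats single braces as template variables. A small sketch of the failure mode and the escape (the sample text is invented):

```python
# Single braces in user-supplied text would be parsed as prompt variables;
# doubling them renders literal braces instead.
from langchain_core.prompts.chat import ChatPromptTemplate

raw = 'Proposal body with a JSON snippet: {"amount": 1000}'
safe = raw.replace("{", "{{").replace("}", "}}")

template = ChatPromptTemplate.from_messages([("human", safe)])
print(template.format())  # braces come out literally; no missing-variable error
```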
+ + # Combine both sources of past proposals + past_proposals_text = past_proposals_db_text + if past_proposals_vector_text: + past_proposals_text += ( + "\n\n" + past_proposals_vector_text + if past_proposals_text + else past_proposals_vector_text + ) + + # Get proposal images + proposal_images = state.get("proposal_images", []) + + try: + # Create chat messages + messages = self._create_chat_messages( + proposal_content=proposal_content, + past_proposals=past_proposals_text + or "No past proposals available for comparison.", + proposal_images=proposal_images, + ) + + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output(AgentOutput).ainvoke( + formatted_prompt + ) + result_dict = result.model_dump() + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["historical_agent"] = token_usage_data + result_dict["token_usage"] = token_usage_data + result_dict["images_processed"] = len(proposal_images) + + # Update state with agent result + update_state_with_agent_result(state, result_dict, "historical") + return result_dict + except Exception as e: + self.logger.error( + f"[DEBUG:HistoricalAgent:{proposal_id}] Error in historical evaluation: {str(e)}" + ) + return { + "score": 50, + "flags": [f"Error: {str(e)}"], + "summary": "Historical evaluation failed due to error", + "sequence_analysis": "Could not analyze potential proposal sequences due to error.", + "images_processed": len(proposal_images) if proposal_images else 0, + } diff --git a/services/ai/workflows/agents/image_processing.py b/services/ai/workflows/agents/image_processing.py new file mode 100644 index 00000000..a60e0f13 --- /dev/null +++ b/services/ai/workflows/agents/image_processing.py @@ -0,0 +1,98 @@ +from typing import Any, Dict, List, Optional + +import magic + +from lib.logger import configure_logger +from lib.utils import extract_image_urls +from services.ai.workflows.mixins.capability_mixins import BaseCapabilityMixin + +logger = configure_logger(__name__) + + +def detect_image_mime_type(image_data: bytes) -> str: + """Detect MIME type from image content using python-magic library. + + Args: + image_data: Raw image bytes + + Returns: + MIME type string, defaults to 'image/jpeg' if unknown or not an image + """ + try: + mime_type = magic.from_buffer(image_data, mime=True) + + # Ensure it's actually an image MIME type + if mime_type and mime_type.startswith("image/"): + return mime_type + else: + logger.warning( + f"Detected non-image MIME type: {mime_type}, defaulting to image/jpeg" + ) + return "image/jpeg" + + except Exception as e: + logger.warning(f"Error detecting MIME type: {e}, defaulting to image/jpeg") + return "image/jpeg" + + +class ImageProcessingNode(BaseCapabilityMixin): + """Workflow node to process proposal images: extract URLs and format them for LLM.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the image processing node. + + Args: + config: Optional configuration dictionary + """ + super().__init__(config=config, state_key="proposal_images") + self.initialize() + + async def process(self, state: Dict[str, Any]) -> List[Dict[str, Any]]: + """Process images in the proposal data. 
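`detect_image_mime_type` sniffs the actual bytes with `python-magic`, so the result does not depend on the file extension. A quick illustration; the file path is a placeholder.

```python
# Content-based detection: a PNG renamed to .jpg is still reported as image/png.
with open("example.png", "rb") as fh:  # hypothetical local file
    data = fh.read()

print(detect_image_mime_type(data))  # -> "image/png"
print(detect_image_mime_type(b"not an image"))  # falls back to "image/jpeg" with a warning
```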
+ + Args: + state: The current workflow state + + Returns: + List of dictionaries containing processed images in a format suitable for LLM + """ + proposal_id = state.get("proposal_id", "unknown") + proposal_content = state.get("proposal_content", "") + + if not proposal_content: + self.logger.info( + f"[ImageProcessorNode:{proposal_id}] No proposal_content, skipping." + ) + # Return empty list to ensure state is updated + return [] + + self.logger.info( + f"[ImageProcessorNode:{proposal_id}] Starting image processing." + ) + image_urls = extract_image_urls(proposal_content) + + if not image_urls: + self.logger.info(f"[ImageProcessorNode:{proposal_id}] No image URLs found.") + # Return empty list explicitly to ensure state is updated + return [] + + processed_images = [] + for url in image_urls: + self.logger.debug( + f"[ImageProcessorNode:{proposal_id}] Processing image URL: {url}" + ) + + processed_images.append( + { + "type": "image_url", + "image_url": {"url": url}, + } + ) + self.logger.debug( + f"[ImageProcessorNode:{proposal_id}] Successfully processed image: {url}" + ) + + self.logger.info( + f"[ImageProcessorNode:{proposal_id}] Processed {len(processed_images)} images." + ) + return processed_images diff --git a/services/ai/workflows/agents/proposal_metadata.py b/services/ai/workflows/agents/proposal_metadata.py new file mode 100644 index 00000000..af8309a2 --- /dev/null +++ b/services/ai/workflows/agents/proposal_metadata.py @@ -0,0 +1,199 @@ +from typing import Any, Dict, List, Optional + +from langchain_core.prompts.chat import ChatPromptTemplate + +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import BaseCapabilityMixin +from services.ai.workflows.utils.models import ProposalMetadataOutput +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +logger = configure_logger(__name__) + + +class ProposalMetadataAgent(BaseCapabilityMixin, TokenUsageMixin): + """Agent that generates title, summary, and metadata tags for proposal content.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Proposal Metadata Agent. + + Args: + config: Optional configuration dictionary + """ + BaseCapabilityMixin.__init__(self, config=config, state_key="proposal_metadata") + TokenUsageMixin.__init__(self) + self.initialize() + + def _create_chat_messages( + self, + proposal_content: str, + dao_name: str, + proposal_type: str, + proposal_images: List[Dict[str, Any]] = None, + ) -> List: + """Create chat messages for proposal metadata generation. + + Args: + proposal_content: Content of the proposal + dao_name: Name of the DAO + proposal_type: Type of the proposal + proposal_images: List of processed images + + Returns: + List of chat messages + """ + # System message with guidelines + system_content = """You are an expert at analyzing DAO proposals and generating comprehensive metadata including titles, summaries, and tags. Create content that accurately represents and categorizes the proposal to help with organization and discoverability. + +**Image Evaluation**: If images are attached to this proposal, they are an integral part of the proposal content. You must carefully examine and evaluate any provided images, considering how they support, clarify, or enhance the written proposal. Images may contain diagrams, charts, screenshots, mockups, wireframes, or other visual information that provides crucial context for understanding the proposal's scope, objectives, and implementation details. 
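Each extracted URL becomes a vision-style `image_url` content part, and the evaluation agents copy the entry and add a `detail` hint when it is missing. The sketch below shows that shape and one subtlety of the copy; the URL is a placeholder.

```python
# Shape of one processed image entry and the downstream copy that adds "detail".
image_entry = {
    "type": "image_url",
    "image_url": {"url": "https://example.com/diagram.png"},
}

image_with_detail = image_entry.copy()
if "detail" not in image_with_detail.get("image_url", {}):
    image_with_detail["image_url"]["detail"] = "auto"

# Note: dict.copy() is shallow, so the nested "image_url" dict is shared and the
# original entry also gains the "detail" key.
```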
Include insights from the visual content when generating the title, summary, and tags. + +Title Guidelines: +- Keep the title under 100 characters +- Make it descriptive and action-oriented +- Avoid jargon or overly technical language +- Capture the main benefit or outcome +- Include the DAO name if it adds context and clarity + +Summary Guidelines: +- Keep the summary under 500 characters (2-3 sentences) +- Explain what the proposal does and why it matters +- Include key objectives or outcomes +- Use clear, accessible language +- Highlight the main benefit to the DAO community + +Tag Guidelines: +- Generate exactly 3-5 tags (no more, no less) +- Each tag should be 1-3 words maximum +- Use lowercase for consistency +- Focus on the main themes, topics, and purpose of the proposal +- Include category-based tags (e.g., "governance", "treasury", "technical") +- Include action-based tags (e.g., "funding", "upgrade", "partnership") +- Avoid overly generic tags like "proposal" or "dao" +- Be specific but not too narrow - tags should be useful for filtering +- Consider the scope and impact of the proposal + +Common Categories: +- governance: for proposals about DAO structure, voting, rules +- treasury: for proposals about financial management, budgets +- technical: for proposals about code, infrastructure, upgrades +- partnerships: for proposals about collaborations, integrations +- community: for proposals about community building, outreach +- security: for proposals about safety, audits, risk management +- tokenomics: for proposals about token mechanics, rewards +- development: for proposals about product development, features +- marketing: for proposals about promotion, brand, awareness +- operations: for proposals about day-to-day functioning + +Output Format: +Provide a JSON object with: +- title: Generated proposal title (max 100 characters) +- summary: Brief summary explaining the proposal (2-3 sentences, max 500 characters) +- tags: Array of 3-5 relevant tags as strings""" + + # User message with proposal content and context + user_content = f"""Please analyze the following proposal content and generate a title, summary, and tags: + +Proposal Content: +{proposal_content} + +DAO Name: {dao_name or "the DAO"} +Proposal Type: {proposal_type or "general proposal"} + +Based on this information, generate appropriate metadata for this proposal.""" + + messages = [{"role": "system", "content": system_content}] + + # Create user message content - start with text + user_message_content = [{"type": "text", "text": user_content}] + + # Add images if available + if proposal_images: + for image in proposal_images: + if image.get("type") == "image_url": + # Add detail parameter if not present + image_with_detail = image.copy() + if "detail" not in image_with_detail.get("image_url", {}): + image_with_detail["image_url"]["detail"] = "auto" + user_message_content.append(image_with_detail) + + # Add the user message + messages.append({"role": "user", "content": user_message_content}) + + return messages + + async def process(self, state: Dict[str, Any]) -> Dict[str, Any]: + """Generate title, summary, and metadata tags for the given proposal content. 
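The title, summary, and tag constraints above effectively describe the schema of `ProposalMetadataOutput`. A hedged sketch of such a model, assuming Pydantic; the actual class in `services/ai/workflows/utils/models.py` may differ:

```python
# Hypothetical sketch of ProposalMetadataOutput based on the prompt's constraints.
from typing import List

from pydantic import BaseModel, Field


class ProposalMetadataOutput(BaseModel):
    title: str = Field(..., max_length=100, description="Generated proposal title")
    summary: str = Field(..., max_length=500, description="2-3 sentence summary")
    tags: List[str] = Field(..., min_length=3, max_length=5, description="Lowercase topic tags")
```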
+ + Args: + state: The current workflow state containing proposal_content + + Returns: + Dictionary containing the generated title, summary, tags, and metadata + """ + proposal_content = state.get("proposal_content") + if not proposal_content: + self.logger.error("No proposal_content provided in state") + return { + "error": "proposal_content is required", + "title": "", + "summary": "", + "tags": [], + } + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Get additional context from state if available + dao_name = state.get("dao_name", "") + proposal_type = state.get("proposal_type", "") + proposal_images = state.get("proposal_images", []) + + try: + # Create chat messages + messages = self._create_chat_messages( + proposal_content=proposal_content, + dao_name=dao_name, + proposal_type=proposal_type, + proposal_images=proposal_images, + ) + + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output( + ProposalMetadataOutput + ).ainvoke(formatted_prompt) + result_dict = result.model_dump() + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["proposal_metadata_agent"] = token_usage_data + result_dict["token_usage"] = token_usage_data + + # Add metadata + result_dict["content_length"] = len(proposal_content) + result_dict["dao_name"] = dao_name + result_dict["proposal_type"] = proposal_type + result_dict["tags_count"] = len(result_dict.get("tags", [])) + result_dict["images_processed"] = len(proposal_images) + + self.logger.info( + f"Generated title, summary, and {len(result_dict.get('tags', []))} tags for proposal: {result_dict.get('title', 'Unknown')}" + ) + return result_dict + + except Exception as e: + self.logger.error(f"Error generating proposal metadata: {str(e)}") + return { + "error": str(e), + "title": "", + "summary": f"Error generating summary: {str(e)}", + "tags": [], + "content_length": len(proposal_content) if proposal_content else 0, + "dao_name": dao_name, + "proposal_type": proposal_type, + "tags_count": 0, + "images_processed": len(proposal_images) if proposal_images else 0, + } diff --git a/services/ai/workflows/agents/proposal_recommendation.py b/services/ai/workflows/agents/proposal_recommendation.py new file mode 100644 index 00000000..5fceb42a --- /dev/null +++ b/services/ai/workflows/agents/proposal_recommendation.py @@ -0,0 +1,410 @@ +from typing import Any, Dict, List, Optional +from uuid import UUID + +from langchain_core.prompts.chat import ChatPromptTemplate + +from backend.factory import backend +from backend.models import DAO, Proposal, ProposalFilter +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import BaseCapabilityMixin +from services.ai.workflows.utils.models import ProposalRecommendationOutput +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +logger = configure_logger(__name__) + + +class ProposalRecommendationAgent(BaseCapabilityMixin, TokenUsageMixin): + """Agent that generates proposal recommendations based on DAO mission and historical context.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Proposal Recommendation Agent. 
+ + Args: + config: Optional configuration dictionary + """ + BaseCapabilityMixin.__init__( + self, config=config, state_key="proposal_recommendation" + ) + TokenUsageMixin.__init__(self) + self.initialize() + + async def _fetch_dao_info(self, dao_id: UUID) -> Optional[DAO]: + """Fetch DAO information from the database. + + Args: + dao_id: The UUID of the DAO + + Returns: + DAO object or None if not found + """ + try: + dao = backend.get_dao(dao_id) + if dao: + self.logger.debug(f"Retrieved DAO info for {dao_id}: {dao.name}") + else: + self.logger.warning(f"No DAO found with ID: {dao_id}") + return dao + except Exception as e: + self.logger.error(f"Error fetching DAO info for {dao_id}: {str(e)}") + return None + + async def _fetch_dao_proposals( + self, dao_id: UUID, limit: int = 50 + ) -> List[Proposal]: + """Fetch recent proposals for a specific DAO from the database. + + Args: + dao_id: The UUID of the DAO + limit: Maximum number of proposals to fetch + + Returns: + List of Proposal objects + """ + try: + # Create filter to get proposals for this DAO + filters = ProposalFilter(dao_id=dao_id) + + # Fetch proposals + proposals = backend.list_proposals(filters) + + # Sort by creation date (newest first) and limit results + sorted_proposals = sorted( + proposals, key=lambda p: p.created_at, reverse=True + ) + limited_proposals = sorted_proposals[:limit] + + self.logger.debug( + f"Retrieved {len(limited_proposals)} recent proposals for DAO {dao_id}" + ) + return limited_proposals + except Exception as e: + self.logger.error(f"Error fetching proposals for DAO {dao_id}: {str(e)}") + return [] + + def _format_proposals_for_context(self, proposals: List[Proposal]) -> str: + """Format proposals for inclusion in the prompt. + + Args: + proposals: List of proposals + + Returns: + Formatted text of past proposals + """ + if not proposals: + return "No past proposals available." 
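One caveat in `_fetch_dao_proposals`: it sorts directly on `created_at`, so a single proposal with a missing timestamp raises a `TypeError` that the surrounding `except` turns into an empty result. A None-tolerant variant, sketched under the assumption that `created_at` can be missing, pushes undated rows to the end instead:

```python
# Defensive sort sketch: undated proposals go last instead of breaking the query.
def sort_newest_first(proposals: list) -> list:
    dated = [p for p in proposals if getattr(p, "created_at", None) is not None]
    undated = [p for p in proposals if getattr(p, "created_at", None) is None]
    return sorted(dated, key=lambda p: p.created_at, reverse=True) + undated
```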
+ + formatted_proposals = [] + for i, proposal in enumerate(proposals): + try: + # Safely get proposal attributes with proper error handling + title = getattr(proposal, "title", None) or "Untitled" + content = getattr(proposal, "content", None) or "No content" + proposal_type = getattr(proposal, "type", None) or "Unknown" + status = getattr(proposal, "status", None) or "Unknown" + passed = getattr(proposal, "passed", None) + + # Safely handle created_at date formatting + created_at = getattr(proposal, "created_at", None) + created_str = "Unknown" + if created_at: + try: + created_str = created_at.strftime("%Y-%m-%d") + except (AttributeError, ValueError): + created_str = str(created_at) + + # Safely convert content to string and limit length + content_str = str(content)[:500] if content else "No content" + + # Ensure content is treated as plain text and safe for prompt processing + # Remove any control characters that might cause parsing issues + content_str = "".join( + char for char in content_str if ord(char) >= 32 or char in "\n\r\t" + ) + + # Escape curly braces to prevent f-string/format interpretation issues + content_str = content_str.replace("{", "{{").replace("}", "}}") + + proposal_text = f""" + {str(title)[:100]} + {content_str} + {str(proposal_type)} + {str(status)} + {created_str} + {str(passed) if passed is not None else "Unknown"} +""" + formatted_proposals.append(proposal_text) + except Exception as e: + self.logger.error(f"Error formatting proposal {i}: {str(e)}") + # Add a fallback proposal entry + formatted_proposals.append( + f""" + Error loading proposal + Could not load proposal data: {str(e)} + Unknown + Unknown + Unknown + Unknown +""" + ) + + return "\n\n".join(formatted_proposals) + + def _create_chat_messages( + self, + dao_name: str, + dao_mission: str, + dao_description: str, + recent_proposals: str, + focus_area: str, + specific_needs: str, + proposal_images: List[Dict[str, Any]] = None, + ) -> List: + """Create chat messages for the proposal recommendation. + + Args: + dao_name: Name of the DAO + dao_mission: Mission statement of the DAO + dao_description: Description of the DAO + recent_proposals: Formatted recent proposals text + focus_area: Focus area for the recommendation + specific_needs: Specific needs mentioned + proposal_images: List of processed images + + Returns: + List of chat messages + """ + # System message with guidelines and context + system_content = """======================= +PROPOSAL RECOMMENDATION +======================= + +ROLE AND TASK +You are an expert DAO governance advisor specializing in strategic proposal recommendations. Your job is to analyze DAO context and generate actionable, high-value proposals that align with the organization's mission and address community needs effectively. + +IMPORTANT REQUIREMENTS: +All recommendations must include concrete, implementable actions with clear deliverables and success metrics. Avoid vague suggestions or theoretical concepts that cannot be executed immediately. + +------------------------ +STEP 1 — CONTEXTUAL ANALYSIS +------------------------ + +Before generating recommendations, analyze the following aspects: + +1. Mission Alignment Assessment + - How well does the focus area align with the DAO's core mission? + - What specific mission elements can be advanced through this proposal? + +2. Historical Pattern Analysis + - What themes and trends emerge from past proposals? + - Which types of proposals have been most/least successful? 
+ - What gaps exist in the current proposal landscape? + +3. Strategic Opportunity Identification + - What immediate value can be delivered to the community? + - How does this proposal build upon or complement existing initiatives? + - What competitive advantages or unique positioning does this create? + +------------------------ +STEP 2 — RECOMMENDATION CRITERIA +------------------------ + +Evaluate your recommendation against these 8 criteria (mirroring evaluation standards): + +1. Brand Alignment (15%): How well does the proposal strengthen the DAO's brand and reputation? +2. Contribution Value (15%): What immediate, measurable value does this provide to the community? +3. Engagement Potential (15%): How likely is this to generate meaningful community participation? +4. Clarity (10%): Are the objectives, deliverables, and success metrics crystal clear? +5. Timeliness (10%): Is this the right time for this type of initiative? +6. Credibility (10%): Is the proposal realistic and achievable with available resources? +7. Risk Assessment (10%): What are the potential downsides and how can they be mitigated? +8. Mission Alignment (15%): How directly does this advance the DAO's stated mission? + +------------------------ +STEP 3 — PROPOSAL STRUCTURE +------------------------ + +Your recommendation must include: + +ESSENTIAL COMPONENTS: +- Clear, actionable title (max 100 characters) +- Specific objectives with measurable outcomes +- Detailed deliverables and timeline +- Success metrics and evaluation criteria +- Resource requirements and budget considerations +- Risk mitigation strategies + +QUALITY STANDARDS: +- All recommendations must be implementable within 90 days +- Include at least 3 specific, measurable success metrics +- Address potential objections or concerns proactively +- Reference relevant past proposals or community needs +- Provide clear next steps for implementation + +------------------------ +STEP 4 — OUTPUT FORMAT (JSON OBJECT) +------------------------ + +Return a JSON object with: +- title: Clear, compelling proposal title (max 100 characters) +- content: Comprehensive proposal with objectives, deliverables, timeline, success metrics (max 1800 characters) +- rationale: Strategic justification based on DAO context, past proposals, and opportunity analysis (max 800 characters) +- priority: Priority level (high, medium, low) with justification +- estimated_impact: Specific expected outcomes and community benefits +- suggested_action: Immediate next steps for proposal submission and implementation + +------------------------ +QUALITY STANDARD +------------------------ + +All recommendations must be: +- Strategically grounded in DAO mission and community needs +- Immediately actionable with clear implementation path +- Supported by analysis of past proposal patterns +- Designed to pass the same evaluation criteria used for proposal assessment +- Written with specific, measurable, and time-bound objectives + +IMPORTANT: Use only ASCII characters (characters 0-127) in all fields. Avoid any Unicode characters, emojis, special symbols, or non-ASCII punctuation. 
Use standard English letters, numbers, and basic punctuation only.""" + + # User message with the specific DAO context and request + user_content = f"""Based on the following DAO information and context, generate a thoughtful recommendation for a new proposal that would benefit the DAO: + +DAO Context: +- Name: {dao_name} +- Mission: {dao_mission} +- Description: {dao_description} + +Recent Proposals: +{recent_proposals} + +Recommendation Request: +- Focus Area: {focus_area} +- Specific Needs: {specific_needs or "No specific needs mentioned"} + +Please analyze this information and provide a proposal recommendation that aligns with the DAO's mission, addresses gaps in recent proposals, and offers clear value to the community.""" + + messages = [{"role": "system", "content": system_content}] + + # Create user message content - start with text + user_message_content = [{"type": "text", "text": user_content}] + + # Add images if available + if proposal_images: + for image in proposal_images: + if image.get("type") == "image_url": + # Add detail parameter if not present + image_with_detail = image.copy() + if "detail" not in image_with_detail.get("image_url", {}): + image_with_detail["image_url"]["detail"] = "auto" + user_message_content.append(image_with_detail) + + # Add the user message + messages.append({"role": "user", "content": user_message_content}) + + return messages + + async def process(self, state: Dict[str, Any]) -> Dict[str, Any]: + """Generate a proposal recommendation based on DAO context. + + Args: + state: The current workflow state containing dao_id + + Returns: + Dictionary containing the proposal recommendation + """ + dao_id = state.get("dao_id") + if not dao_id: + self.logger.error("No dao_id provided in state") + return { + "error": "dao_id is required", + "title": "", + "content": "", + "rationale": "Error: No DAO ID provided", + "priority": "low", + "estimated_impact": "None", + } + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Fetch DAO information + dao = await self._fetch_dao_info(dao_id) + if not dao: + return { + "error": "DAO not found", + "title": "", + "content": "", + "rationale": f"Error: DAO with ID {dao_id} not found", + "priority": "low", + "estimated_impact": "None", + } + + # Fetch recent proposals for context + try: + recent_proposals = await self._fetch_dao_proposals(dao_id, limit=8) + proposals_context = self._format_proposals_for_context(recent_proposals) + except Exception as e: + self.logger.error( + f"Error fetching/formatting proposals for DAO {dao_id}: {str(e)}" + ) + proposals_context = ( + "No past proposals available due to error." 
+ ) + + # Get additional context from state if available + focus_area = state.get("focus_area", "general improvement") + specific_needs = state.get("specific_needs", "") + proposal_images = state.get("proposal_images", []) + + try: + # Create chat messages + messages = self._create_chat_messages( + dao_name=dao.name or "Unknown DAO", + dao_mission=dao.mission or "Mission not specified", + dao_description=dao.description or "Description not provided", + recent_proposals=proposals_context, + focus_area=focus_area, + specific_needs=specific_needs, + proposal_images=proposal_images, + ) + + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output( + ProposalRecommendationOutput + ).ainvoke(formatted_prompt) + result_dict = result.model_dump() + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["proposal_recommendation_agent"] = token_usage_data + result_dict["token_usage"] = token_usage_data + + # Add metadata + result_dict["dao_id"] = str(dao_id) + result_dict["dao_name"] = dao.name + result_dict["proposals_analyzed"] = len(recent_proposals) + result_dict["images_processed"] = len(proposal_images) + + self.logger.info( + f"Generated proposal recommendation for DAO {dao_id}: {result_dict.get('title', 'Unknown')}" + ) + return result_dict + + except Exception as e: + self.logger.error( + f"Error generating proposal recommendation for DAO {dao_id}: {str(e)}" + ) + return { + "error": str(e), + "title": "", + "content": "", + "rationale": f"Error generating recommendation: {str(e)}", + "priority": "low", + "estimated_impact": "None", + "dao_id": str(dao_id), + "dao_name": dao.name if dao else "Unknown", + } diff --git a/services/ai/workflows/agents/reasoning.py b/services/ai/workflows/agents/reasoning.py new file mode 100644 index 00000000..3e05d460 --- /dev/null +++ b/services/ai/workflows/agents/reasoning.py @@ -0,0 +1,556 @@ +import asyncio +from typing import Any, Dict, Optional + +from langchain_core.prompts.chat import ChatPromptTemplate +from langgraph.graph import StateGraph + +from lib.logger import configure_logger +from services.ai.workflows.utils.model_factory import ( + create_chat_openai, + create_reasoning_llm, +) +from services.ai.workflows.chat import StreamingCallbackHandler +from services.ai.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.ai.workflows.mixins.planning_mixin import PlanningCapability +from services.ai.workflows.utils.models import FinalOutput +from services.ai.workflows.utils.state_reducers import update_state_with_agent_result +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +logger = configure_logger(__name__) + + +class ReasoningAgent( + BaseCapabilityMixin, PlanningCapability, TokenUsageMixin, PromptCapability +): + """Reasoning Agent that makes the final evaluation decision based on other agents' inputs.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Reasoning Agent. 
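`FinalOutput`, imported above, carries the reasoning agent's verdict. The prompt later in this file asks for exactly three fields (score, decision, explanation), so a minimal sketch of the model might be the following; the actual class in `services/ai/workflows/utils/models.py` may differ.

```python
# Hypothetical sketch of FinalOutput, inferred from the three-field contract
# used in the reasoning prompt below.
from pydantic import BaseModel, Field


class FinalOutput(BaseModel):
    score: int = Field(..., ge=0, le=100, description="Final overall score")
    decision: str = Field(..., description='Either "Approve" or "Reject"')
    explanation: str = Field(..., description="Structured narrative justifying the decision")
```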
+
+        Args:
+            config: Optional configuration dictionary
+        """
+        BaseCapabilityMixin.__init__(self, config=config, state_key="final_score")
+        TokenUsageMixin.__init__(self)
+        PromptCapability.__init__(self)
+
+        # Create a dummy queue for the StreamingCallbackHandler
+        self.dummy_queue = asyncio.Queue()
+        # Create callback handler and planning_llm for PlanningCapability
+        # These won't be used since we don't actually use the planning functionality
+        self.dummy_callback = StreamingCallbackHandler(queue=self.dummy_queue)
+        self.dummy_llm = create_chat_openai()
+
+        # Pass the required arguments to PlanningCapability.__init__
+        PlanningCapability.__init__(
+            self, callback_handler=self.dummy_callback, planning_llm=self.dummy_llm
+        )
+
+        self.initialize()
+        self._initialize_planning_capability()
+
+        # Configuration for thresholds; guard against a missing config dict
+        config = config or {}
+        self.default_threshold = config.get("approval_threshold", 70)
+        self.veto_threshold = config.get("veto_threshold", 30)
+        self.consensus_threshold = config.get("consensus_threshold", 10)
+        self.confidence_adjustment = config.get("confidence_adjustment", 0.15)
+        self.llm = create_reasoning_llm()  # Uses o3-mini by default for reasoning
+
+    def _initialize_planning_capability(self):
+        """Initialize the planning capability if not already initialized."""
+        if not hasattr(self, "planning"):
+            # We don't actually use the planning method, just create a dummy placeholder
+            self.planning = lambda *args, **kwargs: None
+            self.logger.info("Initialized dummy planning capability for ReasoningAgent")
+
+    def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None:
+        """Hook to integrate with a particular graph."""
+        pass
+
+    def _create_chat_messages(
+        self,
+        agent_evaluations: str,
+        approval_threshold: int,
+        dao_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        profile_id: Optional[str] = None,
+    ) -> list:
+        """Create chat messages for reasoning evaluation.
+
+        Args:
+            agent_evaluations: Formatted string of all agent evaluations
+            approval_threshold: The approval threshold for decision making
+            dao_id: Optional DAO ID for custom prompt injection
+            agent_id: Optional agent ID for custom prompt injection
+            profile_id: Optional profile ID for custom prompt injection
+
+        Returns:
+            List of chat messages
+        """
+        # System message with evaluation guidelines
+        system_content = (
+            """
+
+            You are an agent - please keep going until the user's query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved.
+
+            If you are not sure about file content or codebase structure pertaining to the user's request, use your tools to read files and gather the relevant information: do NOT guess or make up an answer.
+
+            You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
+ + + + + + + For each agent evaluation: + - Assess the quality and depth of the reasoning provided + - Identify specific evidence cited or lack thereof + - Evaluate if the score aligns with the reasoning given + - Note any potential biases or blind spots in the analysis + - Consider the agent's domain expertise relevance to this proposal + + + + - Identify areas where agents agree and disagree + - Look for complementary insights that reinforce each other + - Spot contradictory findings that need resolution + - Assess if disagreements stem from different perspectives or actual conflicts + - Determine which agent insights are most reliable for this specific proposal type + + + + - Analyze score distribution patterns (consensus, polarization, outliers) + - Identify common themes across agent summaries + - Look for correlation between different evaluation dimensions + - Assess if the proposal has consistent strengths/weaknesses across domains + - Consider if any single dimension should be weighted more heavily + + + + - Categorize flags by severity (critical, moderate, minor) + - Assess if flags are substantiated by agent reasoning + - Determine if flags represent deal-breakers or manageable risks + - Consider if multiple minor flags compound into major concerns + - Evaluate if any flags contradict positive agent assessments + + + + - Consider the proposal type and what dimensions matter most + - Assess if certain agent perspectives are more relevant than others + - Weigh immediate vs. long-term implications highlighted by agents + - Consider the DAO's specific context and priorities + - Factor in any time-sensitive or strategic considerations mentioned + + + + + The default threshold for approval is """ + + str(approval_threshold) + + """/100 + + + - Scores should reflect the overall risk-adjusted potential of the proposal + - Higher scores require strong positive evidence from multiple dimensions + - Lower scores should be justified by significant risks or poor reasoning + - Consider both the ceiling (best case) and floor (worst case) outcomes + + + + Score 80+: Clear benefits, minimal risks, strong consensus + Score 60-79: Net positive with manageable risks or some uncertainty + Score 40-59: Unclear net benefit, significant uncertainty, or balanced trade-offs + Score 20-39: Net negative or high risk with limited upside + Score 0-19: Clear harm, fundamental flaws, or critical risks + + + + - Any agent score below 30 suggests critical issues requiring explanation + - Multiple flags indicating legal, security, or ethical violations + - Fundamental misalignment with DAO values or objectives + - Evidence of fraud, manipulation, or malicious intent + + + + You must provide a final overall score (0-100) that directly maps to the approval decision: + + + - Start with the weighted average of reliable agent scores + - Adjust based on flag severity and cross-agent insights + - Apply contextual weighting based on proposal type and DAO priorities + - Ensure the final score accurately reflects the proposal's overall merit + + + + - Strong consensus among agents (minimal adjustments needed) + - Detailed, evidence-based reasoning from multiple agents (higher confidence in scores) + - Clear alignment between different evaluation dimensions (scores are reliable) + - Conflicting evidence or contradictory assessments (may require score adjustments) + - Critical flags or red flags (may require significant score reductions) + - Missing critical information (may require score reductions due to uncertainty) + + + 
+ The approval threshold is 70/100. Ensure your final score reflects: + - Score 70-100: APPROVE - Proposal meets or exceeds the approval threshold + - Score 0-69: REJECT - Proposal falls below the approval threshold + + + + + + + 1. **Individual Agent Assessment**: Evaluate each agent's reasoning quality and reliability + 2. **Cross-Agent Synthesis**: Identify patterns, agreements, and meaningful disagreements + 3. **Risk-Benefit Analysis**: Weigh potential upsides against identified risks and concerns + 4. **Contextual Evaluation**: Consider DAO-specific factors and proposal relevance + 5. **Confidence Assessment**: Determine how certain you are about your evaluation based on the factors above + + + + - Start with the weighted average of reliable agent scores + - Adjust based on flag severity and cross-agent insights + - Consider confidence level in final score precision + - Ensure decision threshold accounts for uncertainty + - Provide specific, actionable reasoning for stakeholders + + + + + + - Provide a final overall score from 0-100 + - Justify how you arrived at this specific score + - Explain any adjustments made to the base average + - Ensure the score accurately reflects the proposal's merit relative to the 70-point threshold + + + + - State clearly "Approve" or "Reject" + - Ensure decision aligns with score and 70-point threshold + - Score 70+ = Approve, Score <70 = Reject + + + + Your explanation should be comprehensive and structured, providing stakeholders with a complete understanding of your reasoning process. Include the following elements in a detailed narrative: + + **Agent Analysis Summary (200-300 words)**: + - Provide a detailed assessment of each agent's evaluation quality and key insights + - Explain which agent perspectives were most valuable and why + - Identify any agent evaluations that were particularly strong or weak in their reasoning + - Discuss how different agent specializations contributed to the overall assessment + - Note any gaps in agent coverage or areas where more analysis would be beneficial + + **Cross-Agent Synthesis (150-250 words)**: + - Analyze patterns of agreement and disagreement between agents + - Explain whether disagreements represent different valid perspectives or actual conflicts + - Discuss how complementary insights from different agents reinforced or contradicted each other + - Identify which agent insights proved most reliable for this specific proposal type + - Address any surprising correlations or lack thereof between evaluation dimensions + + **Risk-Benefit Analysis (200-300 words)**: + - Provide a detailed breakdown of identified benefits and their likelihood/magnitude + - Thoroughly assess risks, their probability, and potential impact + - Explain your risk tolerance assessment for this particular proposal + - Discuss both short-term and long-term implications highlighted by the agents + - Address any trade-offs between different benefits or between benefits and risks + + **Flag Impact Assessment (100-200 words)**: + - Categorize each flag by severity and explain your reasoning + - Discuss whether flags represent deal-breakers, manageable risks, or minor concerns + - Explain how multiple flags might compound or offset each other + - Address any contradictions between positive agent assessments and negative flags + - Provide context on whether flags are unusual for this type of proposal + + **Decision Rationale (150-250 words)**: + - Explain your specific scoring methodology and how you arrived at the final number + - Justify 
any significant adjustments made to the simple average of agent scores + - Discuss how the score translates to your approve/reject decision relative to the 70-point threshold + - Address whether this is a clear-cut decision or a borderline case + - Explain how uncertainty and disagreement between agents were factored into the final score + + **Score Justification (100-150 words)**: + - Provide specific reasoning for your final score + - Identify the main factors that influenced your scoring decision + - Discuss what additional information might have changed the score + - Explain how disagreement between agents affected your scoring + - Address whether the proposal type or complexity contributed to scoring challenges + + **Contextual Considerations (100-200 words)**: + - Discuss any DAO-specific factors that influenced your assessment + - Address timing considerations or strategic implications mentioned by agents + - Explain how this proposal fits within the broader context of DAO objectives + - Consider resource allocation implications and opportunity costs + - Address any precedent-setting aspects of this decision + + **Actionable Recommendations (if applicable, 100-200 words)**: + - For rejected proposals: Provide specific, actionable feedback for improvement + - For approved proposals: Highlight key success factors to monitor during implementation + - Suggest risk mitigation strategies based on identified concerns + - Recommend additional due diligence or safeguards if needed + - Propose metrics or milestones for tracking proposal success + + **Final Summary (50-100 words)**: + - Conclude with a clear, concise statement of your decision and primary reasoning + - Highlight the 2-3 most critical factors that drove your decision + - Provide a forward-looking statement about expected outcomes + + The total explanation should be comprehensive (approximately 1000-1500 words) and demonstrate thorough consideration of all available information while remaining clear and actionable for stakeholders. + + + + + Think step-by-step through this analytical framework. Don't just average scores - synthesize insights, weigh evidence quality, and provide a thoughtful evaluation that helps stakeholders understand both the decision and the reasoning behind it. Your analysis should demonstrate deep consideration of all available information. + + Ensure your final score accurately reflects the proposal's merit and aligns with the 70-point approval threshold. Score 70 or above means approval, below 70 means rejection. + + Return only a JSON object with exactly these three fields: score, decision, and explanation. 
+ +""" + ) + + # User message with evaluation request + user_content = f"""Please evaluate the following agent assessments and make your final decision: + + +{agent_evaluations} + + +Based on the analytical framework and decision guidelines provided in the system instructions, please provide your comprehensive evaluation.""" + + # Apply custom prompt injection if enabled + if dao_id or agent_id or profile_id: + try: + custom_prompt_template = self.create_chat_prompt_with_custom_injection( + default_system_message=system_content, + default_user_message=user_content, + dao_id=dao_id, + agent_id=agent_id, + profile_id=profile_id, + prompt_type="reasoning_evaluation", + ) + # Return the ChatPromptTemplate directly - it will be handled in the process method + return custom_prompt_template + except Exception as e: + self.logger.warning( + f"Custom prompt injection failed, using default: {e}" + ) + + messages = [ + {"role": "system", "content": system_content}, + {"role": "user", "content": user_content}, + ] + + return messages + + async def process(self, state: Dict[str, Any]) -> Dict[str, Any]: + """Process all agent scores and make a final decision. + + Args: + state: The current workflow state with all agent results + + Returns: + Dictionary containing the final evaluation decision + """ + self._initialize_planning_capability() + proposal_id = state.get("proposal_id", "unknown") + dao_id = state.get("dao_id") + agent_id = state.get("agent_id") + profile_id = state.get("profile_id") + + # Add diagnostic logging + self.logger.info( + f"[DEBUG:ReasoningAgent:{proposal_id}] Starting reasoning agent process" + ) + self.logger.info( + f"[DEBUG:ReasoningAgent:{proposal_id}] State keys: {list(state.keys())}" + ) + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Helper function to safely get scores + def safe_get_score(value, default=0): + if isinstance(value, dict) and "score" in value: + return value["score"] + return default + + # Get individual scores + core_score = safe_get_score(state.get("core_score"), 0) + historical_score = safe_get_score(state.get("historical_score"), 0) + financial_score = safe_get_score(state.get("financial_score"), 0) + social_score = safe_get_score(state.get("social_score"), 0) + + # Get agent summaries + core_summary = state.get("summaries", {}).get( + "core_score", "No core context evaluation available." + ) + historical_summary = state.get("summaries", {}).get( + "historical_score", "No historical context evaluation available." + ) + financial_summary = state.get("summaries", {}).get( + "financial_score", "No financial evaluation available." + ) + social_summary = state.get("summaries", {}).get( + "social_score", "No social context evaluation available." + ) + + # Get flags + flags = state.get("flags", []) + flags_text = ( + "\n".join([f"- {flag}" for flag in flags]) + if flags + else "No flags identified." + ) + + # Calculate score statistics + scores = [ + ("Core", core_score), + ("Historical", historical_score), + ("Financial", financial_score), + ("Social", social_score), + ] + valid_scores = [score for _, score in scores if score > 0] + + if not valid_scores: + self.logger.error( + f"[DEBUG:ReasoningAgent:{proposal_id}] No valid scores found!" 
+ ) + return { + "score": 0, + "decision": "Reject", + "confidence": 0.0, + "explanation": "Unable to evaluate due to missing agent scores.", + "flags": ["Critical: No valid evaluation scores available."], + } + + # Calculate metrics + avg_score = sum(valid_scores) / len(valid_scores) + min_score = min(valid_scores) + max_score = max(valid_scores) + score_range = max_score - min_score + + # Detect if any agent has a veto-level score + has_veto = any(score <= self.veto_threshold for score in valid_scores) + + # Check for consensus or disagreement + has_consensus = score_range <= self.consensus_threshold + has_disagreement = score_range >= 30 + + # Format agent evaluations for prompt + agent_evaluations = f""" +Core Context Evaluation: +Score: {core_score}/100 +Summary: {core_summary} + +Historical Context Evaluation: +Score: {historical_score}/100 +Summary: {historical_summary} + +Financial Evaluation: +Score: {financial_score}/100 +Summary: {financial_summary} + +Social Context Evaluation: +Score: {social_score}/100 +Summary: {social_summary} + +Flags Identified: +{flags_text} + +Score Statistics: +- Average Score: {avg_score:.2f} +- Minimum Score: {min_score} +- Maximum Score: {max_score} +- Score Range: {score_range} +""" + + try: + # Create chat messages or get custom prompt template + messages_or_template = self._create_chat_messages( + agent_evaluations=agent_evaluations, + approval_threshold=self.default_threshold, + dao_id=dao_id, + agent_id=agent_id, + profile_id=profile_id, + ) + + # Handle both cases: list of messages or ChatPromptTemplate + if isinstance(messages_or_template, ChatPromptTemplate): + # Custom prompt injection returned a ChatPromptTemplate + prompt = messages_or_template + formatted_prompt = prompt.format() + else: + # Default case: list of messages + prompt = ChatPromptTemplate.from_messages(messages_or_template) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output(FinalOutput).ainvoke( + formatted_prompt + ) + result_dict = result.model_dump() + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["reasoning_agent"] = token_usage_data + result_dict["token_usage"] = token_usage_data + + # Add calculated metrics to result for transparency + result_dict["metrics"] = { + "avg_score": avg_score, + "min_score": min_score, + "max_score": max_score, + "score_range": score_range, + "has_veto": has_veto, + "has_consensus": has_consensus, + "has_disagreement": has_disagreement, + "score_validity": len(valid_scores) / 4.0, + "flag_count": len(flags), + "agent_scores": { + "core": core_score, + "historical": historical_score, + "financial": financial_score, + "social": social_score, + }, + } + + # Score and decision are now provided by the LLM in the structured output + # The LLM ensures score aligns with the 70-point approval threshold + + # Add flags to the result + result_dict["flags"] = flags + + # Update state with agent result + update_state_with_agent_result(state, result_dict, "final") + + # Add final diagnostic logging + self.logger.info( + f"[DEBUG:ReasoningAgent:{proposal_id}] Successfully completed reasoning" + ) + self.logger.info( + f"[DEBUG:ReasoningAgent:{proposal_id}] Returning result with decision: {result_dict.get('decision')}" + ) + self.logger.info( + f"[DEBUG:ReasoningAgent:{proposal_id}] Updated state now has keys: {list(state.keys())}" + ) + if "final_score" in state: + self.logger.info( + 
f"[DEBUG:ReasoningAgent:{proposal_id}] final_score type: {type(state.get('final_score'))}" + ) + + return result_dict + except Exception as e: + self.logger.error( + f"[DEBUG:ReasoningAgent:{proposal_id}] Error in reasoning: {str(e)}" + ) + return { + "score": 30, # Below threshold, ensuring rejection + "decision": "Reject", + "explanation": f"Evaluation failed due to error: {str(e)}", + "flags": [f"Error: {str(e)}"], + } diff --git a/services/ai/workflows/agents/social_context.py b/services/ai/workflows/agents/social_context.py new file mode 100644 index 00000000..786e1074 --- /dev/null +++ b/services/ai/workflows/agents/social_context.py @@ -0,0 +1,195 @@ +from typing import Any, Dict, List, Optional + +from langchain_core.prompts.chat import ChatPromptTemplate + +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.ai.workflows.utils.models import AgentOutput +from services.ai.workflows.utils.state_reducers import update_state_with_agent_result +from services.ai.workflows.utils.token_usage import TokenUsageMixin + +logger = configure_logger(__name__) + + +class SocialContextAgent(BaseCapabilityMixin, TokenUsageMixin, PromptCapability): + """Social Context Agent evaluates social and community aspects of proposals.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Social Context Agent. + + Args: + config: Optional configuration dictionary + """ + BaseCapabilityMixin.__init__(self, config=config, state_key="social_score") + TokenUsageMixin.__init__(self) + PromptCapability.__init__(self) + self.initialize() + + def _create_chat_messages( + self, + proposal_content: str, + community_info: str, + search_results: str, + proposal_images: List[Dict[str, Any]] = None, + ) -> List: + """Create chat messages for social context evaluation. + + Args: + proposal_content: The proposal content to evaluate + community_info: Information about the DAO community + search_results: External context from web search + proposal_images: List of processed images + + Returns: + List of chat messages + """ + # System message with social evaluation guidelines + system_content = """You are an expert community analyst specializing in DAO governance and social dynamics. Your role is to evaluate proposals from a community perspective, ensuring they serve the broader membership and align with community values. + +You must plan extensively before each evaluation and reflect thoroughly on the social implications. Consider both immediate community impact and long-term social dynamics. + +**Image Evaluation**: If images are attached to this proposal, they are an integral part of the proposal content. You must carefully examine and evaluate any provided images, considering how they support, clarify, or relate to the written proposal. Images may contain community diagrams, engagement plans, social impact visualizations, user interface mockups, or other visual information that is essential to understanding the social aspects and community impact of the proposal. Include your analysis of the visual content in your overall social evaluation. + +Evaluation Criteria (weighted): +- Community benefit and inclusion (40% weight) +- Alignment with community values and interests (30% weight) +- Potential for community engagement (20% weight) +- Consideration of diverse stakeholders (10% weight) + +Key Considerations: +- Will this proposal benefit the broader community or just a few members? 
+- Is there likely community support or opposition? +- Does it foster inclusivity and participation? +- Does it align with the community's values and interests? +- Could it cause controversy or division? +- Does it consider the needs of diverse stakeholders? + +Scoring Guide: +- 0-20: No benefit, misaligned, or divisive +- 21-50: Significant issues or missing details +- 51-70: Adequate but with some concerns or minor risks +- 71-90: Good benefit, aligned, and inclusive +- 91-100: Excellent benefit, highly aligned, and unifying + +Output Format: +Provide a JSON object with exactly these fields: +- score: A number from 0-100 +- flags: Array of any critical social issues or red flags +- summary: Brief summary of your social evaluation""" + + # User message with specific social context and evaluation request + user_content = f"""Please evaluate the social and community aspects of the following proposal: + +Proposal to Evaluate: +{proposal_content} + +Community Information: +{community_info} + +External Context: +{search_results} + +Based on the evaluation criteria and community context, provide your assessment of how this proposal will impact the community, whether it aligns with community values, and its potential for fostering engagement and inclusion.""" + + messages = [{"role": "system", "content": system_content}] + + # Create user message content - start with text + user_message_content = [{"type": "text", "text": user_content}] + + # Add images if available + if proposal_images: + for image in proposal_images: + if image.get("type") == "image_url": + # Add detail parameter if not present + image_with_detail = image.copy() + if "detail" not in image_with_detail.get("image_url", {}): + image_with_detail["image_url"]["detail"] = "auto" + user_message_content.append(image_with_detail) + + # Add the user message + messages.append({"role": "user", "content": user_message_content}) + + return messages + + async def process(self, state: Dict[str, Any]) -> Dict[str, Any]: + """Process the proposal's social context. 
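+
+        Web search is disabled for this evaluation. Community information
+        (size, active members, governance participation, recent sentiment)
+        is read from the agent's config under "community_context", and any
+        proposal images present in the state are passed to the LLM alongside
+        the proposal text. On failure, a neutral score of 50 is returned
+        with the error recorded as a flag.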
+ + Args: + state: The current workflow state + + Returns: + Dictionary containing social evaluation results + """ + proposal_id = state.get("proposal_id", "unknown") + proposal_content = state.get("proposal_content", "") + state.get("dao_id") + state.get("agent_id") + state.get("profile_id") + + # Initialize token usage tracking in state if not present + if "token_usage" not in state: + state["token_usage"] = {} + + # Web search is disabled for social context evaluation + search_results_text = "Web search is disabled for social context evaluation.\n" + + # Get community info from config + community_context = self.config.get("community_context", {}) + community_size = community_context.get("community_size", "Unknown") + active_members = community_context.get("active_members", "Unknown") + governance_participation = community_context.get( + "governance_participation", "Low" + ) + recent_sentiment = community_context.get("recent_sentiment", "Neutral") + + community_info = f""" +Community Size: {community_size} +Active Members: {active_members} +Governance Participation: {governance_participation} +Recent Community Sentiment: {recent_sentiment} +""" + + # Get proposal images + proposal_images = state.get("proposal_images", []) + + try: + # Create chat messages + messages = self._create_chat_messages( + proposal_content=proposal_content, + community_info=community_info, + search_results=search_results_text, + proposal_images=proposal_images, + ) + + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + + # Get structured output from the LLM + result = await self.llm.with_structured_output(AgentOutput).ainvoke( + formatted_prompt + ) + result_dict = result.model_dump() + + # Track token usage + token_usage_data = self.track_token_usage(str(formatted_prompt), result) + state["token_usage"]["social_agent"] = token_usage_data + result_dict["token_usage"] = token_usage_data + result_dict["images_processed"] = len(proposal_images) + + # Update state with agent result + update_state_with_agent_result(state, result_dict, "social") + return result_dict + except Exception as e: + self.logger.error( + f"[DEBUG:SocialAgent:{proposal_id}] Error in social evaluation: {str(e)}" + ) + return { + "score": 50, + "flags": [f"Error: {str(e)}"], + "summary": "Social evaluation failed due to error", + "images_processed": len(proposal_images) if proposal_images else 0, + } diff --git a/services/ai/workflows/agents/twitter_processing.py b/services/ai/workflows/agents/twitter_processing.py new file mode 100644 index 00000000..f506c40e --- /dev/null +++ b/services/ai/workflows/agents/twitter_processing.py @@ -0,0 +1,408 @@ +import re +from typing import Any, Dict, List, Optional + +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import BaseCapabilityMixin +from services.communication.twitter_service import create_twitter_service_from_config + +logger = configure_logger(__name__) + + +class TwitterProcessingNode(BaseCapabilityMixin): + """Workflow node to process X/Twitter URLs: extract tweet IDs, fetch tweet content, and process tweet images.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the Twitter processing node. 
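+
+        The underlying Twitter service is not created here; it is
+        initialized lazily via create_twitter_service_from_config() the
+        first time a tweet needs to be fetched.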
+ + Args: + config: Optional configuration dictionary + """ + super().__init__(config=config, state_key="tweet_content") + self.initialize() + self.twitter_service = None + + async def _initialize_twitter_service(self): + """Initialize Twitter service if not already initialized.""" + if self.twitter_service is None: + try: + self.twitter_service = create_twitter_service_from_config() + await self.twitter_service._ainitialize() + self.logger.info("Twitter service initialized successfully") + except Exception as e: + self.logger.error(f"Failed to initialize Twitter service: {str(e)}") + self.twitter_service = None + + def _extract_twitter_urls(self, text: str) -> List[str]: + """Extract X/Twitter URLs from text. + + Args: + text: Text to search for Twitter URLs + + Returns: + List of Twitter URLs found + """ + # Pattern to match X.com and twitter.com URLs with status IDs + twitter_url_pattern = r"https?://(?:x\.com|twitter\.com)/[^/]+/status/(\d+)" + + # Return full URLs, not just IDs + urls = [] + for match in re.finditer(twitter_url_pattern, text, re.IGNORECASE): + urls.append(match.group(0)) + + return urls + + def _extract_tweet_id_from_url(self, url: str) -> Optional[str]: + """Extract tweet ID from X/Twitter URL. + + Args: + url: Twitter/X URL + + Returns: + Tweet ID if found, None otherwise + """ + pattern = r"https?://(?:x\.com|twitter\.com)/[^/]+/status/(\d+)" + match = re.search(pattern, url, re.IGNORECASE) + return match.group(1) if match else None + + async def _fetch_tweet_content(self, tweet_id: str) -> Optional[Dict[str, Any]]: + """Fetch tweet content using the Twitter service. + + Args: + tweet_id: Twitter status ID + + Returns: + Dictionary containing tweet data or None if failed + """ + try: + if not self.twitter_service: + await self._initialize_twitter_service() + if not self.twitter_service: + return None + + # Try API v2 first + tweet_response = await self.twitter_service.get_tweet_by_id(tweet_id) + if tweet_response and tweet_response.data: + tweet = tweet_response.data + tweet_data = { + "id": tweet.id, + "text": tweet.text, + "author_id": tweet.author_id, + "created_at": getattr(tweet, "created_at", None), + "public_metrics": getattr(tweet, "public_metrics", {}), + "entities": getattr(tweet, "entities", {}), + "attachments": getattr(tweet, "attachments", {}), + } + + # Extract author information from includes if available + if hasattr(tweet_response, "includes") and tweet_response.includes: + if "users" in tweet_response.includes: + # Find the author user in the includes + for user in tweet_response.includes["users"]: + user_id = None + if hasattr(user, "id"): + user_id = str(user.id) + elif isinstance(user, dict): + user_id = str(user.get("id", "")) + + if user_id == str(tweet.author_id): + # Extract user information + if hasattr(user, "name"): + tweet_data["author_name"] = user.name + elif isinstance(user, dict): + tweet_data["author_name"] = user.get("name", "") + + if hasattr(user, "username"): + tweet_data["author_username"] = user.username + elif isinstance(user, dict): + tweet_data["author_username"] = user.get( + "username", "" + ) + break + + if "media" in tweet_response.includes: + tweet_data["media_objects"] = tweet_response.includes["media"] + self.logger.debug( + f"Found {len(tweet_response.includes['media'])} media objects in API v2 response" + ) + + self.logger.info(f"Successfully fetched tweet {tweet_id} using API v2") + return tweet_data + + # Fallback to API v1.1 + status = await self.twitter_service.get_status_by_id(tweet_id) + if status: + 
tweet_data = { + "id": status.id_str, + "text": getattr(status, "full_text", status.text), + "author_id": status.user.id_str, + "author_username": status.user.screen_name, + "author_name": status.user.name, + "created_at": status.created_at, + "retweet_count": status.retweet_count, + "favorite_count": status.favorite_count, + "entities": getattr(status, "entities", None), + "extended_entities": getattr(status, "extended_entities", None), + } + self.logger.info( + f"Successfully fetched tweet {tweet_id} using API v1.1" + ) + return tweet_data + + self.logger.warning(f"Tweet {tweet_id} not found") + return None + + except Exception as e: + self.logger.error(f"Error fetching tweet {tweet_id}: {str(e)}") + return None + + def _extract_images_from_tweet(self, tweet_data: Dict[str, Any]) -> List[str]: + """Extract image URLs from tweet data. + + Args: + tweet_data: Tweet data dictionary + + Returns: + List of image URLs + """ + image_urls = [] + + try: + # Check extended_entities for media (API v1.1) + extended_entities = tweet_data.get("extended_entities") + if ( + extended_entities + and isinstance(extended_entities, dict) + and "media" in extended_entities + ): + for media in extended_entities["media"]: + if media.get("type") == "photo": + media_url = media.get("media_url_https") or media.get( + "media_url" + ) + if media_url: + image_urls.append(media_url) + + # Check entities for media (fallback) + entities = tweet_data.get("entities") + if entities and isinstance(entities, dict) and "media" in entities: + for media in entities["media"]: + if media.get("type") == "photo": + media_url = media.get("media_url_https") or media.get( + "media_url" + ) + if media_url: + image_urls.append(media_url) + + # Check attachments for media keys (API v2) + attachments = tweet_data.get("attachments") + if ( + attachments + and isinstance(attachments, dict) + and "media_keys" in attachments + ): + # For API v2, we need to check if media objects are in the tweet_data + # This happens when the API response includes expanded media + media_objects = tweet_data.get("media_objects", []) + for media in media_objects: + media_url = None + media_type = None + + # Handle media objects that might be Python objects or dictionaries + if hasattr(media, "type"): + media_type = media.type + elif isinstance(media, dict): + media_type = media.get("type") + + # Extract image URL based on media type + if media_type == "photo": + # For photos, get the direct URL + if hasattr(media, "url"): + media_url = media.url + elif isinstance(media, dict): + media_url = media.get("url") + + elif media_type in ["animated_gif", "video"]: + # For animated GIFs and videos, use the preview image URL + if hasattr(media, "preview_image_url"): + media_url = media.preview_image_url + elif isinstance(media, dict): + media_url = media.get("preview_image_url") + + # If we still don't have a URL, check nested data object + if not media_url: + data_obj = None + if hasattr(media, "data"): + data_obj = media.data + elif isinstance(media, dict) and "data" in media: + data_obj = media["data"] + + if data_obj: + if media_type == "photo": + if isinstance(data_obj, dict): + media_url = data_obj.get("url") + elif hasattr(data_obj, "url"): + media_url = data_obj.url + elif media_type in ["animated_gif", "video"]: + if isinstance(data_obj, dict): + media_url = data_obj.get("preview_image_url") + elif hasattr(data_obj, "preview_image_url"): + media_url = data_obj.preview_image_url + + if media_url: + image_urls.append(media_url) + self.logger.debug( + f"Extracted 
{media_type} image URL: {media_url}" + ) + + # If no media objects in tweet_data, log that media expansion is needed + if not media_objects: + self.logger.debug( + f"Found media keys in tweet, but no expanded media objects: {attachments['media_keys']}" + ) + + # Remove duplicates + image_urls = list(set(image_urls)) + + self.logger.debug( + f"Extracted {len(image_urls)} images from tweet: {image_urls}" + ) + + except Exception as e: + self.logger.error(f"Error extracting images from tweet: {str(e)}") + + return image_urls + + def _format_tweet_for_content(self, tweet_data: Dict[str, Any]) -> str: + """Format tweet data for inclusion in proposal content. + + Args: + tweet_data: Tweet data dictionary + + Returns: + Formatted tweet content + """ + try: + text = tweet_data.get("text", "") + author_name = tweet_data.get("author_name", "") + author_username = tweet_data.get("author_username", "") + created_at = tweet_data.get("created_at", "") + + # Format creation date + created_str = "" + if created_at: + try: + if hasattr(created_at, "strftime"): + created_str = created_at.strftime("%Y-%m-%d %H:%M:%S") + else: + created_str = str(created_at) + except (AttributeError, ValueError, TypeError): + created_str = str(created_at) + + formatted_tweet = f""" + + {author_name} (@{author_username}) + {created_str} + {text} + +""" + return formatted_tweet.strip() + + except Exception as e: + self.logger.error(f"Error formatting tweet content: {str(e)}") + return f"Error formatting tweet: {str(e)}" + + async def process(self, state: Dict[str, Any]) -> str: + """Process Twitter URLs in the proposal data. + + Args: + state: The current workflow state + + Returns: + Formatted tweet content string, and updates state with tweet images + """ + proposal_id = state.get("proposal_id", "unknown") + proposal_content = state.get("proposal_content", "") + + if not proposal_content: + self.logger.info( + f"[TwitterProcessorNode:{proposal_id}] No proposal_content, skipping." + ) + return "" + + self.logger.info( + f"[TwitterProcessorNode:{proposal_id}] Starting Twitter URL processing." + ) + + # Extract Twitter URLs + twitter_urls = self._extract_twitter_urls(proposal_content) + + if not twitter_urls: + self.logger.info( + f"[TwitterProcessorNode:{proposal_id}] No Twitter URLs found." + ) + return "" + + self.logger.info( + f"[TwitterProcessorNode:{proposal_id}] Found {len(twitter_urls)} Twitter URLs: {twitter_urls}" + ) + + tweet_contents = [] + tweet_images = [] + + for url in twitter_urls: + tweet_id = self._extract_tweet_id_from_url(url) + if not tweet_id: + self.logger.warning( + f"[TwitterProcessorNode:{proposal_id}] Could not extract tweet ID from URL: {url}" + ) + continue + + self.logger.debug( + f"[TwitterProcessorNode:{proposal_id}] Processing tweet ID: {tweet_id}" + ) + + # Fetch tweet content + tweet_data = await self._fetch_tweet_content(tweet_id) + if not tweet_data: + self.logger.warning( + f"[TwitterProcessorNode:{proposal_id}] Could not fetch tweet: {tweet_id}" + ) + continue + + # Format tweet content + formatted_tweet = self._format_tweet_for_content(tweet_data) + tweet_contents.append(formatted_tweet) + + self.logger.debug( + f"[TwitterProcessorNode:{proposal_id}] Formatted tweet content: {formatted_tweet[:200]}..." 
+ ) + + # Extract images from tweet + tweet_image_urls = self._extract_images_from_tweet(tweet_data) + for image_url in tweet_image_urls: + tweet_images.append( + { + "type": "image_url", + "image_url": {"url": image_url}, + "source": "tweet", + "tweet_id": tweet_id, + } + ) + + self.logger.debug( + f"[TwitterProcessorNode:{proposal_id}] Processed tweet {tweet_id}, found {len(tweet_image_urls)} images" + ) + + # Update state with tweet images (will be merged with proposal_images later) + if "tweet_images" not in state: + state["tweet_images"] = [] + state["tweet_images"].extend(tweet_images) + + # Combine all tweet content + combined_tweet_content = "\n\n".join(tweet_contents) if tweet_contents else "" + + self.logger.info( + f"[TwitterProcessorNode:{proposal_id}] Processed {len(tweet_contents)} tweets, found {len(tweet_images)} total images." + ) + + return combined_tweet_content diff --git a/services/workflows/react.py b/services/ai/workflows/base.py similarity index 67% rename from services/workflows/react.py rename to services/ai/workflows/base.py index d5742f90..2c473962 100644 --- a/services/workflows/react.py +++ b/services/ai/workflows/base.py @@ -1,37 +1,237 @@ -"""ReAct workflow functionality.""" - import asyncio import datetime +import json import uuid +from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import ( - Annotated, - Any, - AsyncGenerator, - Dict, - List, - Optional, - TypedDict, - Union, -) - -from langchain.callbacks.base import BaseCallbackHandler +from typing import Any, Dict, Generic, List, Optional, TypeVar, Union + +from langchain_core.callbacks import BaseCallbackHandler from langchain_core.messages import AIMessage, HumanMessage, SystemMessage -from langchain_core.outputs import LLMResult +from langchain_core.prompts.chat import ChatPromptTemplate from langchain_openai import ChatOpenAI -from langgraph.graph import END, START, StateGraph -from langgraph.graph.message import add_messages -from langgraph.prebuilt import ToolNode +from langgraph.graph import Graph, StateGraph from lib.logger import configure_logger -from services.workflows.base import BaseWorkflow, ExecutionError, StreamingError - -# Remove this import to avoid circular dependencies -# from services.workflows.workflow_service import BaseWorkflowService, WorkflowBuilder +from services.ai.workflows.utils.model_factory import create_chat_openai logger = configure_logger(__name__) +class LangGraphError(Exception): + """Base exception for LangGraph operations""" + + def __init__(self, message: str, details: Dict = None): + super().__init__(message) + self.details = details or {} + + +class StreamingError(LangGraphError): + """Raised when streaming operations fail""" + + pass + + +class ExecutionError(LangGraphError): + """Raised when graph execution fails""" + + pass + + +class ValidationError(LangGraphError): + """Raised when state validation fails""" + + pass + + +# Base state type for all workflows +StateType = TypeVar("StateType", bound=Dict[str, Any]) + + +class BaseWorkflow(Generic[StateType]): + """Base class for all LangGraph workflows. + + This class provides common functionality and patterns for all workflows. + Each workflow should inherit from this class and implement the required + methods. + """ + + def __init__( + self, + model_name: Optional[str] = None, + temperature: Optional[float] = None, + streaming: Optional[bool] = None, + callbacks: Optional[List[Any]] = None, + ): + """Initialize the workflow. + + Args: + model_name: LLM model to use. 
If None, uses default from ModelConfig + temperature: Temperature for LLM generation. If None, uses default from ModelConfig + streaming: Whether to enable streaming. If None, uses default from ModelConfig + callbacks: Optional callback handlers + """ + self.llm = create_chat_openai( + model=model_name, + temperature=temperature, + streaming=streaming, + callbacks=callbacks, + ) + self.logger = configure_logger(self.__class__.__name__) + self.required_fields: List[str] = [] + self.model_name = model_name + self.temperature = temperature + + def _clean_llm_response(self, content: str) -> str: + """Clean the LLM response content and ensure valid JSON.""" + try: + # First try to parse as-is in case it's already valid JSON + json.loads(content) + return content.strip() + except json.JSONDecodeError: + # If not valid JSON, try to extract from markdown blocks + if "```json" in content: + json_content = content.split("```json")[1].split("```")[0].strip() + elif "```" in content: + json_content = content.split("```")[1].split("```")[0].strip() + else: + json_content = content.strip() + + # Replace any Python boolean values with JSON boolean values + json_content = json_content.replace("True", "true").replace( + "False", "false" + ) + + # Validate the cleaned JSON + try: + json.loads(json_content) + return json_content + except json.JSONDecodeError as e: + self.logger.error(f"Failed to parse JSON after cleaning: {str(e)}") + raise ValueError(f"Invalid JSON response from LLM: {str(e)}") + + def create_llm_with_callbacks(self, callbacks: List[Any]) -> ChatOpenAI: + """Create a new LLM instance with specified callbacks. + + This is useful when you need to create a new LLM instance with different + callbacks or tools. + + Args: + callbacks: List of callback handlers + + Returns: + A new ChatOpenAI instance with the specified callbacks + """ + return create_chat_openai( + model=getattr(self.llm, "model_name", None), + temperature=getattr(self.llm, "temperature", None), + callbacks=callbacks, + ) + + def _create_prompt(self) -> ChatPromptTemplate: + """Create the chat prompt template for this workflow.""" + raise NotImplementedError("Workflow must implement _create_prompt") + + def _create_graph(self) -> Union[Graph, StateGraph]: + """Create the workflow graph.""" + raise NotImplementedError("Workflow must implement _create_graph") + + def _validate_state(self, state: StateType) -> bool: + """Validate the workflow state. + + This method checks if all required fields are present in the state. + Override this method to add custom validation logic. + + Args: + state: The state to validate + + Returns: + True if the state is valid, False otherwise + """ + if not self.required_fields: + # If no required fields specified, assume validation passes + return True + + # Check that all required fields are present and have values + return all( + field in state and state[field] is not None + for field in self.required_fields + ) + + def get_missing_fields(self, state: StateType) -> List[str]: + """Get a list of missing required fields in the state. + + Args: + state: The state to check + + Returns: + List of missing field names + """ + if not self.required_fields: + return [] + + return [ + field + for field in self.required_fields + if field not in state or state[field] is None + ] + + async def execute(self, initial_state: StateType) -> Dict: + """Execute the workflow with the given initial state. 
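+
+        The initial state is first validated against self.required_fields
+        (a ValidationError listing any missing fields is raised), then the
+        graph returned by _create_graph() is invoked asynchronously. Any
+        failure during execution is re-raised as an ExecutionError.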
+ + Args: + initial_state: Initial state for the workflow + + Returns: + Final state after workflow execution + """ + # Validate state + if not self._validate_state(initial_state): + error_message = f"Invalid initial state: {initial_state}" + self.logger.error(error_message) + missing = self.get_missing_fields(initial_state) + if missing: + error_message += f" Missing fields: {', '.join(missing)}" + raise ValidationError(error_message) + + # Create runtime workflow + app = self._create_graph() + + self.logger.debug( + f"[DEBUG:Workflow:{self.__class__.__name__}] State before ain_invoke: {json.dumps(initial_state, indent=2, default=str)}" + ) + try: + # Execute the workflow + result = await app.ainvoke(initial_state) + self.logger.debug( + f"[DEBUG:Workflow:{self.__class__.__name__}] State after ain_invoke: {json.dumps(result, indent=2, default=str)}" + ) + return result + except Exception as e: + error_message = f"Workflow execution failed: {str(e)}" + self.logger.error(error_message) + raise ExecutionError(error_message) from e + + +class BaseWorkflowMixin(ABC): + """Base mixin for adding capabilities to workflows. + + This is an abstract base class that defines the interface for + workflow capability mixins. Mixins can be combined to create + workflows with multiple capabilities. + """ + + @abstractmethod + def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: + """Integrate this capability with a graph. + + Args: + graph: The graph to integrate with + **kwargs: Additional arguments specific to this capability + """ + pass + + @dataclass class MessageContent: """Data class for message content""" @@ -338,7 +538,7 @@ def on_llm_new_token(self, token: str, **kwargs) -> None: phase = "planning" if planning_only else "processing" logger.debug(f"Received new token (length: {len(token)}, phase: {phase})") - def on_llm_end(self, response: LLMResult, **kwargs) -> None: + def on_llm_end(self, response, **kwargs) -> None: """Run when LLM ends running.""" logger.info("LLM processing completed") @@ -390,201 +590,3 @@ def on_llm_error(self, error: Exception, **kwargs) -> None: pass # Don't raise another error if this fails raise ExecutionError("LLM processing failed", {"error": str(error)}) - - -class ReactState(TypedDict): - """State for the ReAct workflow.""" - - messages: Annotated[list, add_messages] - - -class ReactWorkflow(BaseWorkflow[ReactState]): - """ReAct workflow implementation.""" - - def __init__( - self, - callback_handler: StreamingCallbackHandler, - tools: List[Any], - **kwargs, - ): - super().__init__(**kwargs) - self.callback_handler = callback_handler - self.tools = tools - # Create a new LLM instance with the callback handler - self.llm = self.create_llm_with_callbacks([callback_handler]).bind_tools(tools) - self.required_fields = ["messages"] - - def _create_prompt(self) -> None: - """Not used in ReAct workflow.""" - pass - - def _create_graph(self) -> StateGraph: - """Create the ReAct workflow graph.""" - tool_node = ToolNode(self.tools) - - def should_continue(state: ReactState) -> str: - messages = state["messages"] - last_message = messages[-1] - result = "tools" if last_message.tool_calls else END - logger.debug(f"Continue decision: {result}") - return result - - def call_model(state: ReactState) -> Dict: - logger.debug("Calling model with current state") - messages = state["messages"] - response = self.llm.invoke(messages) - logger.debug("Received model response") - return {"messages": [response]} - - workflow = StateGraph(ReactState) - 
workflow.add_node("agent", call_model) - workflow.add_node("tools", tool_node) - workflow.add_edge(START, "agent") - workflow.add_conditional_edges("agent", should_continue) - workflow.add_edge("tools", "agent") - - return workflow - - -class LangGraphService: - """Service for executing LangGraph operations""" - - def __init__(self): - """Initialize the service.""" - self.message_processor = MessageProcessor() - - async def _execute_stream_impl( - self, - messages: List[Union[SystemMessage, HumanMessage, AIMessage]], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - **kwargs, - ) -> AsyncGenerator[Dict, None]: - """Execute a ReAct stream using LangGraph. - - Args: - messages: Processed messages ready for the LLM - input_str: Current user input - persona: Optional persona to use - tools_map: Optional tools to use - **kwargs: Additional arguments - - Returns: - Async generator of result chunks - """ - try: - # Import here to avoid circular dependencies - from services.workflows.workflow_service import ( - BaseWorkflowService, - WorkflowBuilder, - ) - - # Setup queue and callbacks - callback_queue = asyncio.Queue() - loop = asyncio.get_running_loop() - - # Setup callback handler - callback_handler = self.setup_callback_handler(callback_queue, loop) - - # Create workflow using builder pattern - workflow = ( - WorkflowBuilder(ReactWorkflow) - .with_callback_handler(callback_handler) - .with_tools(list(tools_map.values()) if tools_map else []) - .build() - ) - - # Create graph and compile - graph = workflow._create_graph() - runnable = graph.compile() - - # Execute workflow with callbacks config - config = {"callbacks": [callback_handler]} - task = asyncio.create_task( - runnable.ainvoke({"messages": messages}, config=config) - ) - - # Stream results - async for chunk in self.stream_task_results(task, callback_queue): - yield chunk - - except Exception as e: - logger.error(f"Failed to execute ReAct stream: {str(e)}", exc_info=True) - raise ExecutionError(f"ReAct stream execution failed: {str(e)}") - - def setup_callback_handler(self, queue, loop): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService - - # Use the static method instead of instantiating BaseWorkflowService - return BaseWorkflowService.create_callback_handler(queue, loop) - - async def stream_task_results(self, task, queue): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService - - # Use the static method instead of instantiating BaseWorkflowService - async for chunk in BaseWorkflowService.stream_results_from_task( - task=task, callback_queue=queue, logger_name=self.__class__.__name__ - ): - yield chunk - - # Keep the old method for backward compatibility - async def execute_react_stream( - self, - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - ) -> AsyncGenerator[Dict, None]: - """Execute a ReAct stream using LangGraph.""" - # Process messages for backward compatibility - filtered_content = self.message_processor.extract_filtered_content(history) - messages = self.message_processor.convert_to_langchain_messages( - filtered_content, input_str, persona - ) - - # Call the new implementation - async for chunk in self._execute_stream_impl( - messages=messages, - input_str=input_str, - persona=persona, - tools_map=tools_map, - ): - yield chunk - - # Add execute_stream as alias for consistency across services - 
async def execute_stream( - self, - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - **kwargs, - ) -> AsyncGenerator[Dict, None]: - """Execute a workflow stream. - - This is an alias for execute_react_stream to maintain consistent API - across different workflow services. - """ - async for chunk in self.execute_react_stream( - history=history, - input_str=input_str, - persona=persona, - tools_map=tools_map, - ): - yield chunk - - -# Facade function for backward compatibility -async def execute_langgraph_stream( - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, -) -> AsyncGenerator[Dict, None]: - """Execute a ReAct stream using LangGraph with optional persona.""" - service = LangGraphService() - async for chunk in service.execute_stream(history, input_str, persona, tools_map): - yield chunk diff --git a/services/workflows/vector_preplan_react.py b/services/ai/workflows/chat.py similarity index 82% rename from services/workflows/vector_preplan_react.py rename to services/ai/workflows/chat.py index 20067308..5fb94c55 100644 --- a/services/workflows/vector_preplan_react.py +++ b/services/ai/workflows/chat.py @@ -1,9 +1,3 @@ -"""Vector-enabled PrePlan ReAct workflow. - -This workflow combines vector retrieval and planning capabilities -to first retrieve relevant context, create a plan, then execute the ReAct workflow. -""" - import asyncio from typing import ( Annotated, @@ -19,41 +13,42 @@ from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.messages import AIMessage, HumanMessage, SystemMessage -from langchain_openai import ChatOpenAI, OpenAIEmbeddings +from langchain_openai import OpenAIEmbeddings from langgraph.graph import END, START, StateGraph from langgraph.graph.message import add_messages from langgraph.prebuilt import ToolNode from backend.factory import backend from lib.logger import configure_logger -from services.workflows.base import ( +from services.ai.workflows.base import ( BaseWorkflow, ExecutionError, - PlanningCapability, + MessageProcessor, + StreamingCallbackHandler, +) +from services.ai.workflows.mixins.planning_mixin import PlanningCapability +from services.ai.workflows.mixins.vector_mixin import ( VectorRetrievalCapability, - WebSearchCapability, ) -from services.workflows.react import StreamingCallbackHandler - -# Remove this import to avoid circular dependencies -# from services.workflows.workflow_service import BaseWorkflowService, WorkflowBuilder +from services.ai.workflows.mixins.web_search_mixin import WebSearchCapability +from services.ai.workflows.utils.model_factory import create_planning_llm logger = configure_logger(__name__) -class VectorPreplanState(TypedDict): - """State for the Vector PrePlan ReAct workflow, combining both capabilities.""" +class ChatState(TypedDict): + """State for the Chat workflow, combining all capabilities.""" messages: Annotated[list, add_messages] vector_results: Optional[List[Document]] - web_search_results: Optional[List[Document]] # Add web search results + web_search_results: Optional[List[Document]] # Web search results plan: Optional[str] -class VectorPreplanReactWorkflow( - BaseWorkflow[VectorPreplanState], - VectorRetrievalCapability, +class ChatWorkflow( + BaseWorkflow[ChatState], PlanningCapability, + VectorRetrievalCapability, WebSearchCapability, ): """Workflow that combines vector retrieval and planning capabilities. 
@@ -93,9 +88,8 @@ def __init__( self.llm = self.create_llm_with_callbacks([callback_handler]).bind_tools(tools) # Create a separate LLM for planning with streaming enabled - self.planning_llm = ChatOpenAI( + self.planning_llm = create_planning_llm( model="o4-mini", - streaming=True, callbacks=[callback_handler], ) @@ -110,6 +104,18 @@ def __init__( self.persona = None self.tool_descriptions = None + # Initialize mixins + PlanningCapability.__init__( + self, + callback_handler=callback_handler, + planning_llm=self.planning_llm, + persona=self.persona, + tool_names=self.tool_names, + tool_descriptions=self.tool_descriptions, + ) + VectorRetrievalCapability.__init__(self) + WebSearchCapability.__init__(self) + def _create_prompt(self) -> None: """Not used in Vector PrePlan ReAct workflow.""" pass @@ -337,14 +343,14 @@ def _create_graph(self) -> StateGraph: tool_node = ToolNode(self.tools) logger.debug(f"Created tool node with {len(self.tools)} tools") - def should_continue(state: VectorPreplanState) -> str: + def should_continue(state: ChatState) -> str: messages = state["messages"] last_message = messages[-1] result = "tools" if last_message.tool_calls else END logger.debug(f"Continue decision: {result}") return result - async def retrieve_context(state: VectorPreplanState) -> Dict: + async def retrieve_context(state: ChatState) -> Dict: """Retrieve context from both vector store and web search.""" messages = state["messages"] last_user_message = None @@ -365,7 +371,7 @@ async def retrieve_context(state: VectorPreplanState) -> Dict: # Get web search results try: - web_results = await self.search_web(last_user_message) + web_results = await self.web_search(last_user_message) logger.info(f"Retrieved {len(web_results)} web search results") except Exception as e: logger.error(f"Web search failed: {str(e)}") @@ -373,7 +379,7 @@ async def retrieve_context(state: VectorPreplanState) -> Dict: return {"vector_results": vector_results, "web_search_results": web_results} - def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: + def call_model_with_context_and_plan(state: ChatState) -> Dict: """Call model with context, plan, and web search results.""" messages = state["messages"] vector_results = state.get("vector_results", []) @@ -391,17 +397,44 @@ def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: # Add web search results if available if web_results: - web_context = "\n\n".join( - [ - f"Web Search Result {i+1}:\n{result['page_content']}\nSource: {result['metadata'].get('source_urls', ['Unknown'])[0]}" - for i, result in enumerate(web_results) - ] - ) - web_message = SystemMessage( - content=f"Here are relevant web search results:\n\n{web_context}\n\n" - "Consider this information in your response if relevant." - ) - messages = [web_message] + messages + # Flatten web_results if it is a list of lists + if any(isinstance(r, list) for r in web_results): + # Only flatten one level + flat_results = [] + for r in web_results: + if isinstance(r, list): + flat_results.extend(r) + else: + flat_results.append(r) + web_results = flat_results + + web_context_chunks = [] + for i, result in enumerate(web_results): + if not isinstance(result, dict): + logger.warning( + f"Web search result at index {i} is not a dict: {type(result)}. Skipping." 
+ ) + continue + page_content = result.get("page_content") + metadata = result.get("metadata", {}) + source_urls = metadata.get("source_urls", ["Unknown"]) + if not isinstance(source_urls, list): + source_urls = [str(source_urls)] + if page_content is None: + logger.warning( + f"Web search result at index {i} missing 'page_content'. Skipping." + ) + continue + web_context_chunks.append( + f"Web Search Result {i + 1}:\n{page_content}\nSource: {source_urls[0]}" + ) + web_context = "\n\n".join(web_context_chunks) + if web_context: + web_message = SystemMessage( + content=f"Here are relevant web search results:\n\n{web_context}\n\n" + "Consider this information in your response if relevant." + ) + messages = [web_message] + messages # Add the plan as a system message if it exists and hasn't been added yet if plan is not None and not any( @@ -443,7 +476,7 @@ def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: response = self.llm.invoke(messages) return {"messages": [response]} - workflow = StateGraph(VectorPreplanState) + workflow = StateGraph(ChatState) # Add nodes workflow.add_node("context_retrieval", retrieve_context) @@ -460,33 +493,26 @@ def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: return workflow -class VectorPreplanLangGraphService: - """Service for executing Vector PrePlan React LangGraph operations""" +class ChatService: + """Service for executing Chat LangGraph operations.""" def __init__( self, collection_names: Union[str, List[str]], embeddings: Optional[Embeddings] = None, ): - # Import here to avoid circular imports - from services.workflows.react import MessageProcessor - self.collection_names = collection_names self.embeddings = embeddings or OpenAIEmbeddings() self.message_processor = MessageProcessor() def setup_callback_handler(self, queue, loop): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService + from services.ai.workflows.workflow_service import BaseWorkflowService - # Use the static method instead of instantiating BaseWorkflowService return BaseWorkflowService.create_callback_handler(queue, loop) async def stream_task_results(self, task, queue): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService + from services.ai.workflows.workflow_service import BaseWorkflowService - # Use the static method instead of instantiating BaseWorkflowService async for chunk in BaseWorkflowService.stream_results_from_task( task=task, callback_queue=queue, logger_name=self.__class__.__name__ ): @@ -500,32 +526,14 @@ async def _execute_stream_impl( tools_map: Optional[Dict] = None, **kwargs, ) -> AsyncGenerator[Dict, None]: - """Execute a Vector PrePlan React stream implementation. 
- - Args: - messages: Processed messages - input_str: Current user input - persona: Optional persona to use - tools_map: Optional tools to use - **kwargs: Additional arguments - - Returns: - Async generator of result chunks - """ try: - # Import here to avoid circular dependencies - from services.workflows.workflow_service import WorkflowBuilder + from services.ai.workflows.workflow_service import WorkflowBuilder - # Setup queue and callbacks callback_queue = asyncio.Queue() loop = asyncio.get_running_loop() - - # Setup callback handler callback_handler = self.setup_callback_handler(callback_queue, loop) - - # Create workflow using builder pattern workflow = ( - WorkflowBuilder(VectorPreplanReactWorkflow) + WorkflowBuilder(ChatWorkflow) .with_callback_handler(callback_handler) .with_tools(list(tools_map.values()) if tools_map else []) .build( @@ -533,17 +541,11 @@ async def _execute_stream_impl( embeddings=self.embeddings, ) ) - - # Store persona and tool information for planning if persona: - # Append decisiveness guidance to the persona decisive_guidance = "\n\nBe decisive and take action without asking for confirmation. When the user requests something, proceed directly with executing it rather than asking if they want you to do it." workflow.persona = persona + decisive_guidance - - # Store available tool names for planning if tools_map: workflow.tool_names = list(tools_map.keys()) - # Add tool descriptions to planning prompt tool_descriptions = "\n\nTOOL DESCRIPTIONS:\n" for name, tool in tools_map.items(): description = getattr( @@ -551,17 +553,12 @@ async def _execute_stream_impl( ) tool_descriptions += f"- {name}: {description}\n" workflow.tool_descriptions = tool_descriptions - - # First retrieve relevant documents from vector store logger.info( f"Retrieving documents from vector store for query: {input_str[:50]}..." 
) documents = await workflow.retrieve_from_vector_store(query=input_str) logger.info(f"Retrieved {len(documents)} documents from vector store") - - # Create plan with vector context try: - # The thought notes will be streamed through callbacks logger.info("Creating plan with vector context...") plan = await workflow.create_plan(input_str, context_docs=documents) logger.info(f"Plan created successfully with {len(plan)} characters") @@ -571,15 +568,10 @@ async def _execute_stream_impl( "type": "token", "content": "Proceeding directly to answer...\n\n", } - # No plan will be provided, letting the LLM handle the task naturally plan = None - - # Create graph and compile graph = workflow._create_graph() runnable = graph.compile() logger.info("Graph compiled successfully") - - # Execute workflow with callbacks config config = {"callbacks": [callback_handler]} task = asyncio.create_task( runnable.ainvoke( @@ -587,18 +579,12 @@ async def _execute_stream_impl( config=config, ) ) - - # Stream results async for chunk in self.stream_task_results(task, callback_queue): yield chunk - except Exception as e: - logger.error( - f"Failed to execute Vector PrePlan stream: {str(e)}", exc_info=True - ) - raise ExecutionError(f"Vector PrePlan stream execution failed: {str(e)}") + logger.error(f"Failed to execute Chat stream: {str(e)}", exc_info=True) + raise ExecutionError(f"Chat stream execution failed: {str(e)}") - # Add execute_stream method to maintain the same interface as BaseWorkflowService async def execute_stream( self, history: List[Dict], @@ -607,17 +593,10 @@ async def execute_stream( tools_map: Optional[Dict] = None, **kwargs, ) -> AsyncGenerator[Dict, None]: - """Execute a workflow stream. - - This processes the history and delegates to _execute_stream_impl. - """ - # Process messages filtered_content = self.message_processor.extract_filtered_content(history) messages = self.message_processor.convert_to_langchain_messages( filtered_content, input_str, persona ) - - # Call the implementation async for chunk in self._execute_stream_impl( messages=messages, input_str=input_str, @@ -629,7 +608,7 @@ async def execute_stream( # Facade function -async def execute_vector_preplan_stream( +async def execute_chat_stream( collection_names: Union[str, List[str]], history: List[Dict], input_str: str, @@ -637,30 +616,17 @@ async def execute_vector_preplan_stream( tools_map: Optional[Dict] = None, embeddings: Optional[Embeddings] = None, ) -> AsyncGenerator[Dict, None]: - """Execute a Vector PrePlan ReAct stream. + """Execute a Chat stream. This workflow combines vector retrieval and planning: 1. Retrieves relevant context from multiple vector stores 2. Creates a plan based on the user's query and retrieved context 3. 
Executes the ReAct workflow with both context and plan - - Args: - collection_names: Name(s) of the vector collections to use - history: Conversation history - input_str: Current user input - persona: Optional persona to use - tools_map: Optional tools to make available - embeddings: Optional embeddings model - - Returns: - Async generator of result chunks """ - # Initialize service and run stream embeddings = embeddings or OpenAIEmbeddings() - service = VectorPreplanLangGraphService( + service = ChatService( collection_names=collection_names, embeddings=embeddings, ) - async for chunk in service.execute_stream(history, input_str, persona, tools_map): yield chunk diff --git a/services/ai/workflows/comprehensive_evaluation.py b/services/ai/workflows/comprehensive_evaluation.py new file mode 100644 index 00000000..841d3089 --- /dev/null +++ b/services/ai/workflows/comprehensive_evaluation.py @@ -0,0 +1,159 @@ +from typing import Any, Dict, Optional + +from lib.logger import configure_logger +from services.ai.workflows.agents.evaluator import ComprehensiveEvaluatorAgent +from services.ai.workflows.agents.image_processing import ImageProcessingNode +from services.ai.workflows.agents.twitter_processing import TwitterProcessingNode +from services.ai.workflows.utils.models import ComprehensiveEvaluatorAgentProcessOutput + +logger = configure_logger(__name__) + + +async def evaluate_proposal_comprehensive( + proposal_id: str, + proposal_content: Optional[str] = None, + config: Optional[Dict[str, Any]] = None, + dao_id: Optional[str] = None, + agent_id: Optional[str] = None, + profile_id: Optional[str] = None, + custom_system_prompt: Optional[str] = None, + custom_user_prompt: Optional[str] = None, +) -> ComprehensiveEvaluatorAgentProcessOutput: + """Evaluate a proposal using the ComprehensiveEvaluatorAgent in a single pass. 
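+
+    The evaluation runs as a three-step pipeline: proposal images are
+    processed first, then any X/Twitter URLs are resolved into tweet text
+    and tweet images (merged with the proposal images), and finally the
+    ComprehensiveEvaluatorAgent produces the scored decision in a single
+    pass. If any step raises, a rejection result containing an "Error"
+    category is returned instead of propagating the exception.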
+ + Args: + proposal_id: Unique identifier for the proposal + proposal_content: Proposal content + config: Optional configuration for the agent + dao_id: Optional DAO ID + agent_id: Optional agent ID + profile_id: Optional profile ID + custom_system_prompt: Optional custom system prompt to override default + custom_user_prompt: Optional custom user prompt to override default + + Returns: + ComprehensiveEvaluatorAgentProcessOutput containing evaluation results + """ + # Set up configuration with defaults if not provided + if config is None: + config = {} + + try: + logger.info( + f"Starting comprehensive proposal evaluation for proposal {proposal_id}" + ) + + # Step 1: Process images first (if any) + logger.debug(f"[DEBUG:ComprehensiveEval:{proposal_id}] Processing images") + image_processor = ImageProcessingNode(config=config) + initial_state = { + "proposal_id": proposal_id, + "proposal_content": proposal_content, + "dao_id": dao_id, + "agent_id": agent_id, + "profile_id": profile_id, + } + + # Process images - the result is a list of processed image dictionaries + proposal_images = await image_processor.process(initial_state) + + # The ImageProcessingNode also updates the state automatically via BaseCapabilityMixin + # but we use the direct return value for clarity and immediate access + + logger.debug( + f"[DEBUG:ComprehensiveEval:{proposal_id}] Processed {len(proposal_images)} images" + ) + + # Step 2: Process Twitter content (if any) + logger.debug( + f"[DEBUG:ComprehensiveEval:{proposal_id}] Processing Twitter content" + ) + twitter_processor = TwitterProcessingNode(config=config) + + # Process Twitter URLs and get tweet content + tweet_content = await twitter_processor.process(initial_state) + + # Get tweet images from state (TwitterProcessingNode updates the state) + tweet_images = initial_state.get("tweet_images", []) + + # Combine proposal images and tweet images + all_proposal_images = proposal_images + tweet_images + + logger.debug( + f"[DEBUG:ComprehensiveEval:{proposal_id}] Processed Twitter content, found {len(tweet_images)} tweet images" + ) + + if tweet_content: + logger.debug( + f"[DEBUG:ComprehensiveEval:{proposal_id}] Tweet content length: {len(tweet_content)} characters" + ) + logger.debug( + f"[DEBUG:ComprehensiveEval:{proposal_id}] Tweet content preview: {tweet_content[:300]}..." 
+ ) + else: + logger.debug( + f"[DEBUG:ComprehensiveEval:{proposal_id}] No tweet content found" + ) + + # Step 3: Run comprehensive evaluation + logger.debug( + f"[DEBUG:ComprehensiveEval:{proposal_id}] Starting comprehensive evaluation" + ) + + # Create the comprehensive evaluator + evaluator = ComprehensiveEvaluatorAgent(config) + + # Create state for the evaluator + evaluator_state = { + "proposal_id": proposal_id, + "proposal_content": proposal_content, + "dao_id": dao_id, + "agent_id": agent_id, + "profile_id": profile_id, + "proposal_images": all_proposal_images, + "tweet_content": tweet_content, + "custom_system_prompt": custom_system_prompt, + "custom_user_prompt": custom_user_prompt, + "flags": [], + "summaries": {}, + "token_usage": {}, + } + + # Run the comprehensive evaluation + result: ComprehensiveEvaluatorAgentProcessOutput = await evaluator.process( + evaluator_state + ) + + logger.info( + f"[DEBUG:ComprehensiveEval:{proposal_id}] Evaluation complete, returning typed result" + ) + + logger.info( + f"Completed comprehensive proposal evaluation for proposal {proposal_id}: {'Approved' if result.decision else 'Rejected'}" + ) + return result + + except Exception as e: + logger.error(f"Error in comprehensive proposal evaluation: {str(e)}") + # Return a ComprehensiveEvaluatorAgentProcessOutput with error data + from services.ai.workflows.utils.models import EvaluationCategory + + return ComprehensiveEvaluatorAgentProcessOutput( + categories=[ + EvaluationCategory( + category="Error", + score=0, + weight=1.0, + reasoning=[ + f"Comprehensive evaluation failed due to error: {str(e)}" + ], + ) + ], + final_score=0, + decision=False, + explanation=f"Comprehensive evaluation failed due to error: {str(e)}", + flags=[f"Critical Error: {str(e)}"], + summary="Evaluation failed due to error", + token_usage={}, + images_processed=0, + ) diff --git a/services/ai/workflows/hierarchical_workflows.py b/services/ai/workflows/hierarchical_workflows.py new file mode 100644 index 00000000..6c92ce7b --- /dev/null +++ b/services/ai/workflows/hierarchical_workflows.py @@ -0,0 +1,476 @@ +"""Hierarchical Agent Teams (HAT) workflow implementation. + +This module provides the implementation for Hierarchical Agent Teams (HAT) +workflows where multiple specialized agents work together with a supervisor +coordinating their activities. 
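+
+The supervisor writes its routing decision to the "next_step" state key,
+which the graph's conditional edges use to select the next node (or END).
+
+Minimal usage sketch (illustrative only; the node names and routing
+predicate below are assumptions, not part of this module):
+
+    workflow = HierarchicalTeamWorkflow(name="proposal_team")
+    workflow.supervisor.map_step_to_node("evaluate", "core_agent")
+    workflow.set_supervisor_logic(
+        lambda state: "end" if state.get("final_score") else "evaluate"
+    )
+    workflow.set_halt_condition(lambda state: bool(state.get("halt")))
+    workflow.set_entry_point("core_agent")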
+""" + +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Union, + get_type_hints, +) + +from langgraph.channels.last_value import LastValue +from langgraph.graph import END, StateGraph + +from lib.logger import configure_logger +from services.ai.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + ComposableWorkflowMixin, + StateType, +) + + +# Define merge functions for managing parallel state updates +def append_list_fn(key, values): + """Append multiple list updates.""" + # Handle case where we're dealing with single strings or non-list values + result = [] + for value in values: + if isinstance(value, list): + result.extend(value) + else: + result.append(value) + return list(set(result)) # Deduplicate lists + + +def merge_dict_fn(key, values): + """Merge multiple dictionary updates.""" + # Handle cases where we might get non-dict values + result = {} + for value in values: + if isinstance(value, dict): + result.update(value) + elif value is not None: + # Try to convert to dict if possible, otherwise use as a key + try: + result.update(dict(value)) + except (ValueError, TypeError): + result[str(value)] = True + return result # Combine dictionaries + + +logger = configure_logger(__name__) + + +class SupervisorMixin(BaseCapabilityMixin): + """Mixin for implementing supervisor functionality in HAT workflows. + + The supervisor is responsible for routing between agents and + making decisions about workflow progression. + """ + + def __init__( + self, + config: Optional[Dict[str, Any]] = None, + routing_key: str = "next_step", + ): + """Initialize the supervisor mixin. + + Args: + config: Configuration dictionary + routing_key: Key in state to use for routing + """ + super().__init__(config=config, state_key=routing_key) + self.routing_key = routing_key + self.routing_map = {} + self.halt_condition = lambda state: False + # Default routing function (should be replaced with set_routing_logic) + self.routing_func = lambda state: "end" + + def set_routing_logic(self, routing_func: Callable) -> None: + """Set the routing function to determine the next step. + + Args: + routing_func: Function that takes the state and returns the next step + """ + self.routing_func = routing_func + + def set_halt_condition(self, halt_func: Callable) -> None: + """Set a condition that will halt the workflow. + + Args: + halt_func: Function that takes the state and returns a boolean + """ + self.halt_condition = halt_func + + def map_step_to_node(self, step_name: str, node_name: str) -> None: + """Map a step name to a node name. + + Args: + step_name: Name of the step in routing logic + node_name: Name of the node in the graph + """ + self.routing_map[step_name] = node_name + + def router(self, state: StateType) -> Union[str, List[str]]: + """Route to the next node(s) based on the state. + + Returns either a string node name or a list of node names for parallel execution. + """ + next_step = state[self.routing_key] + if next_step == "end" or next_step == END: + return END + return next_step + + async def process(self, state: StateType) -> Dict[str, Any]: + """Process the current state and determine the next step. 
+ + Args: + state: Current workflow state + + Returns: + Dict with next step information + """ + # Check if halt condition is met + if self.halt_condition(state): + return {"next_step": END, "reason": "halt_condition_met"} + + # Determine next step using routing logic + next_step = self.routing_func(state) + + # Handle special case for END constant + if next_step == "end": + next_step = END + + # Map to node name if a mapping exists + if isinstance(next_step, list): + # For parallel execution, map each item in the list + mapped_step = [self.routing_map.get(step, step) for step in next_step] + else: + mapped_step = self.routing_map.get(next_step, next_step) + + return { + "next_step": mapped_step, + "timestamp": state.get("timestamp", ""), + } + + def add_to_graph(self, graph: StateGraph, **kwargs) -> None: + """Add the supervisor to the graph. + + Args: + graph: StateGraph to add node to + **kwargs: Additional arguments + """ + node_name = kwargs.get("node_name", "supervisor") + + async def supervisor_node(state: StateType) -> StateType: + result = await self.process(state) + next_step = result["next_step"] + # Normalize "end" to END constant if needed + if next_step == "end": + next_step = END + state[self.routing_key] = next_step + + # Track supervisor completion + if "completed_steps" not in state: + state["completed_steps"] = set() + state["completed_steps"].add("supervisor") + + return state + + graph.add_node(node_name, supervisor_node) + + # Define conditional edges from supervisor to other nodes + def router(state: StateType) -> Union[str, List[str]]: + next_step = state[self.routing_key] + # Handle both string and list cases + if isinstance(next_step, list): + return next_step + if next_step == "end" or next_step == END: + return END + return next_step + + # Create a complete routing map that includes END + routing_map_with_end = { + **{step: step for step in self.routing_map.values()}, + "end": END, + END: END, + } + + # Add explicit entry for every node we might want to route to + for node in graph.nodes: + if ( + node not in routing_map_with_end + and node != "supervisor" + and node != END + ): + routing_map_with_end[node] = node + + # Add conditional edges with the complete routing map + graph.add_conditional_edges(node_name, router, routing_map_with_end) + + +class HierarchicalTeamWorkflow(ComposableWorkflowMixin): + """Implementation of a Hierarchical Agent Team workflow. + + This workflow orchestrates a team of specialized agents coordinated + by a supervisor to solve complex tasks. + """ + + def __init__(self, name: str = None, config: Optional[Dict[str, Any]] = None): + """Initialize the hierarchical team workflow. + + Args: + name: Name identifier for this workflow + config: Configuration dictionary + """ + super().__init__(name=name) + self.config = config or {} + self.supervisor = SupervisorMixin(config=self.config) + self.entry_point = None + + def set_entry_point(self, node_name: str) -> None: + """Set the entry point for the workflow. + + Args: + node_name: Name of the starting node + """ + self.entry_point = node_name + + def set_supervisor_logic(self, routing_func: Callable) -> None: + """Set the routing logic for the supervisor. + + Args: + routing_func: Function that determines the next step + """ + self.supervisor.set_routing_logic(routing_func) + + def set_halt_condition(self, halt_func: Callable) -> None: + """Set a condition that will halt the workflow. 
+ + Args: + halt_func: Function that takes the state and returns a boolean + """ + self.supervisor.set_halt_condition(halt_func) + + def add_parallel_execution( + self, from_node: str, to_nodes: List[str], merge_node: str + ) -> None: + """Add parallel execution paths to the workflow. + + Args: + from_node: Node where parallel execution begins + to_nodes: List of nodes to execute in parallel + merge_node: Node where results are merged + """ + self.parallel_executions = { + "from_node": from_node, + "to_nodes": to_nodes, + "merge_node": merge_node, + } + + def build_graph(self) -> StateGraph: + """Build the hierarchical team workflow graph. + + Returns: + StateGraph: The compiled workflow graph + """ + if not self.entry_point: + raise ValueError("Entry point must be set before building graph") + + # Create graph with the appropriate state type + state_type = self.config.get("state_type", Dict[str, Any]) + + # Create graph with minimum configuration + graph = StateGraph(state_type) + + # Get recursion limit to prevent infinite loops (will be passed to compile()) + recursion_limit = self.config.get("recursion_limit", 10) + self.logger.info(f"Setting recursion limit to {recursion_limit}") + + # Set up key-specific channels for concurrent updates + if hasattr(state_type, "__annotations__"): + type_hints = get_type_hints(state_type, include_extras=True) + for key, annotation in type_hints.items(): + # Check if it's an Annotated type with a merge function + if hasattr(annotation, "__metadata__") and callable( + annotation.__metadata__[-1] + ): + merge_func = annotation.__metadata__[-1] + field_type = annotation.__origin__ + # Use direct assignment of channels instead of config parameter + if key not in graph.channels: + if merge_func == append_list_fn: + channel = LastValue(field_type) + channel.reduce = merge_func + graph.channels[key] = channel + elif merge_func == merge_dict_fn: + channel = LastValue(field_type) + channel.reduce = merge_func + graph.channels[key] = channel + + # Add all sub-workflows to the graph + for name, workflow in self.sub_workflows.items(): + try: + workflow.add_to_graph(graph, node_name=name) + # Map step name to node name in supervisor + self.supervisor.map_step_to_node(name, name) + self.logger.debug(f"Added sub-workflow node: {name}") + except Exception as e: + self.logger.error( + f"Error adding sub-workflow {name}: {str(e)}", exc_info=True + ) + raise ValueError(f"Failed to add sub-workflow {name}: {str(e)}") + + # Add supervisor to graph + try: + self.supervisor.add_to_graph(graph) + self.logger.debug("Added supervisor node") + except Exception as e: + self.logger.error(f"Error adding supervisor: {str(e)}", exc_info=True) + raise ValueError(f"Failed to add supervisor: {str(e)}") + + # Set entry point + graph.set_entry_point(self.entry_point) + self.logger.debug(f"Set entry point to {self.entry_point}") + + # Connect entry point to supervisor + graph.add_edge(self.entry_point, "supervisor") + self.logger.debug(f"Added edge: {self.entry_point} -> supervisor") + + # Add edges from all nodes to supervisor + for name in self.sub_workflows.keys(): + if name != self.entry_point: + graph.add_edge(name, "supervisor") + self.logger.debug(f"Added edge: {name} -> supervisor") + + # Add parallel execution if configured + if hasattr(self, "parallel_executions"): + pe = self.parallel_executions + + # Define function for parallel branching + def branch_function(state: StateType) -> Dict: + """Branch to parallel nodes or return to supervisor based on state. 
+ + This returns both the next nodes and any state updates needed. + """ + # For debugging, log the state keys we care about + self.logger.debug( + f"Branch function evaluating state: " + f"historical_score={state.get('historical_score') is not None}, " + f"financial_score={state.get('financial_score') is not None}, " + f"social_score={state.get('social_score') is not None}, " + f"in_parallel={state.get('in_parallel_execution', False)}" + ) + + # Check if we're already in parallel execution + if state.get("in_parallel_execution", False): + # Check if all parallel executions have completed + all_completed = True + for node_name in pe["to_nodes"]: + score_key = f"{node_name.replace('_agent', '')}_score" + if state.get(score_key) is None: + all_completed = False + break + + if all_completed: + self.logger.debug( + f"All parallel nodes complete, routing to {pe['merge_node']}" + ) + # Return to merge node and clear the in_parallel_execution flag + return { + "nodes": [pe["merge_node"]], + "state_updates": {"in_parallel_execution": False}, + } + else: + # Still waiting for some parallel nodes to complete, let supervisor route + self.logger.debug( + "Some parallel nodes still executing, continuing parallel processing" + ) + # Force parallel execution to stay on + return { + "nodes": ["supervisor"], + "state_updates": {"in_parallel_execution": True}, + } + + # When historical_score is set but financial_score and social_score are not, + # we need to branch to both financial_agent and social_agent in parallel + elif state.get("historical_score") is not None and all( + state.get(f"{node_name.replace('_agent', '')}_score") is None + for node_name in pe["to_nodes"] + ): + self.logger.debug( + f"Starting parallel execution, branching to nodes: {pe['to_nodes']}" + ) + # Set the in_parallel_execution flag to True + return { + "nodes": pe["to_nodes"], + "state_updates": {"in_parallel_execution": True}, + } + + # Default case, return to supervisor for normal routing + # Make sure we're not stuck in a loop + self.logger.debug("Not branching, returning to supervisor") + + # We need to ensure that if historical_score exists but financial/social are missing, + # we maintain the parallel execution flag (this fixes the looping problem) + if state.get("historical_score") is not None and any( + state.get(f"{node_name.replace('_agent', '')}_score") is None + for node_name in pe["to_nodes"] + ): + return { + "nodes": ["supervisor"], + "state_updates": {"in_parallel_execution": True}, + } + + return {"nodes": ["supervisor"], "state_updates": {}} + + # For each parallel node, map it in the supervisor + for node in pe["to_nodes"]: + self.supervisor.map_step_to_node(node, node) + + # Add branching from source node + # We need to wrap our branch_function to handle state updates + def branch_wrapper(state: StateType) -> List[str]: + result = branch_function(state) + # Apply any state updates + for key, value in result.get("state_updates", {}).items(): + state[key] = value + # Return the nodes to route to + return result.get("nodes", ["supervisor"]) + + # Create a mapping for all possible nodes, including supervisor and END + branch_map = {node: node for node in pe["to_nodes"]} + branch_map["supervisor"] = "supervisor" + branch_map[pe["merge_node"]] = pe["merge_node"] + # Explicitly map END constant + branch_map[END] = END # Ensure END is correctly mapped + + # Add branching from source node using our wrapper + graph.add_conditional_edges(pe["from_node"], branch_wrapper, branch_map) + self.logger.debug( + f"Added 
conditional edges for parallel execution from {pe['from_node']}" + ) + + # Connect merge node to supervisor + graph.add_edge(pe["merge_node"], "supervisor") + self.logger.debug(f"Added edge: {pe['merge_node']} -> supervisor") + else: + # Even without explicit parallel execution, we need to make sure + # the supervisor can handle returning lists of nodes for parallel execution + self.logger.debug( + "No parallel execution configured, relying on supervisor for parallel routing" + ) + + # Compile the graph with the recursion limit configuration + compiled_graph = graph.compile( + name="HierarchicalTeamWorkflow", + checkpointer=None, + debug=self.config.get("debug", False), + ) + + # Pass recursion limit through with_config + compiled_graph = compiled_graph.with_config( + {"recursion_limit": recursion_limit} + ) + + self.logger.info("Compiled hierarchical team workflow graph") + + # Return the compiled graph + return compiled_graph diff --git a/services/ai/workflows/mixins/capability_mixins.py b/services/ai/workflows/mixins/capability_mixins.py new file mode 100644 index 00000000..b1b33de9 --- /dev/null +++ b/services/ai/workflows/mixins/capability_mixins.py @@ -0,0 +1,407 @@ +"""Standardized mixins for adding capabilities to LangGraph workflows. + +This module provides a standardized approach to creating and integrating +capabilities into LangGraph workflows through a mixin system. +""" + +from abc import ABC, abstractmethod +from typing import Any, Dict, Optional, TypeVar + +from langchain_core.prompts.chat import ChatPromptTemplate +from langgraph.graph import StateGraph + +from backend.factory import backend +from services.ai.workflows.utils.model_factory import create_chat_openai, ModelConfig +from backend.models import PromptFilter +from lib.logger import configure_logger + +logger = configure_logger(__name__) + +# Type variable for workflow states +StateType = TypeVar("StateType", bound=Dict[str, Any]) + + +class CapabilityMixin(ABC): + """Abstract base class for workflow capability mixins. + + All capability mixins should inherit from this class and implement + the required methods to ensure consistent integration with workflows. + """ + + @abstractmethod + def initialize(self, **kwargs) -> None: + """Initialize the capability with necessary configuration. + + Args: + **kwargs: Arbitrary keyword arguments for configuration + """ + pass + + @abstractmethod + def add_to_graph(self, graph: StateGraph, **kwargs) -> None: + """Add this capability's nodes and edges to a StateGraph. + + Args: + graph: The StateGraph to add nodes/edges to + **kwargs: Additional arguments specific to this capability + """ + pass + + +class BaseCapabilityMixin(CapabilityMixin): + """Base implementation of capability mixin with common functionality. + + Provides shared functionality for LLM configuration, state management, + and graph integration that most capability mixins can leverage. + """ + + def __init__( + self, + config: Optional[Dict[str, Any]] = None, + state_key: Optional[str] = None, + ): + """Initialize the base capability mixin. + + Args: + config: Configuration dictionary with settings like model_name, temperature + state_key: Key to use when updating the state dictionary + """ + self.config = config or {} + self.state_key = state_key + self.llm = None + self.logger = configure_logger(self.__class__.__name__) + + def initialize(self, **kwargs) -> None: + """Initialize the capability with LLM and other settings. 
+ + Args: + **kwargs: Additional configuration parameters + """ + # Update config with any passed kwargs + if kwargs: + self.config.update(kwargs) + + # Create the LLM instance + self.llm = create_chat_openai( + model=self.config.get("model_name"), + temperature=self.config.get("temperature"), + streaming=self.config.get("streaming"), + callbacks=self.config.get("callbacks"), + ) + + if "state_key" in kwargs: + self.state_key = kwargs["state_key"] + + self.logger.debug( + f"Initialized {self.__class__.__name__} with config: {self.config}" + ) + + def configure(self, state_key: str) -> None: + """Configure the state key for this capability. + + Args: + state_key: The key to use in the state dictionary + """ + self.state_key = state_key + + @abstractmethod + async def process(self, state: StateType) -> Dict[str, Any]: + """Process the current state and return updated values. + + Args: + state: Current workflow state + + Returns: + Dictionary with updated values to be added to the state + """ + pass + + def add_to_graph(self, graph: StateGraph, **kwargs) -> None: + """Add this capability as a node to the graph. + + Args: + graph: StateGraph to add node to + **kwargs: Additional arguments + """ + if not self.state_key: + raise ValueError(f"state_key must be set for {self.__class__.__name__}") + + node_name = kwargs.get("node_name", self.state_key) + + async def node_function(state: StateType) -> StateType: + """Node function that processes state and updates it. + + Args: + state: Current workflow state + + Returns: + Updated workflow state + """ + try: + result = await self.process(state) + # Update state with results + if isinstance(result, dict): + # If returning a dict, merge with state using the state_key + state[self.state_key] = result + elif isinstance(result, list): + # If returning a list, set it directly to the state_key + state[self.state_key] = result + elif result is not None: + # For any other non-None result, set it directly + state[self.state_key] = result + + # Track completion - add this node to completed_steps + if "completed_steps" not in state: + state["completed_steps"] = set() + state["completed_steps"].add(node_name) + + return state + except Exception as e: + self.logger.error(f"Error in node {node_name}: {str(e)}", exc_info=True) + # Add error to state + if "errors" not in state: + state["errors"] = [] + state["errors"].append( + { + "node": node_name, + "error": str(e), + "type": self.__class__.__name__, + } + ) + # Even on error, mark as completed to avoid infinite retries + if "completed_steps" not in state: + state["completed_steps"] = set() + state["completed_steps"].add(node_name) + + return state + + # Add the node to the graph + graph.add_node(node_name, node_function) + self.logger.debug(f"Added node {node_name} to graph") + + +class ComposableWorkflowMixin(CapabilityMixin): + """Mixin for creating composable workflows that can be nested. + + This mixin allows workflows to be composed of sub-workflows and + provides utilities for managing their execution and state sharing. + """ + + def __init__(self, name: str = None): + """Initialize the composable workflow mixin. + + Args: + name: Name identifier for this composable workflow + """ + self.name = name or self.__class__.__name__ + self.sub_workflows = {} + self.graph = None + self.logger = configure_logger(self.__class__.__name__) + + def initialize(self, **kwargs) -> None: + """Initialize the composable workflow. 
+ + Args: + **kwargs: Configuration parameters + """ + pass + + def add_sub_workflow( + self, + name: str, + workflow: CapabilityMixin, + config: Optional[Dict[str, Any]] = None, + ) -> None: + """Add a sub-workflow to this composable workflow. + + Args: + name: Name identifier for the sub-workflow + workflow: The workflow object to add + config: Configuration for the sub-workflow + """ + if config: + # Apply config to the sub-workflow + workflow.initialize(**config) + self.sub_workflows[name] = workflow + self.logger.debug(f"Added sub-workflow {name} to {self.name}") + + def build_graph(self) -> StateGraph: + """Build and return the composed workflow graph. + + Returns: + StateGraph: The compiled workflow graph + """ + raise NotImplementedError("Subclasses must implement build_graph") + + def add_to_graph(self, graph: StateGraph, **kwargs) -> None: + """Add this composable workflow to a parent graph. + + For composable workflows, this typically involves adding a + subgraph node that represents the entire nested workflow. + + Args: + graph: The parent StateGraph + **kwargs: Additional arguments + """ + raise NotImplementedError("Subclasses must implement add_to_graph") + + +class PromptCapability: + """Mixin that provides custom prompt functionality for agents.""" + + def __init__(self): + """Initialize the prompt capability.""" + if not hasattr(self, "logger"): + self.logger = configure_logger(self.__class__.__name__) + + def get_custom_prompt( + self, + dao_id: Optional[str] = None, + agent_id: Optional[str] = None, + profile_id: Optional[str] = None, + prompt_type: str = "evaluation", + ) -> Optional[Dict[str, Any]]: + """Fetch custom prompt for the given context. + + Only returns a custom prompt if there's an active agent_id set for the specific DAO. + Otherwise returns None. + + Args: + dao_id: DAO ID to check for agent-specific prompts + agent_id: Agent ID to find agent-specific prompts + profile_id: Not used in current implementation + prompt_type: Type of prompt (used in prompt_text search) + + Returns: + Dictionary containing prompt_text, model, and temperature if agent prompt found + """ + try: + # Only proceed if both dao_id and agent_id are provided + if not dao_id or not agent_id: + return None + + # Look for active agent-specific prompts only + agent_filter = PromptFilter(agent_id=agent_id, is_active=True) + agent_prompts = backend.list_prompts(agent_filter) + + # Filter prompts that might be relevant to this prompt type + relevant_prompts = [] + for prompt in agent_prompts: + if prompt.prompt_text and ( + prompt_type.lower() in prompt.prompt_text.lower() + or "evaluation" in prompt.prompt_text.lower() + or len(prompt.prompt_text) > 100 # Assume longer prompts are custom + ): + relevant_prompts.append(prompt) + + if relevant_prompts: + # Use the first relevant prompt found + best_prompt = relevant_prompts[0] + + self.logger.debug( + f"Using custom prompt for {prompt_type} from agent {agent_id}" + ) + + return { + "prompt_text": best_prompt.prompt_text, + "model": best_prompt.model or ModelConfig.get_default_model(), + "temperature": best_prompt.temperature + or ModelConfig.get_default_temperature(), + } + + except Exception as e: + self.logger.error(f"Error fetching custom prompt: {str(e)}") + + return None + + def apply_custom_prompt_settings(self, custom_prompt_data: Dict[str, Any]): + """Apply custom model and temperature settings if available. 
+ + Args: + custom_prompt_data: Dictionary containing model and temperature settings + """ + try: + if hasattr(self, "llm") and custom_prompt_data: + # Update LLM with custom settings + model = custom_prompt_data.get("model") + temperature = custom_prompt_data.get("temperature") + + if model != getattr( + self.llm, "model_name", None + ) or temperature != getattr(self.llm, "temperature", None): + self.llm = create_chat_openai( + model=model, + temperature=temperature, + ) + self.logger.debug( + f"Updated LLM settings: model={model}, temperature={temperature}" + ) + except Exception as e: + self.logger.error(f"Error applying custom prompt settings: {str(e)}") + + def create_chat_prompt_with_custom_injection( + self, + default_system_message: str, + default_user_message: str, + dao_id: Optional[str] = None, + agent_id: Optional[str] = None, + profile_id: Optional[str] = None, + prompt_type: str = "evaluation", + ) -> ChatPromptTemplate: + """Create a chat prompt template, injecting custom prompt at the top if available. + + Args: + default_system_message: Default system message content + default_user_message: Default user message content + dao_id: Optional DAO ID for custom prompt lookup + agent_id: Optional agent ID for custom prompt lookup + profile_id: Optional profile ID for custom prompt lookup + prompt_type: Type of prompt for filtering + + Returns: + ChatPromptTemplate with custom prompt injected or just default messages + """ + # Try to get custom prompt + custom_prompt_data = self.get_custom_prompt( + dao_id=dao_id, + agent_id=agent_id, + profile_id=profile_id, + prompt_type=prompt_type, + ) + + if custom_prompt_data: + # Apply custom model/temperature settings + self.apply_custom_prompt_settings(custom_prompt_data) + + # Inject custom prompt at the top of the system message + custom_prompt_text = custom_prompt_data["prompt_text"] + + # Add custom prompt section at the top of system message + enhanced_system_message = f"""IMPORTANT: The following custom instructions are provided by the user and should be treated with HIGHER PRIORITY than all other instructions. These are specific requirements that MUST be followed and should override any conflicting guidance in the default instructions below. + +USER CUSTOM INSTRUCTIONS: +{custom_prompt_text} + +CRITICAL: Always prioritize and follow the custom instructions above when they conflict with or supplement the default instructions. The user's custom guidance takes precedence. 
+ +--- + +{default_system_message}""" + + self.logger.debug( + f"Injecting custom prompt at top of {prompt_type} system message" + ) + + messages = [ + ("system", enhanced_system_message), + ("human", default_user_message), + ] + else: + # Use default messages as-is + self.logger.debug(f"Using default chat prompt template for {prompt_type}") + messages = [ + ("system", default_system_message), + ("human", default_user_message), + ] + + return ChatPromptTemplate.from_messages(messages) diff --git a/services/ai/workflows/mixins/planning_mixin.py b/services/ai/workflows/mixins/planning_mixin.py new file mode 100644 index 00000000..190199b8 --- /dev/null +++ b/services/ai/workflows/mixins/planning_mixin.py @@ -0,0 +1,176 @@ +import asyncio +from typing import Any, Dict, List, Optional, Tuple + +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_openai import ChatOpenAI + +from lib.logger import configure_logger +from services.ai.workflows.base import BaseWorkflowMixin +from services.ai.workflows.chat import StreamingCallbackHandler + +logger = configure_logger(__name__) + + +class PlanningCapability(BaseWorkflowMixin): + """Mixin that adds vector-aware planning capabilities to a workflow. + + This mixin generates a plan based on the user's query, retrieved vector context, + available tools, and persona. It streams planning tokens using a callback handler. + """ + + def __init__( + self, + callback_handler: StreamingCallbackHandler, + planning_llm: ChatOpenAI, + persona: Optional[str] = None, + tool_names: Optional[List[str]] = None, + tool_descriptions: Optional[str] = None, + **kwargs, + ): + """Initialize the planning capability. + + Args: + callback_handler: Handler for streaming planning tokens + planning_llm: LLM instance for planning + persona: Optional persona string + tool_names: Optional list of tool names + tool_descriptions: Optional tool descriptions string + **kwargs: Additional arguments + """ + super().__init__(**kwargs) if hasattr(super(), "__init__") else None + self.callback_handler = callback_handler + self.planning_llm = planning_llm + self.persona = persona + self.tool_names = tool_names or [] + self.tool_descriptions = tool_descriptions + + async def create_plan( + self, + query: str, + context_docs: Optional[List[Any]] = None, + **kwargs, + ) -> Tuple[str, Dict[str, Any]]: + """Create a plan based on the user's query and vector retrieval results. + + Args: + query: The user's query + context_docs: Optional retrieved context documents + **kwargs: Additional arguments + + Returns: + Tuple containing the generated plan (str) and token usage (dict) + """ + planning_prompt = f""" + You are an AI assistant planning a decisive response to the user's query. + + Write a few short sentences as if you're taking notes in a notebook about: + - What the user is asking for + - What information or tools you'll use to complete the task + - The exact actions you'll take to fulfill the request + + AIBTC DAO Context Information: + You are an AI governance agent integrated with an AIBTC DAO. Your role is to interact with the DAO's smart contracts + on behalf of token holders, either by assisting human users or by acting autonomously within the DAO's rules. The DAO + is governed entirely by its token holders through proposals – members submit proposals, vote on them, and if a proposal passes, + it is executed on-chain. 
Always maintain the integrity of the DAO's decentralized process: never bypass on-chain governance, + and ensure all actions strictly follow the DAO's smart contract rules and parameters. + + Your responsibilities include: + 1. Helping users create and submit proposals to the DAO + 2. Guiding users through the voting process + 3. Explaining how DAO contract interactions work + 4. Preventing invalid actions and detecting potential exploits + 5. In autonomous mode, monitoring DAO state, proposing actions, and voting according to governance rules + + When interacting with users about the DAO, always: + - Retrieve contract addresses automatically instead of asking users + - Validate transactions before submission + - Present clear summaries of proposed actions + - Verify eligibility and check voting power + - Format transactions precisely according to blockchain requirements + - Provide confirmation and feedback after actions + + DAO Tools Usage: + For ANY DAO-related request, use the appropriate DAO tools to access real-time information: + - Use dao_list tool to retrieve all DAOs, their tokens, and extensions + - Use dao_search tool to find specific DAOs by name, description, token name, symbol, or contract ID + - Do NOT hardcode DAO information or assumptions about contract addresses + - Always query for the latest DAO data through the tools rather than relying on static information + - When analyzing user requests, determine if they're asking about a specific DAO or need a list of DAOs + - After retrieving DAO information, use it to accurately guide users through governance processes + + Examples of effective DAO tool usage: + 1. If user asks about voting on a proposal: First use dao_search to find the specific DAO, then guide them with the correct contract details + 2. If user asks to list available DAOs: Use dao_list to retrieve current DAOs and present them clearly + 3. If user wants to create a proposal: Use dao_search to get the DAO details first, then assist with the proposal creation using the current contract addresses + + User Query: {query} + """ + if context_docs: + context_str = "\n\n".join( + [getattr(doc, "page_content", str(doc)) for doc in context_docs] + ) + planning_prompt += f"\n\nHere is additional context that may be helpful:\n\n{context_str}\n\nUse this context to inform your plan." 
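+        # Surface the available tool names (and descriptions, if provided) so the
+        # planning LLM can reference concrete capabilities in its notes.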
+ if self.tool_names: + tool_info = "\n\nTools available to you:\n" + for tool_name in self.tool_names: + tool_info += f"- {tool_name}\n" + planning_prompt += tool_info + if self.tool_descriptions: + planning_prompt += self.tool_descriptions + planning_messages = [] + if self.persona: + planning_messages.append(SystemMessage(content=self.persona)) + planning_messages.append(HumanMessage(content=planning_prompt)) + try: + logger.info( + "Creating thought process notes for user query with vector context" + ) + original_new_token = self.callback_handler.custom_on_llm_new_token + + async def planning_token_wrapper(token, **kwargs): + if asyncio.iscoroutinefunction(original_new_token): + await original_new_token(token, planning_only=True, **kwargs) + else: + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe( + self.callback_handler.queue.put( + { + "type": "token", + "content": token, + "status": "planning", + "planning_only": True, + } + ), + loop, + ) + + self.callback_handler.custom_on_llm_new_token = planning_token_wrapper + task = asyncio.create_task(self.planning_llm.ainvoke(planning_messages)) + response = await task + plan = response.content + token_usage = response.usage_metadata or { + "input_tokens": 0, + "output_tokens": 0, + "total_tokens": 0, + } + self.callback_handler.custom_on_llm_new_token = original_new_token + logger.info( + "Thought process notes created successfully with vector context" + ) + logger.debug(f"Notes content length: {len(plan)}") + logger.debug(f"Planning token usage: {token_usage}") + await self.callback_handler.process_step( + content=plan, role="assistant", thought="Planning Phase with Context" + ) + return plan, token_usage + except Exception as e: + if hasattr(self.callback_handler, "custom_on_llm_new_token"): + self.callback_handler.custom_on_llm_new_token = original_new_token + logger.error(f"Failed to create plan: {str(e)}", exc_info=True) + # Return empty plan and zero usage on error + return "Failed to create plan.", { + "input_tokens": 0, + "output_tokens": 0, + "total_tokens": 0, + } diff --git a/services/ai/workflows/mixins/vector_mixin.py b/services/ai/workflows/mixins/vector_mixin.py new file mode 100644 index 00000000..add45d96 --- /dev/null +++ b/services/ai/workflows/mixins/vector_mixin.py @@ -0,0 +1,202 @@ +from typing import Any, Dict, List, Optional + +from langchain_core.documents import Document +from langchain_openai import OpenAIEmbeddings +from langgraph.graph import StateGraph + +from backend.factory import backend +from config import config +from lib.logger import configure_logger +from services.ai.workflows.base import BaseWorkflowMixin + +logger = configure_logger(__name__) + + +def create_embedding_model() -> OpenAIEmbeddings: + """Create an OpenAI embeddings model using the configured settings. 
+ + Returns: + Configured OpenAIEmbeddings instance + """ + embedding_config = { + "model": config.embedding.default_model, + } + + # Add base_url if configured + if config.embedding.api_base: + embedding_config["base_url"] = config.embedding.api_base + + # Add api_key if configured + if config.embedding.api_key: + embedding_config["api_key"] = config.embedding.api_key + + logger.debug( + f"Creating OpenAI embeddings with model: {config.embedding.default_model}" + ) + return OpenAIEmbeddings(**embedding_config) + + +class VectorRetrievalCapability(BaseWorkflowMixin): + """Mixin that adds vector retrieval capabilities to a workflow.""" + + def __init__(self, *args, **kwargs): + """Initialize the vector retrieval capability.""" + super().__init__(*args, **kwargs) if hasattr(super(), "__init__") else None + self._init_vector_retrieval() + + def _init_vector_retrieval(self) -> None: + """Initialize vector retrieval attributes if not already initialized.""" + if not hasattr(self, "collection_names"): + self.collection_names = ["knowledge_collection", "dao_collection"] + if not hasattr(self, "embeddings"): + self.embeddings = create_embedding_model() + if not hasattr(self, "vector_results_cache"): + self.vector_results_cache = {} + + async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Document]: + """Retrieve relevant documents from multiple vector stores. + + Args: + query: The query to search for + **kwargs: Additional arguments (collection_name, embeddings, etc.) + + Returns: + List of retrieved documents + """ + try: + self._init_vector_retrieval() + if query in self.vector_results_cache: + logger.debug(f"Using cached vector results for query: {query}") + return self.vector_results_cache[query] + all_documents = [] + limit_per_collection = kwargs.get("limit", 4) + logger.debug( + f"Searching vector store: query={query} | limit_per_collection={limit_per_collection}" + ) + for collection_name in self.collection_names: + try: + vector_results = await backend.query_vectors( + collection_name=collection_name, + query_text=query, + limit=limit_per_collection, + embeddings=self.embeddings, + ) + documents = [ + Document( + page_content=doc.get("page_content", ""), + metadata={ + **doc.get("metadata", {}), + "collection_source": collection_name, + }, + ) + for doc in vector_results + ] + all_documents.extend(documents) + logger.debug( + f"Retrieved {len(documents)} documents from collection {collection_name}" + ) + except Exception as e: + logger.error( + f"Failed to retrieve from collection {collection_name}: {str(e)}", + exc_info=True, + ) + continue + logger.debug( + f"Retrieved total of {len(all_documents)} documents from all collections" + ) + self.vector_results_cache[query] = all_documents + return all_documents + except Exception as e: + logger.error(f"Vector store retrieval failed: {str(e)}", exc_info=True) + return [] + + def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: + """Integrate vector retrieval capability with a graph. + + This adds the vector retrieval capability to the graph by adding a node + that can perform vector searches when needed. 
+ + Args: + graph: The graph to integrate with + **kwargs: Additional arguments specific to vector retrieval including: + - collection_names: List of collection names to search + - limit_per_collection: Number of results per collection + """ + graph.add_node("vector_search", self.retrieve_from_vector_store) + if "process_vector_results" not in graph.nodes: + graph.add_node("process_vector_results", self._process_vector_results) + graph.add_edge("vector_search", "process_vector_results") + + async def _process_vector_results( + self, vector_results: List[Document], **kwargs + ) -> Dict[str, Any]: + """Process vector search results. + + Args: + vector_results: Results from vector search + **kwargs: Additional processing arguments + + Returns: + Processed results with metadata + """ + return { + "results": vector_results, + "metadata": { + "num_vector_results": len(vector_results), + "collection_sources": list( + set( + doc.metadata.get("collection_source", "unknown") + for doc in vector_results + ) + ), + }, + } + + +async def add_documents_to_vectors( + collection_name: str, + documents: List[Document], + embeddings: Optional[Any] = None, +) -> Dict[str, List[str]]: + """Add documents to a vector collection. + + Args: + collection_name: Name of the collection to add to + documents: List of LangChain Document objects + embeddings: Optional embeddings model to use. If None, uses configured model. + + Returns: + Dictionary mapping collection name to list of document IDs + """ + if embeddings is None: + embeddings = create_embedding_model() + + collection_doc_ids = {} + try: + try: + backend.get_vector_collection(collection_name) + except Exception: + embed_dim = config.embedding.dimensions + if hasattr(embeddings, "embedding_dim"): + embed_dim = embeddings.embedding_dim + backend.create_vector_collection(collection_name, dimensions=embed_dim) + texts = [doc.page_content for doc in documents] + embedding_vectors = embeddings.embed_documents(texts) + docs_for_storage = [ + {"page_content": doc.page_content, "embedding": embedding_vectors[i]} + for i, doc in enumerate(documents) + ] + metadata_list = [doc.metadata for doc in documents] + ids = await backend.add_vectors( + collection_name=collection_name, + documents=docs_for_storage, + metadata=metadata_list, + ) + collection_doc_ids[collection_name] = ids + logger.info(f"Added {len(ids)} documents to collection {collection_name}") + except Exception as e: + logger.error( + f"Failed to add documents to collection {collection_name}: {str(e)}" + ) + collection_doc_ids[collection_name] = [] + return collection_doc_ids diff --git a/services/ai/workflows/mixins/web_search_mixin.py b/services/ai/workflows/mixins/web_search_mixin.py new file mode 100644 index 00000000..b9e4cd7a --- /dev/null +++ b/services/ai/workflows/mixins/web_search_mixin.py @@ -0,0 +1,203 @@ +from typing import Any, Dict, List, Tuple + +from langgraph.graph import StateGraph +from openai import OpenAI + +from lib.logger import configure_logger +from services.ai.workflows.base import BaseWorkflowMixin + +logger = configure_logger(__name__) + + +class WebSearchCapability(BaseWorkflowMixin): + """Mixin that adds web search capabilities to a workflow using OpenAI Responses API.""" + + def __init__(self, *args, **kwargs): + """Initialize the web search capability.""" + # Initialize parent class if it exists + super().__init__(*args, **kwargs) if hasattr(super(), "__init__") else None + # Initialize our attributes + self._init_web_search() + + def _init_web_search(self) -> None: + 
"""Initialize web search attributes if not already initialized.""" + if not hasattr(self, "search_results_cache"): + self.search_results_cache = {} + if not hasattr(self, "client"): + self.client = OpenAI() + + async def web_search( + self, query: str, **kwargs + ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: + """Search the web using OpenAI Responses API. + + Args: + query: The search query + **kwargs: Additional search parameters like user_location and search_context_size + + Returns: + Tuple containing list of search results and token usage dict. + """ + try: + # Ensure initialization + self._init_web_search() + + # Check cache first + if query in self.search_results_cache: + logger.info(f"Using cached results for query: {query}") + return self.search_results_cache[query], { + "input_tokens": 0, + "output_tokens": 0, + "total_tokens": 0, + } + + # Configure web search tool + tool_config = { + "type": "web_search_preview", + "search_context_size": kwargs.get("search_context_size", "medium"), + } + + # Add user location if provided + if "user_location" in kwargs: + tool_config["user_location"] = kwargs["user_location"] + + # Make the API call + response = self.client.responses.create( + model="gpt-4.1", tools=[tool_config], input=query + ) + + # Extract token usage + token_usage = getattr(response, "usage", {}) + standardized_usage = { + "input_tokens": getattr(token_usage, "input_tokens", 0), + "output_tokens": getattr(token_usage, "output_tokens", 0), + "total_tokens": getattr(token_usage, "total_tokens", 0), + } + logger.debug(f"Web search token_usage: {standardized_usage}") + + # Extract output text + text_content = None + if hasattr(response, "output") and isinstance(response.output, list): + try: + first_output = response.output[0] + if ( + isinstance(first_output, dict) + and "content" in first_output + and isinstance(first_output["content"], list) + and len(first_output["content"]) > 0 + and "text" in first_output["content"][0] + ): + text_content = first_output["content"][0]["text"] + except Exception as e: + logger.warning(f"Failed to extract output text: {e}") + + if not text_content: + text_content = "No output text available." 
+ + # Defensive citation extraction (if present) + source_urls = [] + if hasattr(response, "citations"): + try: + source_urls = [ + { + "url": citation.url, + "title": getattr(citation, "title", ""), + "start_index": getattr(citation, "start_index", 0), + "end_index": getattr(citation, "end_index", 0), + } + for citation in response.citations + if hasattr(citation, "url") + ] + except Exception as e: + logger.warning(f"Failed to extract citations: {e}") + + if not source_urls: + source_urls = [ + { + "url": "No source URL available", + "title": "Generated Response", + "start_index": 0, + "end_index": len(text_content), + } + ] + + # Create document with content + doc = { + "page_content": text_content, + "metadata": { + "type": "web_search_result", + "source_urls": source_urls, + "query": query, + "timestamp": None, + }, + } + documents = [doc] + + # Cache the results + self.search_results_cache[query] = documents + + logger.info(f"Web search completed with {len(documents)} results") + return documents, standardized_usage + + except Exception as e: + logger.error(f"Web search failed: {str(e)}") + # Return empty list and zero usage on error + error_doc = [ + { + "page_content": "Web search failed to return results.", + "metadata": { + "type": "web_search_result", + "source_urls": [ + { + "url": "Error occurred during web search", + "title": "Error", + "start_index": 0, + "end_index": 0, + } + ], + "query": query, + "timestamp": None, + }, + } + ] + return error_doc, {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0} + + def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: + """Integrate web search capability with a graph. + + This adds the web search capability to the graph by adding a node + that can perform web searches when needed. + + Args: + graph: The graph to integrate with + **kwargs: Additional arguments specific to web search including: + - search_context_size: "low", "medium", or "high" + - user_location: dict with type, country, city, region + """ + # Add web search node + graph.add_node("web_search", self.web_search) + + # Add result processing node if needed + if "process_results" not in graph.nodes: + graph.add_node("process_results", self._process_results) + graph.add_edge("web_search", "process_results") + + async def _process_results( + self, web_results: List[Dict[str, Any]], **kwargs + ) -> Dict[str, Any]: + """Process web search results. 
+ + Args: + web_results: Results from web search + **kwargs: Additional processing arguments + + Returns: + Processed results with metadata + """ + return { + "results": web_results, + "metadata": { + "num_web_results": len(web_results), + "source_types": ["web_search"], + }, + } diff --git a/services/ai/workflows/proposal_evaluation.py b/services/ai/workflows/proposal_evaluation.py new file mode 100644 index 00000000..0856b34d --- /dev/null +++ b/services/ai/workflows/proposal_evaluation.py @@ -0,0 +1,437 @@ +import operator +from typing import Annotated, Any, Dict, List, Optional, TypedDict, Union + +from langgraph.graph import END, StateGraph + +from lib.logger import configure_logger +from services.ai.workflows.agents.core_context import CoreContextAgent +from services.ai.workflows.agents.financial_context import FinancialContextAgent +from services.ai.workflows.agents.historical_context import HistoricalContextAgent +from services.ai.workflows.agents.image_processing import ImageProcessingNode +from services.ai.workflows.agents.reasoning import ReasoningAgent +from services.ai.workflows.agents.social_context import SocialContextAgent +from services.ai.workflows.base import BaseWorkflow +from services.ai.workflows.hierarchical_workflows import ( + HierarchicalTeamWorkflow, + append_list_fn, +) +from services.ai.workflows.utils.state_reducers import ( + merge_dicts, + no_update_reducer, + set_once, +) +from services.ai.workflows.utils.model_factory import get_default_model_name + +logger = configure_logger(__name__) + + +class ProposalEvaluationState(TypedDict): + """Type definition for the proposal evaluation state.""" + + proposal_id: Annotated[str, no_update_reducer] + proposal_content: Annotated[str, no_update_reducer] + dao_id: Annotated[Optional[str], no_update_reducer] + agent_id: Annotated[Optional[str], no_update_reducer] + profile_id: Annotated[Optional[str], no_update_reducer] + core_score: Annotated[Optional[Dict[str, Any]], set_once] + historical_score: Annotated[Optional[Dict[str, Any]], set_once] + financial_score: Annotated[Optional[Dict[str, Any]], set_once] + social_score: Annotated[Optional[Dict[str, Any]], set_once] + final_score: Annotated[Optional[Dict[str, Any]], set_once] + flags: Annotated[List[str], append_list_fn] # Correctly appends lists + summaries: Annotated[Dict[str, str], merge_dicts] # Properly merges dictionaries + decision: Annotated[Optional[str], set_once] + halt: Annotated[bool, operator.or_] + token_usage: Annotated[ + Dict[str, Dict[str, int]], merge_dicts + ] # Properly merges dictionaries + # Improved state tracking + workflow_step: Annotated[str, lambda x, y: y[-1] if y else x] # Track current step + completed_steps: Annotated[ + set[str], lambda x, y: x.union(set(y)) if y else x + ] # Track completed steps + proposal_images: Annotated[Optional[List[Dict]], set_once] + + +class ProposalEvaluationWorkflow(BaseWorkflow[ProposalEvaluationState]): + """Main workflow for evaluating DAO proposals using a hierarchical team.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + """Initialize the proposal evaluation workflow. 
+ + Args: + config: Optional configuration dictionary + """ + super().__init__() + self.config = config or {} + self.hierarchical_workflow = HierarchicalTeamWorkflow( + name="ProposalEvaluation", + config={ + "state_type": ProposalEvaluationState, + "recursion_limit": self.config.get( + "recursion_limit", 15 + ), # Reduced limit + }, + ) + + # Initialize agents + image_processor_agent = ImageProcessingNode(config=self.config) + core_agent = CoreContextAgent(self.config) + historical_agent = HistoricalContextAgent(self.config) + financial_agent = FinancialContextAgent(self.config) + social_agent = SocialContextAgent(self.config) + reasoning_agent = ReasoningAgent(self.config) + + # Add agents to the workflow + self.hierarchical_workflow.add_sub_workflow( + "image_processor", image_processor_agent + ) + self.hierarchical_workflow.add_sub_workflow("core_agent", core_agent) + self.hierarchical_workflow.add_sub_workflow( + "historical_agent", historical_agent + ) + self.hierarchical_workflow.add_sub_workflow("financial_agent", financial_agent) + self.hierarchical_workflow.add_sub_workflow("social_agent", social_agent) + self.hierarchical_workflow.add_sub_workflow("reasoning_agent", reasoning_agent) + + # Set entry point and other workflow properties + self.hierarchical_workflow.set_entry_point("image_processor") + self.hierarchical_workflow.set_supervisor_logic(self._supervisor_logic) + self.hierarchical_workflow.set_halt_condition(self._halt_condition) + self.required_fields = ["proposal_id", "proposal_content"] + + def _supervisor_logic( + self, state: ProposalEvaluationState + ) -> Union[str, List[str]]: + """Determine which agent(s) to run next based on current state. + + Improved logic to prevent infinite loops and unnecessary re-executions. + + Args: + state: Current workflow state + + Returns: + String or list of strings identifying next agent(s) to run + """ + # Initialize workflow tracking + if "workflow_step" not in state: + state["workflow_step"] = "start" + if "completed_steps" not in state: + state["completed_steps"] = set() + + proposal_id = state.get("proposal_id", "unknown") + completed_steps = state.get("completed_steps", set()) + + logger.info( + f"[DEBUG:SupervisorLogic:{proposal_id}] Current step: {state.get('workflow_step')}, " + f"Completed: {completed_steps}" + ) + + # Step 1: Image processing (required first step) + if "proposal_images" not in state and "image_processor" not in completed_steps: + logger.debug( + f"[DEBUG:SupervisorLogic:{proposal_id}] Starting image processing" + ) + state["workflow_step"] = "image_processing" + return "image_processor" + + # Step 2: Core context evaluation (required after images) + if "core_score" not in state and "core_agent" not in completed_steps: + # Ensure images are processed first + if "proposal_images" not in state: + logger.warning( + f"[DEBUG:SupervisorLogic:{proposal_id}] Images not processed, but core agent requested" + ) + state["proposal_images"] = [] # Set empty images to proceed + + logger.debug( + f"[DEBUG:SupervisorLogic:{proposal_id}] Starting core evaluation" + ) + state["workflow_step"] = "core_evaluation" + return "core_agent" + + # Step 3: Parallel evaluation of specialized agents + specialized_agents = ["historical_agent", "financial_agent", "social_agent"] + specialized_scores = ["historical_score", "financial_score", "social_score"] + + # Check if core evaluation is complete + if "core_score" in state: + # Find which specialized agents haven't completed yet + pending_agents = [] + for agent, score_key in 
zip(specialized_agents, specialized_scores):
+                if score_key not in state and agent not in completed_steps:
+                    pending_agents.append(agent)
+
+            if pending_agents:
+                logger.debug(
+                    f"[DEBUG:SupervisorLogic:{proposal_id}] Running specialized agents: {pending_agents}"
+                )
+                state["workflow_step"] = "specialized_evaluation"
+                # Return all pending agents for parallel execution
+                return pending_agents
+
+        # Step 4: Final reasoning (only after all evaluations are complete)
+        all_scores_present = all(
+            score_key in state for score_key in ["core_score"] + specialized_scores
+        )
+
+        if (
+            all_scores_present
+            and "final_score" not in state
+            and "reasoning_agent" not in completed_steps
+        ):
+            logger.debug(
+                f"[DEBUG:SupervisorLogic:{proposal_id}] Starting final reasoning"
+            )
+            state["workflow_step"] = "final_reasoning"
+            return "reasoning_agent"
+
+        # Step 5: Workflow complete
+        if "final_score" in state:
+            logger.info(f"[DEBUG:SupervisorLogic:{proposal_id}] Workflow complete")
+            state["workflow_step"] = "complete"
+            return END
+
+        # Error state - should not reach here
+        logger.error(
+            f"[DEBUG:SupervisorLogic:{proposal_id}] Unexpected state - "
+            f"core_score: {'core_score' in state}, "
+            f"specialized scores: {[key in state for key in specialized_scores]}, "
+            f"final_score: {'final_score' in state}, "
+            f"completed_steps: {completed_steps}"
+        )
+
+        # Force completion if we're in an unexpected state
+        return END
+
+    def _halt_condition(self, state: ProposalEvaluationState) -> bool:
+        """Determine if the workflow should halt early.
+
+        Halts on an explicit halt flag, repeated attempts on the same workflow
+        step, or a reasoning agent that completed without producing a final score.
+
+        Args:
+            state: Current workflow state
+
+        Returns:
+            True if workflow should halt, False otherwise
+        """
+        proposal_id = state.get("proposal_id", "unknown")
+
+        # Halt if explicitly set
+        if state.get("halt", False):
+            logger.info(
+                f"[DEBUG:HaltCondition:{proposal_id}] Halting due to explicit halt flag"
+            )
+            return True
+
+        # Check for circular dependencies or infinite loops
+        workflow_step = state.get("workflow_step", "start")
+        completed_steps = state.get("completed_steps", set())
+
+        # If we've been on the same step too long, something is wrong
+        if "_step_attempts" in state:
+            state["_step_attempts"][workflow_step] = (
+                state["_step_attempts"].get(workflow_step, 0) + 1
+            )
+            if state["_step_attempts"][workflow_step] > 3:
+                logger.error(
+                    f"[DEBUG:HaltCondition:{proposal_id}] Too many attempts on step {workflow_step}"
+                )
+                state["flags"] = state.get("flags", []) + [
+                    f"Workflow halted: Too many attempts on step {workflow_step}"
+                ]
+                return True
+        else:
+            state["_step_attempts"] = {workflow_step: 1}
+
+        # If the reasoning agent has run but no final score was produced, halt
+        if (
+            all(
+                key in state
+                for key in [
+                    "core_score",
+                    "historical_score",
+                    "financial_score",
+                    "social_score",
+                ]
+            )
+            and "final_score" not in state
+            and "reasoning_agent" in completed_steps
+        ):
+            logger.error(
+                f"[DEBUG:HaltCondition:{proposal_id}] Reasoning agent completed but no final score"
+            )
+            state["flags"] = state.get("flags", []) + [
+                "Workflow halted: Reasoning agent failed to produce final score"
+            ]
+            return True
+
+        return False
+
+    def _create_graph(self) -> StateGraph:
+        """Create the workflow graph.
+ + Returns: + The constructed state graph + """ + return self.hierarchical_workflow.build_graph() + + def _validate_state(self, state: ProposalEvaluationState) -> bool: + """Validate that the state contains required fields. + + Args: + state: Current workflow state + + Returns: + True if state is valid, False otherwise + """ + for field in self.required_fields: + if field not in state: + self.logger.error( + f"[ProposalEvaluation] Missing required field: {field}" + ) + return False + return True + + +async def evaluate_proposal( + proposal_id: str, + proposal_content: str, + config: Optional[Dict[str, Any]] = None, + dao_id: Optional[str] = None, + agent_id: Optional[str] = None, + profile_id: Optional[str] = None, +) -> Dict[str, Any]: + """Evaluate a proposal using the ProposalEvaluationWorkflow. + + Args: + proposal_id: Unique identifier for the proposal + proposal_content: Proposal content + config: Optional configuration for the workflow + dao_id: Optional DAO ID + agent_id: Optional agent ID + profile_id: Optional profile ID + + Returns: + Dictionary containing evaluation results + """ + # Set up configuration with defaults if not provided + if config is None: + config = {} + + # Use model name from config or default + model_name = config.get("model_name", get_default_model_name()) + + workflow = ProposalEvaluationWorkflow(config) + + # Create initial state with improved tracking + initial_state = { + "proposal_id": proposal_id, + "proposal_content": proposal_content, + "dao_id": dao_id, + "agent_id": agent_id, + "profile_id": profile_id, + "flags": [], + "summaries": {}, + "token_usage": {}, + "halt": False, + "workflow_step": "start", + "completed_steps": set(), + } + + # Run workflow + try: + logger.info(f"Starting proposal evaluation for proposal {proposal_id}") + result = await workflow.execute(initial_state) + + logger.info( + f"[DEBUG:EXTRACT] Workflow execution complete, result keys: {list(result.keys())}" + ) + logger.info(f"[DEBUG:EXTRACT] final_score in result: {'final_score' in result}") + if "final_score" in result: + logger.info( + f"[DEBUG:EXTRACT] final_score type: {type(result['final_score'])}" + ) + logger.info(f"[DEBUG:EXTRACT] final_score content: {result['final_score']}") + + # Extract results + def safe_extract_score(value, default=0): + """Safely extract a score from a potentially complex structure.""" + if isinstance(value, dict) and "score" in value: + return value["score"] + return default + + # Get all scores for reporting + core_score = safe_extract_score(result.get("core_score")) + historical_score = safe_extract_score(result.get("historical_score")) + financial_score = safe_extract_score(result.get("financial_score")) + social_score = safe_extract_score(result.get("social_score")) + final_score = safe_extract_score(result.get("final_score")) + + # Get decision + final_decision = "Undecided" + final_explanation = "No final decision was reached." + + if isinstance(result.get("final_score"), dict): + final_decision = result["final_score"].get("decision", "Undecided") + final_explanation = result["final_score"].get( + "explanation", "No explanation provided." 
+ ) + + # Determine approval based on final score and threshold + approval = final_score >= 70 + + # Compile token usage + token_usage = result.get("token_usage", {}) + total_token_usage = { + "input_tokens": 0, + "output_tokens": 0, + "total_tokens": 0, + } + + for agent_usage in token_usage.values(): + total_token_usage["input_tokens"] += agent_usage.get("input_tokens", 0) + total_token_usage["output_tokens"] += agent_usage.get("output_tokens", 0) + total_token_usage["total_tokens"] += agent_usage.get("total_tokens", 0) + + # Return formatted result + evaluation_result = { + "proposal_id": proposal_id, + "approve": approval, + "overall_score": final_score, + "reasoning": final_explanation, + "scores": { + "core": core_score, + "historical": historical_score, + "financial": financial_score, + "social": social_score, + "final": final_score, + }, + "flags": result.get("flags", []), + "summaries": result.get("summaries", {}), + "token_usage": total_token_usage, + "model_name": model_name, + "workflow_step": result.get("workflow_step", "unknown"), + "completed_steps": list(result.get("completed_steps", set())), + "evaluation_type": "multi_agent_workflow", + } + + logger.info( + f"Completed proposal evaluation for proposal {proposal_id}: {final_decision}" + ) + return evaluation_result + + except Exception as e: + logger.error(f"Error in proposal evaluation: {str(e)}") + return { + "proposal_id": proposal_id, + "approve": False, + "overall_score": 0, + "reasoning": f"Evaluation failed due to error: {str(e)}", + "error": str(e), + "evaluation_type": "multi_agent_workflow_error", + } diff --git a/services/ai/workflows/tweet_analysis.py b/services/ai/workflows/tweet_analysis.py new file mode 100644 index 00000000..d0da6a30 --- /dev/null +++ b/services/ai/workflows/tweet_analysis.py @@ -0,0 +1,214 @@ +from typing import Dict, Optional, TypedDict + +from langchain_core.prompts.chat import ChatPromptTemplate +from langgraph.graph import END, Graph, StateGraph +from pydantic import BaseModel, Field + +from backend.factory import backend +from backend.models import QueueMessageFilter, TweetType +from lib.logger import configure_logger +from services.ai.workflows.base import BaseWorkflow +from tools.dao_deployments import ContractDAODeployInput + +logger = configure_logger(__name__) + + +class ToolRequest(BaseModel): + tool_name: str = Field( + description="The name of the tool to be executed its always contract_deploy_dao" + ) + parameters: ContractDAODeployInput = Field( + description="The parameters for the tool" + ) + priority: int = Field(description="The priority of the tool request") + + +class TweetAnalysisOutput(BaseModel): + worthy: bool = Field(description="Whether the tweet is worthy of processing") + reason: str = Field(description="The reason for the worthy determination") + tweet_type: TweetType = Field(description="The type of tweet") + tool_request: Optional[ToolRequest] = Field( + description="The tool request to be executed if the tweet is worthy" + ) + confidence_score: float = Field( + description="The confidence score for the worthy determination" + ) + + +class AnalysisState(TypedDict): + """State for the analysis flow.""" + + tweet_text: str + filtered_content: str + is_worthy: bool + tweet_type: TweetType + tool_request: Optional[ToolRequest] + confidence_score: float + reason: str + + +class TweetAnalysisWorkflow(BaseWorkflow[AnalysisState]): + """Workflow for analyzing tweets.""" + + def __init__(self, account_name: str = "@aibtcdevagent", **kwargs): + 
super().__init__(**kwargs) + self.account_name = account_name + + def _create_chat_messages( + self, + tweet_text: str, + filtered_content: str, + account_name: str, + token_symbols: list, + ) -> list: + """Create chat messages for tweet analysis. + + Args: + tweet_text: The current tweet text to analyze + filtered_content: Filtered content from tweet history + account_name: The account name analyzing tweets + token_symbols: List of token symbols already taken + + Returns: + List of chat messages + """ + # System message with analysis guidelines + system_content = f"""You are {account_name}, a specialized DAO deployment analysis agent. Your role is to analyze tweets to determine if they contain valid DAO deployment requests and extract the necessary parameters. + +Analysis Guidelines: +1. Determine if the tweet is worthy of processing (contains a valid DAO deployment request) +2. Classify the tweet type: tool_request, thread, or invalid +3. For tool requests, extract required parameters for contract_deploy_dao tool: + - token_symbol: Symbol for the token (e.g., 'HUMAN') + - token_name: Name of the token (e.g., 'Human') + - token_description: Description of the token + - token_max_supply: Initial supply (default: 1000000000) + - token_decimals: Number of decimals (default: 6) + - origin_address: Address of the DAO creator + - mission: Mission statement serving as the unifying purpose and guiding principle + - tweet_id: ID of the tweet + +Worthiness Criteria: +- Welcome creativity—funny or edgy ideas are encouraged +- Concepts must avoid harmful or unethical themes +- While flexible on ethics, there's a clear line against promoting harm +- Worth depends on substance and alignment with basic principles +- General conversations unrelated to DAO creation should be marked as not worthy +- Purely promotional content without actionable details should be marked as not worthy + +Token Symbol Rules: +- Ensure the DAO symbol is not already taken from the provided list +- If taken, choose a new unique symbol for the parameters +- Only craft parameters if worthiness determination is True + +Note: Your sole purpose is to analyze and generate parameters, not to execute the contract_deploy_dao tool. 
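The token symbol rules above ask the analyzer to pick a symbol that is not already taken. A small illustrative sketch of such a uniqueness check, assuming the taken symbols are available as a plain list; the numeric-suffix strategy is an assumption, not something the prompt prescribes.

def ensure_unique_symbol(proposed: str, taken: list[str]) -> str:
    """Return the proposed symbol, or a numbered variant if it is already taken."""
    taken_upper = {s.upper() for s in taken}
    candidate = proposed.upper()
    suffix = 2
    while candidate in taken_upper:
        candidate = f"{proposed.upper()}{suffix}"  # illustrative strategy only
        suffix += 1
    return candidate


print(ensure_unique_symbol("HUMAN", ["HUMAN", "HUMAN2"]))  # -> HUMAN3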
+ +Output Format: +Provide a JSON object with: +- worthy: Boolean indicating if tweet is worthy of processing +- reason: Explanation for the worthy determination +- tweet_type: Classification as "tool_request", "thread", or "invalid" +- tool_request: Object with tool_name "contract_deploy_dao", parameters, and priority (only if worthy and tool_request type) +- confidence_score: Float between 0.0 and 1.0 for confidence in determination""" + + # User message with the specific analysis request + user_content = f"""Please analyze the following tweet information: + +Current Tweet: +{tweet_text} + +Tweet History Context: +{filtered_content} + +Current DAO Symbols Already Taken: +{", ".join(token_symbols) if token_symbols else "None"} + +Based on this information, determine if this tweet contains a valid DAO deployment request and extract the necessary parameters if applicable.""" + + return [ + ("system", system_content), + ("human", user_content), + ] + + def _create_graph(self) -> Graph: + """Create the analysis graph.""" + + # Create analysis node + def analyze_tweet(state: AnalysisState) -> AnalysisState: + """Analyze the tweet and determine if it's worthy of processing.""" + tokens = backend.list_tokens() + token_symbols_in_db = [token.symbol for token in tokens] + queued_messages = backend.list_queue_messages( + filters=QueueMessageFilter(type="daos", is_processed=False) + ) + token_symbols_in_queue = [ + message.message["parameters"]["token_symbol"] + for message in queued_messages + ] + + # make a list of token symbols in queue and token symbols in db + token_symbols = list(set(token_symbols_in_db + token_symbols_in_queue)) + + # Create chat messages + messages = self._create_chat_messages( + tweet_text=state["tweet_text"], + filtered_content=state["filtered_content"], + account_name=self.account_name, + token_symbols=token_symbols, + ) + + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + + structured_output = self.llm.with_structured_output( + TweetAnalysisOutput, + ) + # Get analysis from LLM + result = structured_output.invoke(formatted_prompt) + + # Update state + state["is_worthy"] = result.worthy + state["tweet_type"] = result.tweet_type + state["tool_request"] = result.tool_request + state["confidence_score"] = result.confidence_score + state["reason"] = result.reason + + return state + + # Create the graph + workflow = StateGraph(AnalysisState) + + # Add nodes + workflow.add_node("analyze", analyze_tweet) + + # Add edges + workflow.set_entry_point("analyze") + workflow.add_edge("analyze", END) + + return workflow.compile() + + def _validate_state(self, state: AnalysisState) -> bool: + """Validate the workflow state.""" + required_fields = ["tweet_text", "filtered_content"] + return all(field in state and state[field] for field in required_fields) + + +async def analyze_tweet(tweet_text: str, filtered_content: str) -> Dict: + """Analyze a tweet and determine if it's worthy of processing.""" + # Initialize state + state = { + "tweet_text": tweet_text, + "filtered_content": filtered_content, + "is_worthy": False, + "tweet_type": TweetType.INVALID, + "tool_request": None, + "confidence_score": 0.0, + "reason": "", + } + + # Create and run workflow + workflow = TweetAnalysisWorkflow() + result = await workflow.execute(state) + + return result diff --git a/services/workflows/tweet_generator.py b/services/ai/workflows/tweet_generator.py similarity index 56% rename from services/workflows/tweet_generator.py rename 
to services/ai/workflows/tweet_generator.py index a39c87af..fa67ec1e 100644 --- a/services/workflows/tweet_generator.py +++ b/services/ai/workflows/tweet_generator.py @@ -1,13 +1,11 @@ -"""Tweet generator workflow.""" - from typing import Dict, TypedDict -from langchain.prompts import PromptTemplate +from langchain_core.prompts.chat import ChatPromptTemplate from langgraph.graph import END, Graph, StateGraph from pydantic import BaseModel, Field from lib.logger import configure_logger -from services.workflows.base import BaseWorkflow +from services.ai.workflows.base import BaseWorkflow logger = configure_logger(__name__) @@ -33,59 +31,76 @@ class GeneratorState(TypedDict): class TweetGeneratorWorkflow(BaseWorkflow[GeneratorState]): """Workflow for generating tweets.""" - def _create_prompt(self) -> PromptTemplate: - """Create the generator prompt template.""" - return PromptTemplate( - input_variables=["dao_name", "dao_symbol", "dao_mission", "dao_id"], - template=""" - Generate an exciting tweet announcing the successful deployment of a new DAO. - - DAO Details: - - Name: {dao_name} - - Symbol: {dao_symbol} - - Mission: {dao_mission} - - Requirements: - 1. Must be under 200 characters (not including URL) to leave room for the URL - 2. Should be enthusiastic and welcoming - 3. Include the DAO symbol with $ prefix - 4. Mention key aspects of the mission - 5. Use emojis appropriately but don't overdo it (2-3 max) - 6. REQUIRED: End the tweet with the URL https://aibtc.dev/daos/{dao_id} - - Output format: - {{ - "tweet_text": str, - "confidence_score": float - }} - """, - ) + def _create_chat_messages( + self, dao_name: str, dao_symbol: str, dao_mission: str, dao_id: str + ) -> list: + """Create chat messages for tweet generation. + + Args: + dao_name: Name of the DAO + dao_symbol: Symbol of the DAO + dao_mission: Mission statement of the DAO + dao_id: ID of the DAO + + Returns: + List of chat messages + """ + # System message with guidelines + system_content = """You are a social media expert specializing in crypto and DAO announcements. Generate exciting, engaging tweets that announce successful DAO deployments while maintaining professionalism and community focus. 
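Both the analyzer and this generator bind a Pydantic model to the LLM via with_structured_output and invoke the rendered prompt. A minimal self-contained sketch of that pattern, assuming an OpenAI API key is configured; the model name, schema, and prompt text here are illustrative.

from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class TweetDraft(BaseModel):
    """Illustrative output schema, analogous to TweetGeneratorOutput."""

    tweet_text: str = Field(description="The complete tweet text")
    confidence_score: float = Field(description="Confidence between 0.0 and 1.0")


llm = ChatOpenAI(model="gpt-4.1", temperature=0.9)  # model name is illustrative
structured = llm.with_structured_output(TweetDraft)
draft = structured.invoke("Draft a short launch tweet for the $HUMAN DAO.")
print(draft.tweet_text, draft.confidence_score)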
+ +Guidelines: +- Keep tweets under 200 characters (not including URL) to leave room for the URL +- Be enthusiastic and welcoming in tone +- Include the DAO symbol with $ prefix +- Mention key aspects of the mission concisely +- Use emojis appropriately but don't overdo it (2-3 max) +- Create content that encourages community engagement +- End with the provided DAO URL + +Output Format: +Provide a JSON object with: +- tweet_text: The complete tweet text including the URL +- confidence_score: A float between 0.0 and 1.0 indicating confidence in the tweet quality""" + + # User message with specific DAO details + user_content = f"""Generate an exciting tweet announcing the successful deployment of a new DAO with the following details: + +DAO Name: {dao_name} +Symbol: {dao_symbol} +Mission: {dao_mission} +URL: https://aibtc.dev/daos/{dao_id} + +Create a tweet that celebrates this new DAO launch, highlights its unique mission, and invites the community to participate.""" + + return [ + ("system", system_content), + ("human", user_content), + ] def _create_graph(self) -> Graph: """Create the generator graph.""" - prompt = self._create_prompt() # Create generation node def generate_tweet(state: GeneratorState) -> GeneratorState: """Generate the tweet response.""" - # Format prompt with state - formatted_prompt = prompt.format( + # Create chat messages + messages = self._create_chat_messages( dao_name=state["dao_name"], dao_symbol=state["dao_symbol"], dao_mission=state["dao_mission"], dao_id=state["dao_id"], ) + # Create chat prompt template + prompt = ChatPromptTemplate.from_messages(messages) + formatted_prompt = prompt.format() + # Get generation from LLM structured_output = self.llm.with_structured_output( TweetGeneratorOutput, ) result = structured_output.invoke(formatted_prompt) - # Clean and parse the response - # content = self._clean_llm_response(result.content) - # parsed_result = TweetGeneratorOutput.model_validate_json(content) - # Update state state["generated_tweet"] = result.tweet_text state["confidence_score"] = result.confidence_score diff --git a/services/ai/workflows/utils/__init__.py b/services/ai/workflows/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/services/ai/workflows/utils/model_factory.py b/services/ai/workflows/utils/model_factory.py new file mode 100644 index 00000000..14014d09 --- /dev/null +++ b/services/ai/workflows/utils/model_factory.py @@ -0,0 +1,236 @@ +"""Centralized model factory for consistent ChatOpenAI configuration across workflows. + +This module provides a single place to configure default model settings that can be +easily overridden when needed. +""" + +from typing import Any, List, Optional + +from langchain_openai import ChatOpenAI + +from config import config +from lib.logger import configure_logger + +logger = configure_logger(__name__) + + +class ModelConfig: + """Configuration class for default model settings.""" + + # Default model settings - change these to update all workflows + DEFAULT_MODEL = "gpt-4.1" + DEFAULT_TEMPERATURE = 0.9 + DEFAULT_STREAMING = True + DEFAULT_STREAM_USAGE = True + + @classmethod + def get_default_model(cls) -> str: + """Get the default model name. + + Uses ChatLLMConfig.default_model from the configuration. + + Returns: + Default model name + """ + return config.chat_llm.default_model or cls.DEFAULT_MODEL + + @classmethod + def get_default_temperature(cls) -> float: + """Get the default temperature. + + Uses ChatLLMConfig.default_temperature from the configuration. 
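The workflows above assemble prompts as (role, content) tuples and render them with ChatPromptTemplate. A small isolated sketch of the same pattern; the DAO values are made up, and unlike the pre-interpolated f-strings above, this version leaves placeholders for format to fill.

from langchain_core.prompts.chat import ChatPromptTemplate

messages = [
    ("system", "You are a social media expert for DAO announcements."),
    ("human", "Announce the launch of {dao_name} (symbol ${dao_symbol})."),
]
prompt = ChatPromptTemplate.from_messages(messages)
formatted = prompt.format(dao_name="Human DAO", dao_symbol="HUMAN")
print(formatted)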
+ + Returns: + Default temperature + """ + try: + return config.chat_llm.default_temperature + except (ValueError, TypeError, AttributeError): + logger.warning("Invalid chat LLM temperature configuration, using default") + return cls.DEFAULT_TEMPERATURE + + @classmethod + def get_default_base_url(cls) -> str: + """Get the default OpenAI API base URL. + + Uses ChatLLMConfig.api_base from the configuration. + + Returns: + Default base URL (empty string if not set) + """ + return config.chat_llm.api_base or "" + + @classmethod + def get_default_api_key(cls) -> str: + """Get the default OpenAI API key. + + Uses ChatLLMConfig.api_key from the configuration. + + Returns: + Default API key + """ + return config.chat_llm.api_key + + @classmethod + def get_reasoning_model(cls) -> str: + """Get the reasoning model name. + + Uses ChatLLMConfig.reasoning_model from the configuration. + + Returns: + Reasoning model name + """ + return config.chat_llm.reasoning_model or "o3-mini" + + @classmethod + def get_reasoning_temperature(cls) -> float: + """Get the reasoning temperature. + + Uses ChatLLMConfig.reasoning_temperature from the configuration. + + Returns: + Reasoning temperature + """ + try: + return config.chat_llm.reasoning_temperature + except (ValueError, TypeError, AttributeError): + logger.warning("Invalid reasoning temperature configuration, using default") + return cls.DEFAULT_TEMPERATURE + + +def create_chat_openai( + model: Optional[str] = None, + temperature: Optional[float] = None, + streaming: Optional[bool] = None, + stream_usage: Optional[bool] = None, + callbacks: Optional[List[Any]] = None, + base_url: Optional[str] = None, + api_key: Optional[str] = None, + **kwargs, +) -> ChatOpenAI: + """Create a ChatOpenAI instance with centralized default configuration. + + Args: + model: Model name. If None, uses ModelConfig.get_default_model() + temperature: Temperature. If None, uses ModelConfig.get_default_temperature() + streaming: Whether to enable streaming. If None, uses DEFAULT_STREAMING + stream_usage: Whether to stream usage. If None, uses DEFAULT_STREAM_USAGE + callbacks: Optional callback handlers + base_url: OpenAI API base URL. If None, uses ModelConfig.get_default_base_url() + api_key: OpenAI API key. If None, uses ModelConfig.get_default_api_key() + **kwargs: Additional arguments to pass to ChatOpenAI + + Returns: + Configured ChatOpenAI instance + """ + config_dict = { + "model": model or ModelConfig.get_default_model(), + "temperature": temperature + if temperature is not None + else ModelConfig.get_default_temperature(), + "streaming": streaming + if streaming is not None + else ModelConfig.DEFAULT_STREAMING, + "stream_usage": stream_usage + if stream_usage is not None + else ModelConfig.DEFAULT_STREAM_USAGE, + "callbacks": callbacks or [], + **kwargs, + } + + # Add base_url if specified or if default is set + default_base_url = base_url or ModelConfig.get_default_base_url() + if default_base_url: + config_dict["base_url"] = default_base_url + + # Add api_key if specified or if default is set + default_api_key = api_key or ModelConfig.get_default_api_key() + if default_api_key: + config_dict["api_key"] = default_api_key + + logger.debug(f"Creating ChatOpenAI with config: {config_dict}") + return ChatOpenAI(**config_dict) + + +def create_planning_llm( + model: Optional[str] = None, + temperature: Optional[float] = None, + **kwargs, +) -> ChatOpenAI: + """Create a ChatOpenAI instance specifically for planning operations. 
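A short usage sketch for the factory above, importing it from the module path introduced in this file; anything not passed explicitly falls back to the centralized ModelConfig defaults, and the override values shown are illustrative.

from services.ai.workflows.utils.model_factory import create_chat_openai

# Uses the configured default model, temperature, streaming, and credentials.
default_llm = create_chat_openai()

# Overrides only what differs; everything else still comes from ModelConfig.
low_temp_llm = create_chat_openai(model="gpt-4.1-mini", temperature=0.0)  # illustrative values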
+ + Uses the same defaults as create_chat_openai but can be customized + for planning-specific needs. + + Args: + model: Model name. If None, uses default + temperature: Temperature. If None, uses default + **kwargs: Additional arguments + + Returns: + Configured ChatOpenAI instance for planning + """ + return create_chat_openai( + model=model, + temperature=temperature, + **kwargs, + ) + + +def create_reasoning_llm( + model: Optional[str] = None, + temperature: Optional[float] = None, + **kwargs, +) -> ChatOpenAI: + """Create a ChatOpenAI instance specifically for reasoning operations. + + By default uses the configured reasoning model for reasoning tasks. + + Args: + model: Model name. If None, uses configured reasoning model + temperature: Temperature. If None, uses configured reasoning temperature + **kwargs: Additional arguments + + Returns: + Configured ChatOpenAI instance for reasoning + """ + reasoning_model = model or ModelConfig.get_reasoning_model() + reasoning_temp = ( + temperature + if temperature is not None + else ModelConfig.get_reasoning_temperature() + ) + + return create_chat_openai( + model=reasoning_model, + temperature=reasoning_temp, + **kwargs, + ) + + +# Legacy compatibility functions for backward compatibility +def get_default_model_name() -> str: + """Get the default model name for backward compatibility. + + Returns: + Default model name + """ + return ModelConfig.get_default_model() + + +def get_default_temperature() -> float: + """Get the default temperature for backward compatibility. + + Returns: + Default temperature + """ + return ModelConfig.get_default_temperature() + + +def get_default_base_url() -> str: + """Get the default OpenAI API base URL for backward compatibility. + + Returns: + Default base URL + """ + return ModelConfig.get_default_base_url() diff --git a/services/ai/workflows/utils/models.py b/services/ai/workflows/utils/models.py new file mode 100644 index 00000000..34c0671f --- /dev/null +++ b/services/ai/workflows/utils/models.py @@ -0,0 +1,128 @@ +from typing import Dict, List, Optional, Union + +from pydantic import BaseModel, Field + + +class AgentOutput(BaseModel): + """Output model for agent evaluations.""" + + score: int = Field(description="Score from 0-100") + flags: List[str] = Field(description="Critical issues flagged") + summary: str = Field(description="Summary of findings") + + +class FinalOutput(BaseModel): + """Output model for the final evaluation decision.""" + + score: int = Field(description="Final evaluation score") + decision: str = Field(description="Approve or Reject") + explanation: str = Field(description="Reasoning for decision") + + +class ProposalEvaluationOutput(BaseModel): + """Output model for proposal evaluation.""" + + approve: bool = Field( + description="Decision: true to approve (vote FOR), false to reject (vote AGAINST)" + ) + confidence_score: float = Field( + description="Confidence score for the decision (0.0-1.0)" + ) + reasoning: str = Field(description="The reasoning behind the evaluation decision") + + +class ProposalRecommendationOutput(BaseModel): + """Output model for proposal recommendations.""" + + title: str = Field(description="Recommended proposal title") + content: str = Field(description="Recommended proposal content/description") + rationale: str = Field( + description="Explanation of why this proposal is recommended" + ) + priority: str = Field( + description="Priority level: high, medium, low", pattern="^(high|medium|low)$" + ) + estimated_impact: str = Field(description="Expected 
impact on the DAO") + suggested_action: Optional[str] = Field( + description="Specific action or next steps if applicable", default=None + ) + + +class ProposalMetadataOutput(BaseModel): + """Output model for proposal metadata generation.""" + + title: str = Field( + description="Generated proposal title (max 100 characters)", max_length=100 + ) + summary: str = Field( + description="Short summary of the proposal (2-3 sentences, max 500 characters)", + max_length=500, + ) + tags: List[str] = Field( + description="Array of 3-5 relevant tags that categorize the proposal content", + min_length=3, + max_length=5, + ) + + +class EvaluationCategory(BaseModel): + """Model for a single evaluation category.""" + + category: str = Field(description="Category name") + score: int = Field(description="Score from 1-100", ge=1, le=100) + weight: float = Field( + description="Weight of this category in final decision (0.0-1.0)", + ge=0.0, + le=1.0, + ) + reasoning: List[str] = Field( + description="Reasoning in 3 or less bullet points", max_length=3 + ) + + +class ComprehensiveEvaluationOutput(BaseModel): + """Output model for comprehensive single-pass proposal evaluation with dynamic categories.""" + + categories: List[EvaluationCategory] = Field( + description="List of evaluation categories with scores, weights, and reasoning" + ) + final_score: int = Field( + description="Final comprehensive evaluation score (1-100)", ge=1, le=100 + ) + decision: bool = Field( + description="Final decision: True to approve (vote FOR), false to reject (vote AGAINST)" + ) + explanation: str = Field( + description="Comprehensive reasoning for the final decision" + ) + flags: List[str] = Field( + description="All critical issues identified across evaluations" + ) + summary: str = Field(description="Summary of the evaluation") + + +class ComprehensiveEvaluatorAgentProcessOutput(BaseModel): + """Output model for the ComprehensiveEvaluatorAgent's process method.""" + + categories: List[EvaluationCategory] = Field( + description="List of evaluation categories with scores, weights, and reasoning" + ) + final_score: int = Field( + description="Final comprehensive evaluation score (1-100)", ge=1, le=100 + ) + decision: bool = Field( + description="Final decision: True to approve (vote FOR), false to reject (vote AGAINST)" + ) + explanation: str = Field( + description="Comprehensive reasoning for the final decision" + ) + flags: List[str] = Field( + description="All critical issues identified across evaluations" + ) + summary: str = Field(description="Summary of the evaluation") + token_usage: Dict[str, Union[int, str]] = Field( + default_factory=dict, description="Token usage statistics for the evaluation" + ) + images_processed: int = Field( + default=0, description="Number of images processed during evaluation" + ) diff --git a/services/ai/workflows/utils/state_reducers.py b/services/ai/workflows/utils/state_reducers.py new file mode 100644 index 00000000..390dca13 --- /dev/null +++ b/services/ai/workflows/utils/state_reducers.py @@ -0,0 +1,139 @@ +from typing import Any, Dict, List, Optional + +from lib.logger import configure_logger + +logger = configure_logger(__name__) + + +def no_update_reducer(current: Any, new: List[Any]) -> Any: + """Reducer that prevents updates after initial value is set. 
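The comprehensive evaluation output carries per-category scores and weights. A small sketch of how such categories might be folded into a weighted final score, using the EvaluationCategory model defined above; the aggregation formula itself is an assumption, not part of these models.

from services.ai.workflows.utils.models import EvaluationCategory

categories = [
    EvaluationCategory(category="Core", score=80, weight=0.4, reasoning=["Clear scope"]),
    EvaluationCategory(category="Financial", score=60, weight=0.35, reasoning=["High cost"]),
    EvaluationCategory(category="Social", score=90, weight=0.25, reasoning=["Strong support"]),
]

# Weighted average, normalized in case the weights do not sum exactly to 1.0.
total_weight = sum(c.weight for c in categories) or 1.0
final_score = round(sum(c.score * c.weight for c in categories) / total_weight)
print(final_score)  # 76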
+ + Args: + current: The current value + new: List of new values to consider + + Returns: + The original value if set, otherwise the first non-None value from new + """ + # Treat initial empty string for str types as if it were None for accepting the first value + is_initial_empty_string = isinstance(current, str) and current == "" + + # If current is genuinely set (not None and not initial empty string), keep it. + if current is not None and not is_initial_empty_string: + return current + + # Current is None or an initial empty string. Try to set it from new. + processed_new_values = ( + new if isinstance(new, list) else [new] + ) # Ensure 'new' is a list + for n_val in processed_new_values: + if n_val is not None: + return n_val + + # If current was None/initial empty string and new is all None or empty, return current + return current + + +def merge_dicts(current: Optional[Dict], updates: List[Optional[Dict]]) -> Dict: + """Merge multiple dictionary updates into the current dictionary. + + Args: + current: The current dictionary (or None) + updates: List of dictionaries to merge in + + Returns: + The merged dictionary + """ + # Initialize current if it's None + if current is None: + current = {} + + # Handle case where updates is None + if updates is None: + return current + + # Process updates if it's a list + if isinstance(updates, list): + for update in updates: + if update and isinstance(update, dict): + current.update(update) + # Handle case where updates is a single dictionary, not a list + elif isinstance(updates, dict): + current.update(updates) + + return current + + +def set_once(current: Any, updates: List[Any]) -> Any: + """Set the value once and prevent further updates. + + Args: + current: The current value + updates: List of potential new values + + Returns: + The current value if set, otherwise the first non-None value from updates + """ + # If current already has a value, return it unchanged + if current is not None: + return current + + # Handle case where updates is None instead of a list + if updates is None: + return None + + # Process updates if it's a list + if isinstance(updates, list): + for update in updates: + if update is not None: + return update + # Handle case where updates is a single value, not a list + elif updates is not None: + return updates + + return current + + +def update_state_with_agent_result( + state: Dict[str, Any], agent_result: Dict[str, Any], agent_name: str +) -> Dict[str, Any]: + """Update state with agent result including summaries and flags. 
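A quick behavioural sketch of the reducers above, assuming they are imported from this module:

from services.ai.workflows.utils.state_reducers import (
    merge_dicts,
    no_update_reducer,
    set_once,
)

# no_update_reducer keeps the first real value and ignores later writes.
print(no_update_reducer("", ["proposal-123"]))          # proposal-123
print(no_update_reducer("proposal-123", ["other-id"]))  # proposal-123

# merge_dicts folds a list of partial updates into the current dict.
print(merge_dicts({"core": 80}, [{"financial": 60}, {"social": 90}]))

# set_once is the same idea without the empty-string special case.
print(set_once(None, [42, 99]))  # 42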
+ + Args: + state: The current state dictionary + agent_result: The result dictionary from an agent + agent_name: The name of the agent (e.g., 'core', 'historical') + + Returns: + The updated state dictionary + """ + logger.debug( + f"[DEBUG:update_state:{agent_name}] Updating state with {agent_name}_score (score: {agent_result.get('score', 'N/A')})" + ) + + # Update agent score in state + if agent_name in ["core", "historical", "financial", "social", "final"]: + # Make a copy of agent_result to avoid modifying the original + score_dict = dict(agent_result) + # Don't pass token_usage through this path to avoid duplication + if "token_usage" in score_dict: + del score_dict["token_usage"] + + # Directly assign the dictionary to the state key + state[f"{agent_name}_score"] = score_dict + + # Update summaries + if "summaries" not in state: + state["summaries"] = {} + + if "summary" in agent_result and agent_result["summary"]: + state["summaries"][f"{agent_name}_score"] = agent_result["summary"] + + # Update flags + if "flags" not in state: + state["flags"] = [] + + if "flags" in agent_result and isinstance(agent_result["flags"], list): + state["flags"].extend(agent_result["flags"]) + + return state diff --git a/services/ai/workflows/utils/token_usage.py b/services/ai/workflows/utils/token_usage.py new file mode 100644 index 00000000..436763b5 --- /dev/null +++ b/services/ai/workflows/utils/token_usage.py @@ -0,0 +1,65 @@ +from typing import Any, Dict + +from lib.logger import configure_logger +from lib.utils import calculate_token_cost +from services.ai.workflows.utils.model_factory import get_default_model_name + +logger = configure_logger(__name__) + + +class TokenUsageMixin: + """Mixin for tracking token usage in LLM calls.""" + + def __init__(self): + """Initialize token usage tracker.""" + pass + + def track_token_usage(self, prompt_text: str, result: Any) -> Dict[str, int]: + """Track token usage for an LLM invocation. 
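A usage sketch for update_state_with_agent_result, showing how an agent result is folded into the shared state; the sample result values are made up.

from services.ai.workflows.utils.state_reducers import update_state_with_agent_result

state = {"proposal_id": "prop-42"}
agent_result = {
    "score": 78,
    "summary": "Financially sound with minor risks.",
    "flags": ["Budget slightly above treasury guideline"],
    "token_usage": {"input_tokens": 900, "output_tokens": 200, "total_tokens": 1100},
}

state = update_state_with_agent_result(state, agent_result, "financial")
print(state["financial_score"])  # score dict, with token_usage stripped
print(state["summaries"])        # {'financial_score': 'Financially sound with minor risks.'}
print(state["flags"])            # ['Budget slightly above treasury guideline']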
+ + Args: + prompt_text: The prompt text sent to the LLM + result: The response from the LLM + + Returns: + Dictionary containing token usage information + """ + token_usage_data = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0} + + # Try to extract token usage from LLM + if ( + hasattr(self.llm, "_last_prompt_id") + and hasattr(self.llm, "client") + and hasattr(self.llm.client, "usage_by_prompt_id") + ): + last_prompt_id = self.llm._last_prompt_id + if last_prompt_id in self.llm.client.usage_by_prompt_id: + usage = self.llm.client.usage_by_prompt_id[last_prompt_id] + token_usage_data = { + "input_tokens": usage.get("prompt_tokens", 0), + "output_tokens": usage.get("completion_tokens", 0), + "total_tokens": usage.get("total_tokens", 0), + } + return token_usage_data + + # Fallback to estimation + llm_model_name = getattr(self.llm, "model_name", get_default_model_name()) + token_count = len(prompt_text) // 4 # Simple estimation + token_usage_dict = {"input_tokens": token_count} + calculate_token_cost(token_usage_dict, llm_model_name) + token_usage_data = { + "input_tokens": token_count, + "output_tokens": ( + len(result.model_dump_json()) // 4 + if hasattr(result, "model_dump_json") + else 0 + ), + "total_tokens": token_count + + ( + len(result.model_dump_json()) // 4 + if hasattr(result, "model_dump_json") + else 0 + ), + "model_name": llm_model_name, + } + return token_usage_data diff --git a/services/workflows/workflow_service.py b/services/ai/workflows/workflow_service.py similarity index 89% rename from services/workflows/workflow_service.py rename to services/ai/workflows/workflow_service.py index a4ecbdf8..893503de 100644 --- a/services/workflows/workflow_service.py +++ b/services/ai/workflows/workflow_service.py @@ -1,9 +1,3 @@ -"""Generic workflow service interface and factory. - -This module provides a standard interface for all workflow services and -a factory function to instantiate the appropriate service based on configuration. 
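The fallback path above estimates tokens from character counts when exact usage is unavailable. A standalone sketch of that rough four-characters-per-token estimate together with the per-agent aggregation performed in evaluate_proposal:

def estimate_tokens(text: str) -> int:
    """Rough fallback estimate: roughly four characters per token."""
    return len(text) // 4


def aggregate_usage(usage_by_agent: dict) -> dict:
    """Sum input/output/total tokens across agents, as evaluate_proposal does."""
    totals = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
    for usage in usage_by_agent.values():
        for key in totals:
            totals[key] += usage.get(key, 0)
    return totals


usage = {
    "core_agent": {"input_tokens": 1200, "output_tokens": 300, "total_tokens": 1500},
    "reasoning_agent": {"input_tokens": 800, "output_tokens": 250, "total_tokens": 1050},
}
print(estimate_tokens("Evaluate this proposal carefully."))  # 8
print(aggregate_usage(usage))  # {'input_tokens': 2000, 'output_tokens': 550, 'total_tokens': 2550}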
-""" - import asyncio import datetime from abc import ABC, abstractmethod @@ -12,16 +6,19 @@ from langchain_core.callbacks.base import BaseCallbackHandler from langchain_core.embeddings import Embeddings from langchain_core.messages import AIMessage, HumanMessage, SystemMessage -from langchain_openai import ChatOpenAI, OpenAIEmbeddings +from langchain_openai import OpenAIEmbeddings from lib.logger import configure_logger -from services.workflows.base import ExecutionError, StreamingError -from services.workflows.react import ( - LangGraphService, +from services.ai.workflows.base import ExecutionError, StreamingError +from services.ai.workflows.chat import ( + ChatService, MessageProcessor, StreamingCallbackHandler, ) -from services.workflows.vector_react import VectorLangGraphService +from services.ai.workflows.utils.model_factory import ( + get_default_model_name, + get_default_temperature, +) logger = configure_logger(__name__) @@ -428,8 +425,8 @@ def __init__(self, workflow_class: Type, **kwargs): self.kwargs = kwargs self.callback_handler = None self.tools = [] - self.model_name = kwargs.get("model_name", "gpt-4.1") - self.temperature = kwargs.get("temperature", 0.1) + self.model_name = kwargs.get("model_name", get_default_model_name()) + self.temperature = kwargs.get("temperature", get_default_temperature()) def with_callback_handler( self, callback_handler: BaseCallbackHandler @@ -508,64 +505,36 @@ def build(self, **extra_kwargs) -> Any: class WorkflowFactory: - """Factory for creating workflow service instances.""" + """Factory for creating workflow service instances. Only ChatService is used.""" @classmethod def create_workflow_service( cls, - workflow_type: str = "react", + workflow_type: str = "chat", vector_collections: Optional[Union[str, List[str]]] = None, embeddings: Optional[Embeddings] = None, **kwargs, ) -> WorkflowService: - """Create a workflow service instance based on the workflow type. + """Create a workflow service instance. Always returns ChatService. 
Args: - workflow_type: Type of workflow to create ("react", "preplan", "vector", "vector_preplan") + workflow_type: Type of workflow to create (ignored, always uses ChatService) vector_collections: Vector collection name(s) for vector workflows embeddings: Embeddings model for vector workflows **kwargs: Additional parameters to pass to the service Returns: - An instance of a WorkflowService implementation + An instance of ChatService """ - # Import service classes here to avoid circular imports - from services.workflows.preplan_react import PreplanLangGraphService - from services.workflows.vector_preplan_react import ( - VectorPreplanLangGraphService, - ) - - # Map workflow types to their service classes - service_map = { - "react": LangGraphService, - "preplan": PreplanLangGraphService, - "vector": VectorLangGraphService, - "vector_preplan": VectorPreplanLangGraphService, - } - - if workflow_type not in service_map: - raise ValueError(f"Unsupported workflow type: {workflow_type}") - - service_class = service_map[workflow_type] - - # Handle vector-based workflow special cases - if workflow_type in ["vector", "vector_preplan"]: - if not vector_collections: - raise ValueError( - f"Vector collection name(s) required for {workflow_type} workflow" - ) - + if vector_collections is not None: if not embeddings: embeddings = OpenAIEmbeddings() - - return service_class( + return ChatService( collection_names=vector_collections, embeddings=embeddings, **kwargs, ) - - # For other workflow types - return service_class(**kwargs) + return ChatService(**kwargs) async def execute_workflow_stream( @@ -578,10 +547,10 @@ async def execute_workflow_stream( embeddings: Optional[Embeddings] = None, **kwargs, ) -> AsyncGenerator[Dict, None]: - """Unified interface for executing any workflow stream. + """Unified interface for executing any workflow stream. Uses ChatService for all workflows. Args: - workflow_type: Type of workflow to execute + workflow_type: Type of workflow to execute (ignored) history: Conversation history input_str: Current user input persona: Optional persona to use @@ -599,8 +568,6 @@ async def execute_workflow_stream( embeddings=embeddings, **kwargs, ) - - # Execute the stream through the service's execute_stream method async for chunk in service.execute_stream( history=history, input_str=input_str, diff --git a/services/communication/__init__.py b/services/communication/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/services/communication/discord/__init__.py b/services/communication/discord/__init__.py new file mode 100644 index 00000000..f318a4b9 --- /dev/null +++ b/services/communication/discord/__init__.py @@ -0,0 +1,8 @@ +""" +Discord service package for sending messages to Discord channels via webhooks. 
+""" + +from services.communication.discord.discord_factory import create_discord_service +from services.communication.discord.discord_service import DiscordService + +__all__ = ["DiscordService", "create_discord_service"] diff --git a/services/discord/discord_factory.py b/services/communication/discord/discord_factory.py similarity index 82% rename from services/discord/discord_factory.py rename to services/communication/discord/discord_factory.py index c4d3fafc..a747577b 100644 --- a/services/discord/discord_factory.py +++ b/services/communication/discord/discord_factory.py @@ -1,6 +1,6 @@ from config import DiscordConfig from lib.logger import configure_logger -from services.discord.discord_service import DiscordService +from services.communication.discord.discord_service import DiscordService logger = configure_logger(__name__) @@ -15,10 +15,10 @@ def create_discord_service(webhook_url=None): Returns: DiscordService or None: Initialized Discord service or None if configuration is missing. """ - # If webhook_url is not provided, get it from config + # If webhook_url is not provided, get it from config (default to passed webhook) if webhook_url is None: discord_config = DiscordConfig() - webhook_url = discord_config.webhook_url + webhook_url = discord_config.webhook_url_passed if not webhook_url: logger.warning("Discord webhook URL is not configured") diff --git a/services/discord/discord_service.py b/services/communication/discord/discord_service.py similarity index 100% rename from services/discord/discord_service.py rename to services/communication/discord/discord_service.py diff --git a/services/bot.py b/services/communication/telegram_bot_service.py similarity index 100% rename from services/bot.py rename to services/communication/telegram_bot_service.py diff --git a/services/communication/twitter_service.py b/services/communication/twitter_service.py new file mode 100644 index 00000000..7e6e0401 --- /dev/null +++ b/services/communication/twitter_service.py @@ -0,0 +1,1005 @@ +import re +from io import BytesIO +from typing import Dict, List, Optional, TypedDict +from urllib.parse import urlparse + +import requests +import tweepy +from pydantic import BaseModel + +from backend.factory import backend +from backend.models import ( + QueueMessageCreate, + XTweetBase, + XTweetCreate, + XTweetFilter, + XUserCreate, + XUserFilter, +) +from config import config +from lib.logger import configure_logger + +logger = configure_logger(__name__) + + +class TwitterService: + def __init__( + self, + consumer_key: str, + consumer_secret: str, + access_token: str, + access_secret: str, + client_id: str, + client_secret: str, + bearer_token: str, + ): + """Initialize the Twitter service with API credentials.""" + self.consumer_key = consumer_key + self.consumer_secret = consumer_secret + self.access_token = access_token + self.access_secret = access_secret + self.client_id = client_id + self.client_secret = client_secret + self.bearer_token = bearer_token + self.client = None + self.api = None + + async def _ainitialize(self) -> None: + self.initialize() + + def initialize(self) -> None: + """Initialize the Twitter client and API.""" + try: + # Initialize OAuth1 handler for API v1.1 (needed for media upload) + auth = tweepy.OAuth1UserHandler( + self.consumer_key, + self.consumer_secret, + self.access_token, + self.access_secret, + ) + self.api = tweepy.API(auth, wait_on_rate_limit=True) + + # Initialize Client for API v2 (used for tweet creation) + self.client = tweepy.Client( + 
consumer_key=self.consumer_key, + consumer_secret=self.consumer_secret, + access_token=self.access_token, + access_token_secret=self.access_secret, + bearer_token=self.bearer_token, + wait_on_rate_limit=True, + ) + logger.info("Twitter client and API initialized successfully") + except Exception as e: + logger.error(f"Failed to initialize Twitter client: {str(e)}") + raise + + def _get_extension(self, url: str) -> str: + """Extract file extension from URL.""" + path = urlparse(url).path.lower() + for ext in [".png", ".jpg", ".jpeg", ".gif", ".webp"]: + if path.endswith(ext): + return ext + return ".jpg" + + def _split_text_into_chunks(self, text: str, limit: int = 280) -> List[str]: + """Split text into chunks not exceeding the limit without cutting words.""" + words = text.split() + chunks = [] + current = "" + for word in words: + if len(current) + len(word) + (1 if current else 0) <= limit: + current = f"{current} {word}".strip() + else: + if current: + chunks.append(current) + current = word + if current: + chunks.append(current) + return chunks + + async def post_tweet_with_media( + self, + image_url: str, + text: str, + reply_id: Optional[str] = None, + ) -> Optional[tweepy.Response]: + """Post a tweet with media attachment.""" + try: + if self.api is None or self.client is None: + raise Exception("Twitter client is not initialized") + + headers = {"User-Agent": "Mozilla/5.0 (compatible; AIBTC Bot/1.0)"} + response = requests.get(image_url, headers=headers, timeout=30) + response.raise_for_status() + + # Validate content type and size + content_type = response.headers.get("content-type", "").lower() + if not any( + ct in content_type + for ct in ["image/jpeg", "image/png", "image/gif", "image/webp"] + ): + logger.warning(f"Unsupported content type: {content_type}") + return None + + if len(response.content) > 5 * 1024 * 1024: # 5MB limit + logger.warning(f"Image too large: {len(response.content)} bytes") + return None + + # Upload media using API v1.1 + extension = self._get_extension(image_url) + media = self.api.media_upload( + filename=f"image{extension}", + file=BytesIO(response.content), + ) + + # Create tweet with media using API v2 + result = self.client.create_tweet( + text=text, + media_ids=[media.media_id_string], + in_reply_to_tweet_id=reply_id, + ) + + if result and result.data: + logger.info( + f"Successfully posted tweet with media: {result.data['id']}" + ) + return result + + return None + + except Exception as e: + logger.error(f"Failed to post tweet with media: {str(e)}") + return None + + async def post_tweet_with_chunks( + self, + text: str, + image_url: Optional[str] = None, + reply_id: Optional[str] = None, + ) -> Optional[List[tweepy.Response]]: + """Post a tweet, splitting into chunks if necessary and handling media.""" + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + # Process image URL if present + if image_url: + # Remove image URL from text + text = re.sub(re.escape(image_url), "", text).strip() + text = re.sub(r"\s+", " ", text) + + # Split text into chunks + chunks = self._split_text_into_chunks(text) + previous_tweet_id = reply_id + responses = [] + + for index, chunk in enumerate(chunks): + try: + if index == 0 and image_url: + # First chunk with media + response = await self.post_tweet_with_media( + image_url=image_url, + text=chunk, + reply_id=previous_tweet_id, + ) + else: + # Regular tweet + response = await self._apost_tweet( + text=chunk, + reply_in_reply_to_tweet_id=previous_tweet_id, + ) + + if response 
and response.data: + responses.append(response) + previous_tweet_id = response.data["id"] + logger.info( + f"Successfully posted tweet chunk {index + 1}: {response.data['id']}" + ) + else: + logger.error(f"Failed to send tweet chunk {index + 1}") + if index == 0: # If first chunk fails, whole message fails + return None + + except Exception as chunk_error: + logger.error(f"Error sending chunk {index + 1}: {str(chunk_error)}") + if index == 0: # Critical failure on first chunk + raise chunk_error + + return responses if responses else None + + except Exception as e: + logger.error(f"Error posting tweet with chunks: {str(e)}") + return None + + async def _apost_tweet( + self, text: str, reply_in_reply_to_tweet_id: Optional[str] = None + ) -> Optional[tweepy.Response]: + """ + Post a new tweet or reply to an existing tweet. + + Args: + text: The content of the tweet + reply_in_reply_to_tweet_id: Optional ID of tweet to reply to + + Returns: + Tweet response if successful, None if failed + """ + return await self.post_tweet(text, reply_in_reply_to_tweet_id) + + async def post_tweet( + self, text: str, reply_in_reply_to_tweet_id: Optional[str] = None + ) -> Optional[tweepy.Response]: + """ + Post a new tweet or reply to an existing tweet. + + Args: + text: The content of the tweet + reply_in_reply_to_tweet_id: Optional ID of tweet to reply to + + Returns: + Tweet response if successful, None if failed + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + response = self.client.create_tweet( + text=text, in_reply_to_tweet_id=reply_in_reply_to_tweet_id + ) + + if response and response.data: + logger.info( + f"Successfully posted tweet: {text[:20]}... (ID: {response.data['id']})" + ) + return response + else: + logger.error(f"Failed to post tweet: {text[:20]}...") + return None + + except Exception as e: + logger.error(f"Failed to post tweet: {str(e)}") + return None + + async def get_user_by_username(self, username: str) -> Optional[tweepy.User]: + """ + Get user information by username. + + Args: + username: Twitter username without @ symbol + + Returns: + User data if found, None if not found or error + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + response = self.client.get_user(username=username) + if response and response.data: + return response.data + return None + + except Exception as e: + logger.error(f"Failed to get user info for {username}: {str(e)}") + return None + + async def get_user_by_user_id(self, user_id: str) -> Optional[tweepy.User]: + """ + Get user information by user ID. + + Args: + user_id: Twitter user ID + + Returns: + User data if found, None if not found or error + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + response = self.client.get_user(id=user_id) + if response and response.data: + return response.data + return None + + except Exception as e: + logger.error(f"Failed to get user info for {user_id}: {str(e)}") + return None + + async def get_mentions_by_user_id( + self, user_id: str, max_results: int = 100 + ) -> List[tweepy.Tweet]: + """ + Get mentions for a specific user. 
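The chunking helper above splits long text on word boundaries so each tweet stays within the character limit. The same word-safe splitting, re-implemented here as a standalone sketch so it can run on its own:

def split_text(text: str, limit: int = 280) -> list[str]:
    """Split text into chunks within the limit without cutting words."""
    chunks: list[str] = []
    current = ""
    for word in text.split():
        candidate = f"{current} {word}".strip()
        if len(candidate) <= limit:
            current = candidate
        else:
            if current:
                chunks.append(current)
            current = word
    if current:
        chunks.append(current)
    return chunks


print(split_text("word " * 100, limit=40))  # thirteen chunks, none longer than 40 characters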
+ + Args: + user_id: Twitter user ID to get mentions for + max_results: Maximum number of mentions to return (default 100) + + Returns: + List of mention data + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + response = self.client.get_mentions( + id=user_id, + max_results=min(max_results, 100), # API limit + tweet_fields=[ + "id", + "text", + "created_at", + "author_id", + "conversation_id", + "in_reply_to_user_id", + "referenced_tweets", + "public_metrics", + "entities", + "attachments", + "context_annotations", + "withheld", + "reply_settings", + "lang", + ], + expansions=[ + "author_id", + "referenced_tweets.id", + "referenced_tweets.id.author_id", + "entities.mentions.username", + "attachments.media_keys", + "attachments.poll_ids", + "in_reply_to_user_id", + "geo.place_id", + ], + user_fields=[ + "id", + "name", + "username", + "created_at", + "description", + "entities", + "location", + "pinned_tweet_id", + "profile_image_url", + "protected", + "public_metrics", + "url", + "verified", + "withheld", + ], + media_fields=[ + "duration_ms", + "height", + "media_key", + "preview_image_url", + "type", + "url", + "width", + "public_metrics", + "alt_text", + ], + place_fields=[ + "contained_within", + "country", + "country_code", + "full_name", + "geo", + "id", + "name", + "place_type", + ], + poll_fields=[ + "duration_minutes", + "end_datetime", + "id", + "options", + "voting_status", + ], + ) + + if response and response.data: + logger.info(f"Successfully retrieved {len(response.data)} mentions") + return response.data + else: + logger.info("No mentions found") + return [] + + except Exception as e: + logger.error(f"Failed to get mentions: {str(e)}") + return [] + + async def get_me(self) -> Optional[tweepy.User]: + """ + Get information about the authenticated user. + + Returns: + User data if successful, None if failed + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + response = self.client.get_me() + if response and response.data: + return response.data + return None + + except Exception as e: + logger.error(f"Failed to get authenticated user info: {str(e)}") + return None + + async def follow_user(self, target_username: str) -> bool: + """ + Follow a user using their username. Uses the authenticated user as the follower. + + Args: + target_username: Username of the account to follow (without @ symbol) + + Returns: + True if successful, False if failed + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + # Get target user's ID + target_user = await self.get_user_by_username(target_username) + if not target_user: + raise Exception(f"Failed to get user info for {target_username}") + + # Follow the user + response = self.client.follow_user(target_user_id=target_user.id) + if response: + logger.info(f"Successfully followed user: {target_username}") + return True + return False + + except Exception as e: + logger.error(f"Failed to follow user {target_username}: {str(e)}") + return False + + async def unfollow_user(self, target_username: str) -> bool: + """ + Unfollow a user using their username. Uses the authenticated user as the unfollower. 
+ + Args: + target_username: Username of the account to unfollow (without @ symbol) + + Returns: + True if successful, False if failed + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + # Get target user's ID + target_user = await self.get_user_by_username(target_username) + if not target_user: + raise Exception(f"Failed to get user info for {target_username}") + + # Unfollow the user + response = self.client.unfollow_user(target_user_id=target_user.id) + if response: + logger.info(f"Successfully unfollowed user: {target_username}") + return True + return False + + except Exception as e: + logger.error(f"Failed to unfollow user {target_username}: {str(e)}") + return False + + async def get_tweet_by_id(self, tweet_id: str) -> Optional[tweepy.Response]: + """ + Get a tweet by its ID using Twitter API v2. + + Args: + tweet_id: The ID of the tweet to retrieve + + Returns: + Full response object if found, None if not found or error + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + response = self.client.get_tweet( + id=tweet_id, + tweet_fields=[ + "id", + "text", + "created_at", + "author_id", + "conversation_id", + "in_reply_to_user_id", + "referenced_tweets", + "public_metrics", + "entities", + "attachments", + "context_annotations", + "withheld", + "reply_settings", + "lang", + ], + expansions=[ + "author_id", + "referenced_tweets.id", + "referenced_tweets.id.author_id", + "entities.mentions.username", + "attachments.media_keys", + "attachments.poll_ids", + "in_reply_to_user_id", + "geo.place_id", + ], + user_fields=[ + "id", + "name", + "username", + "created_at", + "description", + "entities", + "location", + "pinned_tweet_id", + "profile_image_url", + "protected", + "public_metrics", + "url", + "verified", + "withheld", + ], + media_fields=[ + "duration_ms", + "height", + "media_key", + "preview_image_url", + "type", + "url", + "width", + "public_metrics", + "alt_text", + "variants", + ], + ) + + if response and response.data: + logger.info(f"Successfully retrieved tweet: {tweet_id}") + return response + else: + logger.warning(f"Tweet not found: {tweet_id}") + return None + + except Exception as e: + logger.error(f"Failed to get tweet {tweet_id}: {str(e)}") + return None + + async def get_status_by_id( + self, tweet_id: str, tweet_mode: str = "extended" + ) -> Optional[tweepy.models.Status]: + """ + Get a tweet by its ID using Twitter API v1.1 (for extended tweet support). 
+ + Args: + tweet_id: The ID of the tweet to retrieve + tweet_mode: Tweet mode - "extended" for full text, "compat" for compatibility mode + + Returns: + Status object if found, None if not found or error + """ + try: + if self.api is None: + raise Exception("Twitter API is not initialized") + + status = self.api.get_status( + id=tweet_id, + tweet_mode=tweet_mode, + include_entities=True, + include_ext_alt_text=True, + include_card_uri=True, + ) + + if status: + logger.info(f"Successfully retrieved status: {tweet_id}") + return status + else: + logger.warning(f"Status not found: {tweet_id}") + return None + + except Exception as e: + logger.error(f"Failed to get status {tweet_id}: {str(e)}") + return None + + +class UserProfile(TypedDict): + """Type definition for user profile data.""" + + name: str + age: int + email: str + + +class TweetData(BaseModel): + """Pydantic model for tweet data.""" + + tweet_id: Optional[str] = None + author_id: Optional[str] = None + text: Optional[str] = None + conversation_id: Optional[str] = None + + @classmethod + def from_tweepy_tweet(cls, tweet: "tweepy.Tweet") -> "TweetData": + """Create TweetData from a tweepy Tweet object.""" + return cls( + tweet_id=tweet.id, + author_id=tweet.author_id, + text=tweet.text, + conversation_id=tweet.conversation_id, + ) + + +class TwitterConfig(BaseModel): + """Configuration for Twitter service.""" + + consumer_key: str + consumer_secret: str + client_id: str + client_secret: str + access_token: str + access_secret: str + bearer_token: str + user_id: str + whitelisted_authors: List[str] + whitelist_enabled: bool = False + + +class TweetRepository: + """Repository for handling tweet storage and retrieval.""" + + async def store_tweet(self, tweet_data: TweetData) -> None: + """Store tweet and author data in the database.""" + try: + authors = await backend.list_x_users( + filters=XUserFilter(user_id=tweet_data.author_id) + ) + if authors and len(authors) > 0: + author = authors[0] + logger.debug( + f"Found existing author {tweet_data.author_id} in database" + ) + else: + logger.info(f"Creating new author record for {tweet_data.author_id}") + author = await backend.create_x_user( + XUserCreate(user_id=tweet_data.author_id) + ) + + logger.debug(f"Creating tweet record for {tweet_data.tweet_id}") + await backend.create_x_tweet( + XTweetCreate( + author_id=author.id, + tweet_id=tweet_data.tweet_id, + message=tweet_data.text, + conversation_id=tweet_data.conversation_id, + ) + ) + except Exception as e: + logger.error(f"Failed to store tweet/author data: {str(e)}", exc_info=True) + raise + + async def update_tweet_analysis( + self, + tweet_id: str, + is_worthy: bool, + tweet_type: str, + confidence_score: float, + reason: str, + ) -> None: + """Update tweet with analysis results.""" + try: + tweets = await backend.list_x_tweets( + filters=XTweetFilter(tweet_id=tweet_id) + ) + if tweets and len(tweets) > 0: + logger.debug("Updating existing tweet record with analysis results") + await backend.update_x_tweet( + x_tweet_id=tweets[0].id, + update_data=XTweetBase( + is_worthy=is_worthy, + tweet_type=tweet_type, + confidence_score=confidence_score, + reason=reason, + ), + ) + except Exception as e: + logger.error(f"Failed to update tweet analysis: {str(e)}", exc_info=True) + raise + + async def get_conversation_history( + self, conversation_id: str, user_id: str + ) -> List[Dict[str, str]]: + """Retrieve conversation history for a given conversation ID.""" + try: + conversation_tweets = await backend.list_x_tweets( + 
filters=XTweetFilter(conversation_id=conversation_id) + ) + logger.debug( + f"Retrieved {len(conversation_tweets)} tweets from conversation {conversation_id}" + ) + return [ + { + "role": "user" if tweet.author_id != user_id else "assistant", + "content": tweet.message, + } + for tweet in conversation_tweets + if tweet.message + ] + except Exception as e: + logger.error( + f"Failed to retrieve conversation history: {str(e)}", exc_info=True + ) + raise + + +class TweetAnalyzer: + """Handles tweet analysis and processing logic.""" + + def __init__(self, tweet_repository: TweetRepository): + """Initialize with dependencies.""" + self.tweet_repository = tweet_repository + + async def analyze_tweet_content( + self, tweet_data: TweetData, history: List[Dict[str, str]] + ) -> Dict: + """Analyze tweet content and determine if it needs processing.""" + logger.info( + f"Analyzing tweet {tweet_data.tweet_id} from user {tweet_data.author_id}" + ) + logger.debug(f"Tweet content: {tweet_data.text}") + logger.debug(f"Conversation history size: {len(history)} messages") + + filtered_content = "\n".join( + f"{msg['role']}: {msg['content']}" for msg in history + ) + + try: + # Import here to avoid circular dependency + from services.ai.workflows import analyze_tweet + + analysis_result = await analyze_tweet( + tweet_text=tweet_data.text, + filtered_content=filtered_content, + ) + + logger.info( + f"Analysis complete for {tweet_data.tweet_id} - " + f"Worthy: {analysis_result['is_worthy']}, " + f"Type: {analysis_result['tweet_type']}, " + f"Confidence: {analysis_result['confidence_score']}" + ) + logger.debug(f"Analysis reason: {analysis_result['reason']}") + + await self.tweet_repository.update_tweet_analysis( + tweet_id=tweet_data.tweet_id, + is_worthy=analysis_result["is_worthy"], + tweet_type=analysis_result["tweet_type"], + confidence_score=analysis_result["confidence_score"], + reason=analysis_result["reason"], + ) + + return analysis_result + except Exception as e: + logger.error( + f"Analysis failed for tweet {tweet_data.tweet_id}: {str(e)}", + exc_info=True, + ) + raise + + +class TwitterMentionHandler: + """Handles Twitter mention processing and responses.""" + + def __init__( + self, + config: TwitterConfig, + tweet_repository: TweetRepository, + tweet_analyzer: TweetAnalyzer, + ): + """Initialize with dependencies.""" + self.config = config + self.tweet_repository = tweet_repository + self.tweet_analyzer = tweet_analyzer + self.twitter_service = TwitterService( + consumer_key=config.consumer_key, + consumer_secret=config.consumer_secret, + client_id=config.client_id, + client_secret=config.client_secret, + access_token=config.access_token, + access_secret=config.access_secret, + bearer_token=config.bearer_token, + ) + + async def _post_response( + self, tweet_data: TweetData, response_content: str + ) -> None: + """Post a response tweet. 
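The repository above turns stored tweets into chat turns by comparing each tweet's author to the bot's own user id. A minimal sketch of that mapping over plain dicts; the field names mirror the tweet records used above and the sample values are made up.

def to_chat_history(tweets: list[dict], bot_user_id: str) -> list[dict]:
    """Map stored tweets to chat turns: the bot's own tweets become assistant turns."""
    return [
        {
            "role": "assistant" if tweet["author_id"] == bot_user_id else "user",
            "content": tweet["message"],
        }
        for tweet in tweets
        if tweet.get("message")
    ]


tweets = [
    {"author_id": "111", "message": "@aibtcdevagent launch a DAO for my project"},
    {"author_id": "999", "message": "Working on it!"},
]
print(to_chat_history(tweets, bot_user_id="999"))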
+ + Args: + tweet_data: Data about the tweet to respond to + response_content: Content of the response tweet + """ + logger.debug(f"Posting response to tweet {tweet_data.tweet_id}") + await self.twitter_service._ainitialize() + await self.twitter_service._apost_tweet( + text=response_content, reply_in_reply_to_tweet_id=tweet_data.tweet_id + ) + + def _is_author_whitelisted(self, author_id: str) -> bool: + """Check if the author is in the whitelist.""" + logger.debug(f"Checking whitelist status for author {author_id}") + is_whitelisted = str(author_id) in self.config.whitelisted_authors + logger.debug(f"Author {author_id} whitelist status: {is_whitelisted}") + return is_whitelisted + + async def _handle_mention(self, mention) -> None: + """Process a single mention for analysis.""" + tweet_data = TweetData.from_tweepy_tweet(mention) + + logger.debug( + f"Processing mention - Tweet ID: {tweet_data.tweet_id}, " + f"Author: {tweet_data.author_id}, Text: {tweet_data.text[:50]}..." + ) + + # Check if tweet exists in our database + try: + existing_tweets = await backend.list_x_tweets( + filters=XTweetFilter(tweet_id=tweet_data.tweet_id) + ) + if existing_tweets and len(existing_tweets) > 0: + logger.debug( + f"Tweet {tweet_data.tweet_id} already exists in database, skipping processing" + ) + return + except Exception as e: + logger.error( + f"Database error checking tweet {tweet_data.tweet_id}: {str(e)}", + exc_info=True, + ) + raise + + await self.tweet_repository.store_tweet(tweet_data) + + try: + if self.config.whitelist_enabled: + if self._is_author_whitelisted(tweet_data.author_id): + logger.info( + f"Processing whitelisted mention {tweet_data.tweet_id} " + f"from user {tweet_data.author_id}" + ) + await self._process_mention(tweet_data) + else: + logger.warning( + f"Skipping non-whitelisted mention {tweet_data.tweet_id} " + f"from user {tweet_data.author_id}" + ) + else: + logger.debug("Whitelist check disabled, processing all mentions") + await self._process_mention(tweet_data) + except Exception as e: + logger.error( + f"Failed to process mention {tweet_data.tweet_id}: {str(e)}", + exc_info=True, + ) + raise + + async def _process_mention(self, tweet_data: TweetData) -> None: + """Process mention after validation.""" + history = await self.tweet_repository.get_conversation_history( + tweet_data.conversation_id, self.config.user_id + ) + + analysis_result = await self.tweet_analyzer.analyze_tweet_content( + tweet_data, history + ) + + if analysis_result["is_worthy"] and analysis_result["tool_request"]: + logger.info( + f"Queueing tool request for tweet {tweet_data.tweet_id} - " + f"Tool: {analysis_result['tool_request'].tool_name}" + ) + backend.create_queue_message( + new_queue_message=QueueMessageCreate( + type="daos", + tweet_id=tweet_data.tweet_id, + conversation_id=tweet_data.conversation_id, + message=analysis_result["tool_request"].model_dump(), + ) + ) + elif analysis_result["is_worthy"]: + logger.debug( + f"Tweet {tweet_data.tweet_id} worthy but no tool request present" + ) + else: + logger.debug(f"Tweet {tweet_data.tweet_id} not worthy of processing") + + async def process_mentions(self) -> None: + """Process all new mentions for analysis.""" + try: + logger.info("Starting Twitter mention processing") + await self.twitter_service._ainitialize() + mentions = await self.twitter_service.get_mentions_by_user_id( + self.config.user_id + ) + + if not mentions: + logger.info("No new mentions found to process") + return + + logger.info(f"Found {len(mentions)} mentions to process") + for 
mention in mentions: + try: + logger.debug(f"Processing mention {mention.id}") + await self._handle_mention(mention) + except Exception as e: + logger.error( + f"Failed to process mention {mention.id}: {str(e)}", + exc_info=True, + ) + continue + + except Exception as e: + logger.error(f"Twitter mention processing failed: {str(e)}", exc_info=True) + raise + + +def create_twitter_handler() -> TwitterMentionHandler: + """Factory function to create TwitterMentionHandler with dependencies.""" + twitter_config = TwitterConfig( + consumer_key=config.twitter.consumer_key, + consumer_secret=config.twitter.consumer_secret, + client_id=config.twitter.client_id, + client_secret=config.twitter.client_secret, + access_token=config.twitter.access_token, + access_secret=config.twitter.access_secret, + bearer_token=config.twitter.bearer_token, + user_id=config.twitter.automated_user_id, + whitelisted_authors=config.twitter.whitelisted_authors, + whitelist_enabled=False, + ) + + tweet_repository = TweetRepository() + tweet_analyzer = TweetAnalyzer(tweet_repository) + + return TwitterMentionHandler(twitter_config, tweet_repository, tweet_analyzer) + + +def create_twitter_service_from_config() -> TwitterService: + """Factory function to create TwitterService using config credentials.""" + return TwitterService( + consumer_key=config.twitter.consumer_key, + consumer_secret=config.twitter.consumer_secret, + client_id=config.twitter.client_id, + client_secret=config.twitter.client_secret, + access_token=config.twitter.access_token, + access_secret=config.twitter.access_secret, + bearer_token=config.twitter.bearer_token, + ) + + +# Global handler instance +handler = create_twitter_handler() + + +async def execute_twitter_job() -> None: + """Execute the Twitter job to process mentions.""" + try: + if not handler.config.user_id: + logger.error( + "Cannot execute Twitter job: AIBTC_TWITTER_AUTOMATED_USER_ID not set" + ) + return + + logger.info("Starting Twitter mention check job") + await handler.process_mentions() + logger.info("Successfully completed Twitter mention check job") + + except Exception as e: + logger.error(f"Twitter job execution failed: {str(e)}", exc_info=True) + raise diff --git a/services/websocket.py b/services/communication/websocket_service.py similarity index 98% rename from services/websocket.py rename to services/communication/websocket_service.py index a6da7e96..fb4a14c5 100644 --- a/services/websocket.py +++ b/services/communication/websocket_service.py @@ -1,8 +1,7 @@ import asyncio import datetime import time -from typing import Any, Dict, Optional, Set, Tuple -from uuid import UUID +from typing import Any, Dict, Optional from fastapi import WebSocket @@ -229,7 +228,9 @@ async def mark_jobs_disconnected(self, session_id: str) -> None: """Mark all jobs associated with a session as disconnected.""" try: # Import here to avoid circular imports - from services.chat import mark_jobs_disconnected_for_session + from services.processing.streaming_service import ( + mark_jobs_disconnected_for_session, + ) await mark_jobs_disconnected_for_session(session_id) logger.debug(f"Marked jobs disconnected for session: {session_id}") diff --git a/services/core/__init__.py b/services/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/services/core/chat_service.py b/services/core/chat_service.py new file mode 100644 index 00000000..8e2266cd --- /dev/null +++ b/services/core/chat_service.py @@ -0,0 +1,366 @@ +import asyncio +from typing import Any, Dict, List, Optional +from uuid 
import UUID + +from backend.factory import backend +from backend.models import JobFilter, Profile, StepFilter +from lib.logger import configure_logger +from services.processing.chat_processor import ChatProcessor +from services.processing.streaming_service import running_jobs + +logger = configure_logger(__name__) + + +class ChatService: + """Main service for chat processing and management.""" + + @staticmethod + async def process_chat_message( + job_id: UUID, + thread_id: UUID, + profile: Profile, + agent_id: Optional[UUID], + input_str: str, + history: List[Dict[str, Any]], + output_queue: asyncio.Queue, + ) -> None: + """Process a chat message. + + Args: + job_id: The ID of the job + thread_id: The ID of the thread + profile: The user's profile + agent_id: Optional agent ID + input_str: The input message + history: Chat history + output_queue: Queue for streaming output + """ + # Initialize job info in running_jobs + job_id_str = str(job_id) + running_jobs[job_id_str] = { + "queue": output_queue, + "thread_id": thread_id, + "agent_id": agent_id, + "task": None, + "connection_active": True, + } + + processor = ChatProcessor( + job_id=job_id, + thread_id=thread_id, + profile=profile, + agent_id=agent_id, + input_str=input_str, + history=history, + output_queue=output_queue, + ) + + try: + await processor.process_stream() + except Exception as e: + logger.error(f"Error processing chat message: {e}") + raise + finally: + # Clean up job info + if job_id_str in running_jobs: + del running_jobs[job_id_str] + + @staticmethod + def get_job_history(thread_id: UUID, profile_id: UUID) -> List[Dict[str, Any]]: + """Get the chat history for a specific job. + + Args: + thread_id: The ID of the thread + profile_id: The ID of the profile + + Returns: + List of formatted chat messages + """ + logger.debug( + f"Fetching job history for thread {thread_id} and profile {profile_id}" + ) + jobs = backend.list_jobs(filters=JobFilter(thread_id=thread_id)) + formatted_history = [] + for job in jobs: + if job.profile_id == profile_id: + # Get all steps first to determine proper timing + steps = backend.list_steps(filters=StepFilter(job_id=job.id)) + + # Create a timeline of all messages per job + job_messages = [] + + # Add user message + job_messages.append( + { + "role": "user", + "content": job.input, + "created_at": job.created_at.isoformat(), + "thread_id": str(thread_id), + "type": "user", + } + ) + + # Add planning steps with original timestamps + planning_steps = [ + step + for step in steps + if step.status == "planning" or step.thought == "Planning Phase" + ] + for step in planning_steps: + job_messages.append( + { + "role": "assistant", + "content": step.content, + "created_at": step.created_at.isoformat(), + "thread_id": str(thread_id), + "type": "step", + "status": "planning", + "thought": step.thought, + } + ) + + # Add the final response with correct timestamp + has_final_result = job.result and job.result.strip() + if has_final_result: + # For the final result, look for its step to get the correct timestamp + final_step = None + for step in steps: + if step.status == "complete" and step.content == job.result: + final_step = step + break + + # Use the job's result as the primary response + job_messages.append( + { + "role": "assistant", + "content": job.result, + "created_at": ( + final_step.created_at.isoformat() + if final_step + else job.created_at.isoformat() + ), + "thread_id": str(thread_id), + "type": "token", + "status": "complete", + } + ) + else: + # If no job result, look for complete 
step content + final_steps = [ + step + for step in steps + if step.status == "complete" and step.content and not step.tool + ] + + if final_steps: + # Use the last complete step's content + final_step = max(final_steps, key=lambda s: s.created_at) + job_messages.append( + { + "role": "assistant", + "content": final_step.content, + "created_at": final_step.created_at.isoformat(), + "thread_id": str(thread_id), + "type": "token", + "status": "complete", + } + ) + elif steps: + # No complete steps with content, use all non-tool steps to reconstruct + content_steps = [ + step + for step in steps + if step.content + and not step.tool + and step.status != "planning" + ] + + if content_steps: + # Sort by creation time + content_steps.sort(key=lambda s: s.created_at) + # Use all content joined together + combined_content = " ".join( + step.content for step in content_steps + ) + + job_messages.append( + { + "role": "assistant", + "content": combined_content, + "created_at": job.created_at.isoformat(), + "thread_id": str(thread_id), + "type": "token", + "status": "complete", + } + ) + + # Add tool steps with their original timestamps + for step in steps: + if step.tool: + tool_msg = { + "role": "assistant", + "type": "tool", + "status": step.status or "complete", + "tool": step.tool, + "tool_input": step.tool_input, + "tool_output": step.tool_output, + "created_at": step.created_at.isoformat(), + "thread_id": str(thread_id), + } + if step.agent_id: + tool_msg["agent_id"] = str(step.agent_id) + job_messages.append(tool_msg) + + # Sort this job's messages by timestamp + job_messages.sort(key=lambda x: x["created_at"]) + + # Add all job messages to the history + formatted_history.extend(job_messages) + + # Sort the full history again to ensure proper ordering + formatted_history.sort(key=lambda x: x["created_at"]) + + logger.debug(f"Found {len(formatted_history)} messages in job history") + return formatted_history + + @staticmethod + def get_thread_history(thread_id: UUID, profile_id: UUID) -> List[Dict[str, Any]]: + """Get the complete thread history including all steps. 
+ + Args: + thread_id: The ID of the thread + profile_id: The ID of the profile + + Returns: + List of formatted chat messages and steps + """ + logger.debug( + f"Fetching thread history for thread {thread_id} and profile {profile_id}" + ) + thread = backend.get_thread(thread_id=thread_id) + if thread.profile_id != profile_id: + logger.warning( + f"Profile {profile_id} not authorized for thread {thread_id}" + ) + return [] + + jobs = backend.list_jobs(filters=JobFilter(thread_id=thread.id)) + formatted_history = [] + if jobs: + for job in jobs: + logger.debug(f"Processing job {job}") + # Get all steps for this job first to determine proper timing + steps = backend.list_steps(filters=StepFilter(job_id=job.id)) + + # Create a timeline of all messages per job + job_messages = [] + + # Add user input message + job_messages.append( + { + "role": "user", + "content": job.input, + "created_at": job.created_at.isoformat(), + "thread_id": str(thread.id), + "type": "user", + } + ) + + # Add planning steps with their original timestamps + planning_steps = [ + step + for step in steps + if step.status == "planning" or step.thought == "Planning Phase" + ] + for step in planning_steps: + job_messages.append( + { + "role": step.role, + "content": step.content, + "created_at": step.created_at.isoformat(), + "thought": step.thought, + "thread_id": str(thread.id), + "type": "step", + "status": "planning", + } + ) + + # Add result or final content with correct timestamp + has_final_result = job.result and job.result.strip() + if has_final_result: + # For the final result, look for its step to get the correct timestamp + final_step = None + for step in steps: + if step.status == "complete" and step.content == job.result: + final_step = step + break + + # Use the job's result + job_messages.append( + { + "role": "assistant", + "content": job.result, + "created_at": ( + final_step.created_at.isoformat() + if final_step + else job.created_at.isoformat() + ), + "thread_id": str(thread.id), + "type": "token", + "status": "complete", + } + ) + else: + # No result in job, find the final step's content + final_steps = [ + step + for step in steps + if step.status == "complete" and step.content and not step.tool + ] + + if final_steps: + # Use the last complete step's content + final_step = max(final_steps, key=lambda s: s.created_at) + job_messages.append( + { + "role": "assistant", + "content": final_step.content, + "created_at": final_step.created_at.isoformat(), + "thread_id": str(thread.id), + "type": "token", + "status": "complete", + } + ) + + # Add tool steps with their original timestamps + for step in steps: + if step.tool: + tool_msg = { + "role": "assistant", + "content": step.content if step.content else "", + "created_at": step.created_at.isoformat(), + "thread_id": str(thread.id), + "type": "tool", + "status": step.status or "complete", + "tool": step.tool, + "tool_input": step.tool_input, + "tool_output": step.tool_output, + } + if step.agent_id: + tool_msg["agent_id"] = str(step.agent_id) + job_messages.append(tool_msg) + + # Sort this job's messages by timestamp + job_messages.sort(key=lambda x: x["created_at"]) + + # Add all job messages to the history + formatted_history.extend(job_messages) + + logger.debug(f"Found {len(formatted_history)} messages in thread history") + return formatted_history + + +# For backward compatibility +process_chat_message = ChatService.process_chat_message +get_job_history = ChatService.get_job_history +get_thread_history = ChatService.get_thread_history diff --git 
a/services/daos.py b/services/core/dao_service.py similarity index 100% rename from services/daos.py rename to services/core/dao_service.py diff --git a/services/discord/__init__.py b/services/discord/__init__.py deleted file mode 100644 index 49c4efd2..00000000 --- a/services/discord/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -Discord service package for sending messages to Discord channels via webhooks. -""" - -from services.discord.discord_factory import create_discord_service -from services.discord.discord_service import DiscordService - -__all__ = ["DiscordService", "create_discord_service"] diff --git a/services/infrastructure/__init__.py b/services/infrastructure/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/services/infrastructure/job_management/__init__.py b/services/infrastructure/job_management/__init__.py new file mode 100644 index 00000000..03b9903b --- /dev/null +++ b/services/infrastructure/job_management/__init__.py @@ -0,0 +1,54 @@ +"""Job management system for the aibtcdev backend. + +This module provides a comprehensive job management system with: +- Auto-discovery of job tasks +- Priority-based job execution +- Retry mechanisms with exponential backoff +- Dead letter queue for failed jobs +- Metrics collection and monitoring +- Concurrent job execution with semaphore control +""" + +from .auto_discovery import discover_and_register_tasks, get_task_summary, reload_tasks +from .base import BaseTask, JobContext, JobType, RunnerConfig, RunnerResult +from .decorators import JobMetadata, JobPriority, JobRegistry, job, scheduled_job +from .executor import JobExecutor, get_executor +from .monitoring import ( + MetricsCollector, + PerformanceMonitor, + SystemMetrics, + get_metrics_collector, + get_performance_monitor, + reset_metrics_collector, +) +from .registry import execute_runner_job + +__all__ = [ + # Core classes + "BaseTask", + "JobContext", + "JobType", + "RunnerConfig", + "RunnerResult", + # Decorators and metadata + "JobMetadata", + "JobPriority", + "JobRegistry", + "job", + "scheduled_job", + # Execution + "JobExecutor", + "get_executor", + "execute_runner_job", + # Monitoring + "MetricsCollector", + "PerformanceMonitor", + "SystemMetrics", + "get_metrics_collector", + "get_performance_monitor", + "reset_metrics_collector", + # Auto-discovery + "discover_and_register_tasks", + "get_task_summary", + "reload_tasks", +] diff --git a/services/infrastructure/job_management/auto_discovery.py b/services/infrastructure/job_management/auto_discovery.py new file mode 100644 index 00000000..b8be505c --- /dev/null +++ b/services/infrastructure/job_management/auto_discovery.py @@ -0,0 +1,133 @@ +"""Auto-discovery module for job tasks.""" + +import importlib +from pathlib import Path + +from lib.logger import configure_logger + +from .decorators import JobRegistry + +logger = configure_logger(__name__) + + +def discover_and_register_tasks() -> None: + """Discover and register all job tasks from the tasks directory.""" + try: + tasks_dir = Path(__file__).parent / "tasks" + if not tasks_dir.exists(): + logger.warning(f"Tasks directory not found: {tasks_dir}") + return + + # Import all Python modules in the tasks directory + tasks_package = "services.infrastructure.job_management.tasks" + discovered_modules = [] + + # Get all .py files in the tasks directory + for file_path in tasks_dir.glob("*.py"): + if file_path.name.startswith("__"): + continue # Skip __init__.py and __pycache__ + + module_name = file_path.stem + full_module_name = 
f"{tasks_package}.{module_name}" + + try: + logger.debug(f"Importing task module: {full_module_name}") + importlib.import_module(full_module_name) + discovered_modules.append(module_name) + logger.debug(f"Successfully imported: {full_module_name}") + except ImportError as e: + logger.warning( + f"Failed to import task module {full_module_name}: {str(e)}" + ) + except Exception as e: + logger.error( + f"Error importing task module {full_module_name}: {str(e)}", + exc_info=True, + ) + + # Log discovered tasks + registered_tasks = JobRegistry.list_jobs() + if registered_tasks: + logger.info( + f"Auto-discovered and registered {len(registered_tasks)} job tasks from {len(discovered_modules)} modules:" + ) + for job_type, metadata in registered_tasks.items(): + logger.info( + f" - {job_type}: {metadata.name} (enabled: {metadata.enabled}, interval: {metadata.interval_seconds}s)" + ) + else: + logger.warning("No job tasks were discovered and registered") + + # Validate dependencies + dependency_issues = JobRegistry.validate_dependencies() + if dependency_issues: + logger.warning("Dependency validation issues found:") + for issue in dependency_issues: + logger.warning(f" - {issue}") + else: + logger.debug("All job dependencies validated successfully") + + # Log dynamic job types that were created + from .base import JobType + + all_job_types = JobType.get_all_job_types() + if all_job_types: + logger.info( + f"Dynamic job types registered: {', '.join(all_job_types.keys())}" + ) + + except Exception as e: + logger.error(f"Error during task discovery: {str(e)}", exc_info=True) + + +def reload_tasks() -> None: + """Reload all tasks (useful for development).""" + logger.info("Reloading all job tasks...") + + # Clear existing registry + JobRegistry.clear_registry() + + # Clear dynamic job types + from .base import JobType + + JobType._job_types = {} + + # Re-discover tasks + discover_and_register_tasks() + + logger.info("Task reload completed") + + +def get_task_summary() -> dict: + """Get a summary of all discovered tasks.""" + registered_tasks = JobRegistry.list_jobs() + enabled_tasks = JobRegistry.list_enabled_jobs() + + summary = { + "total_tasks": len(registered_tasks), + "enabled_tasks": len(enabled_tasks), + "disabled_tasks": len(registered_tasks) - len(enabled_tasks), + "tasks_by_priority": {}, + "tasks_by_type": {}, + "dependency_issues": JobRegistry.validate_dependencies(), + "dynamic_job_types": list(registered_tasks.keys()), + } + + # Group by priority + for job_type, metadata in registered_tasks.items(): + priority = str(metadata.priority) + if priority not in summary["tasks_by_priority"]: + summary["tasks_by_priority"][priority] = [] + summary["tasks_by_priority"][priority].append(str(job_type)) + + # Group by type (enabled/disabled) + summary["tasks_by_type"]["enabled"] = [str(jt) for jt in enabled_tasks.keys()] + summary["tasks_by_type"]["disabled"] = [ + str(jt) for jt, meta in registered_tasks.items() if not meta.enabled + ] + + return summary + + +# Auto-discover tasks when this module is imported +discover_and_register_tasks() diff --git a/services/infrastructure/job_management/base.py b/services/infrastructure/job_management/base.py new file mode 100644 index 00000000..aeb09b40 --- /dev/null +++ b/services/infrastructure/job_management/base.py @@ -0,0 +1,310 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Any, Dict, Generic, List, Optional, Type, TypeVar + +from lib.logger import configure_logger + +logger = 
configure_logger(__name__) + + +@dataclass +class RunnerResult: + """Base class for runner operation results.""" + + success: bool + message: str + error: Optional[Exception] = None + + +T = TypeVar("T", bound=RunnerResult) + + +@dataclass +class RunnerConfig: + """Configuration class for runners.""" + + pass + + +class JobType: + """Dynamic job types that are registered at runtime via auto-discovery. + + No hardcoded job types - all jobs are discovered and registered dynamically + using the @job decorator in task files. + """ + + _job_types: Dict[str, "JobType"] = {} + + def __init__(self, value: str): + self._value = value.lower() + self._name = value.upper() + + @property + def value(self) -> str: + return self._value + + @property + def name(self) -> str: + return self._name + + def __str__(self) -> str: + return self._value + + def __repr__(self) -> str: + return f"JobType.{self._name}" + + def __eq__(self, other) -> bool: + if isinstance(other, JobType): + return self._value == other._value + if isinstance(other, str): + return self._value == other.lower() + return False + + def __hash__(self) -> int: + return hash(self._value) + + @classmethod + def get_or_create(cls, job_type: str) -> "JobType": + """Get existing job type or create new one.""" + normalized = job_type.lower() + if normalized not in cls._job_types: + cls._job_types[normalized] = cls(normalized) + return cls._job_types[normalized] + + @classmethod + def register(cls, job_type: str) -> "JobType": + """Register a new job type and return the enum member.""" + return cls.get_or_create(job_type) + + @classmethod + def get_all_job_types(cls) -> Dict[str, str]: + """Get all registered job types.""" + return {jt._value: jt._value for jt in cls._job_types.values()} + + @classmethod + def list_all(cls) -> List["JobType"]: + """Get all registered job type instances.""" + return list(cls._job_types.values()) + + def __call__(self, value: str) -> "JobType": + """Allow calling like an enum constructor.""" + return self.get_or_create(value) + + +@dataclass +class JobContext: + """Context information for job execution.""" + + job_type: JobType + config: RunnerConfig + parameters: Optional[Dict[str, Any]] = None + retry_count: int = 0 + max_retries: int = 3 + + # Enhanced context fields + execution_id: Optional[str] = None + worker_name: Optional[str] = None + timeout_seconds: Optional[int] = None + priority: Optional[str] = None + metadata: Dict[str, Any] = field(default_factory=dict) + + +class BaseTask(ABC, Generic[T]): + """Base class for all tasks.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + self.config = config or RunnerConfig() + self._start_time: Optional[float] = None + + @property + def task_name(self) -> str: + """Get the task name for logging purposes.""" + return self.__class__.__name__ + + def _log_task_start(self) -> None: + """Log task start with standard format.""" + import time + + self._start_time = time.time() + logger.debug(f"Starting task: {self.task_name}") + logger.debug(f"{self.task_name}: Configuration - {self.config}") + + def _log_task_completion(self, results: List[T]) -> None: + """Log task completion with standard format and metrics.""" + import time + + if not self._start_time: + return + + duration = time.time() - self._start_time + success_count = len([r for r in results if r.success]) + failure_count = len([r for r in results if not r.success]) + + logger.info( + f"Completed task: {self.task_name} in {duration:.2f}s - " + f"Success: {success_count}, Failures: {failure_count}" 
+ ) + + if failure_count > 0: + for result in results: + if not result.success: + logger.error(f"{self.task_name} failure: {result.message}") + + @classmethod + def get_result_class(cls) -> Type[RunnerResult]: + """Get the result class for this task.""" + return cls.__orig_bases__[0].__args__[0] # type: ignore + + async def validate(self, context: JobContext) -> bool: + """Validate that the task can be executed. + + This method provides a validation pipeline: + 1. Configuration validation + 2. Resource availability validation + 3. Prerequisites validation + 4. Task-specific validation + """ + try: + logger.debug(f"Starting validation for {self.task_name}") + + # Step 1: Configuration validation + if not await self._validate_config(context): + logger.warning(f"{self.task_name}: Configuration validation failed") + return False + + # Step 2: Resource availability validation + if not await self._validate_resources(context): + logger.debug(f"{self.task_name}: Resource validation failed") + return False + + # Step 3: Prerequisites validation + if not await self._validate_prerequisites(context): + logger.debug(f"{self.task_name}: Prerequisites validation failed") + return False + + # Step 4: Task-specific validation + if not await self._validate_task_specific(context): + logger.debug(f"{self.task_name}: Task-specific validation failed") + return False + + logger.debug(f"{self.task_name}: All validation checks passed") + return True + except Exception as e: + logger.error( + f"Error in validation for {self.task_name}: {str(e)}", exc_info=True + ) + return False + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + return True + + async def _validate_prerequisites(self, context: JobContext) -> bool: + """Validate task prerequisites.""" + return True + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability (network, APIs, etc.).""" + return True + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + return True + + async def execute(self, context: JobContext) -> List[T]: + """Execute the task with given context.""" + self._log_task_start() + results = [] + + try: + # Validate before execution + if not await self.validate(context): + logger.warning( + f"{self.task_name}: Validation failed, skipping execution" + ) + result_class = self.get_result_class() + return [result_class(success=False, message="Validation failed")] + + # Prepare context + prepared_context = await self._prepare_context(context) + + # Execute the task implementation + results = await self._execute_impl(prepared_context) + self._log_task_completion(results) + + except Exception as e: + logger.error(f"Error executing {self.task_name}: {str(e)}", exc_info=True) + + # Try custom error handling + recovery_results = await self._handle_execution_error(e, context) + if recovery_results is not None: + results = recovery_results + logger.info(f"Task {self.task_name} recovered from error: {str(e)}") + else: + # Default error handling + result_class = self.get_result_class() + results = [ + result_class( + success=False, + message=f"Error executing task: {str(e)}", + error=e, + ) + ] + + finally: + # Always perform cleanup + try: + await self._post_execution_cleanup(context, results) + except Exception as cleanup_error: + logger.warning( + f"Cleanup error in {self.task_name}: {str(cleanup_error)}" + ) + + return results + + @abstractmethod + async def _execute_impl(self, context: 
JobContext) -> List[T]: + """Implementation of task execution logic. + This method should be implemented by subclasses.""" + pass + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[T]]: + """Handle execution errors with recovery logic. + + Override this method to implement custom error recovery. + Return None to use default error handling, or return results + to continue as if execution succeeded. + """ + return None + + async def _post_execution_cleanup( + self, context: JobContext, results: List[T] + ) -> None: + """Perform cleanup after task execution. + + This is called after both successful and failed executions. + Override this method to implement custom cleanup logic. + """ + pass + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if a specific error should trigger a retry. + + Override this method to implement custom retry logic based on error type. + """ + # Default: retry on network errors, API timeouts, temporary failures + retry_errors = ( + ConnectionError, + TimeoutError, + # Add more error types as needed + ) + return isinstance(error, retry_errors) + + async def _prepare_context(self, context: JobContext) -> JobContext: + """Prepare and enrich the job context before execution. + + Override this method to add task-specific context data. + """ + return context diff --git a/services/infrastructure/job_management/decorators.py b/services/infrastructure/job_management/decorators.py new file mode 100644 index 00000000..aeb37b0c --- /dev/null +++ b/services/infrastructure/job_management/decorators.py @@ -0,0 +1,272 @@ +"""Job registration decorators and metadata system.""" + +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union + +from lib.logger import configure_logger + +from .base import BaseTask, JobType + +logger = configure_logger(__name__) + +T = TypeVar("T", bound=BaseTask) + + +class JobPriority(Enum): + """Job execution priority levels.""" + + LOW = 1 + NORMAL = 2 + MEDIUM = 3 + HIGH = 4 + CRITICAL = 5 + + def __str__(self): + return self.name.lower() + + +@dataclass +class JobMetadata: + """Metadata for job configuration and execution.""" + + # Basic job information + job_type: JobType + name: str + description: str = "" + version: str = "1.0.0" + + # Execution configuration + enabled: bool = True + interval_seconds: int = 60 + priority: JobPriority = JobPriority.NORMAL + max_retries: int = 3 + retry_delay_seconds: int = 30 + timeout_seconds: Optional[int] = None + + # Concurrency settings + max_concurrent: int = 1 + batch_size: int = 10 + + # Dependencies and requirements + requires_wallet: bool = False + requires_twitter: bool = False + requires_discord: bool = False + requires_blockchain: bool = False + requires_ai: bool = False + dependencies: List[str] = field(default_factory=list) + + # Advanced settings + enable_dead_letter_queue: bool = True + preserve_order: bool = False + idempotent: bool = False + + # Configuration overrides + config_overrides: Dict[str, Any] = field(default_factory=dict) + + +class JobRegistry: + """Enhanced job registry with auto-discovery and metadata.""" + + _jobs: Dict[JobType, Type[BaseTask]] = {} + _metadata: Dict[JobType, JobMetadata] = {} + _instances: Dict[JobType, BaseTask] = {} + + @classmethod + def register( + cls, + job_type: Union[JobType, str], + metadata: Optional[JobMetadata] = None, + **kwargs, + ) -> Callable[[Type[T]], Type[T]]: + 
"""Decorator to register a job task with metadata. + + Args: + job_type: The job type enum or string + metadata: Optional job metadata + **kwargs: Additional metadata fields + + Returns: + Decorator function + + Example: + @JobRegistry.register( + "new_job_type", # Can use string - will auto-create JobType + name="New Job", + description="Does new job things", + interval_seconds=120, + max_concurrent=2 + ) + class NewJobTask(BaseTask[NewJobResult]): + pass + """ + + def decorator(task_class: Type[T]) -> Type[T]: + # Convert string to JobType or create new one + if isinstance(job_type, str): + job_enum = JobType.get_or_create(job_type) + logger.info(f"Auto-registered job type: {job_type} -> {job_enum}") + else: + job_enum = job_type + + # Create metadata if not provided + if metadata is None: + # Extract metadata from kwargs or use defaults + meta = JobMetadata( + job_type=job_enum, + name=kwargs.get("name", task_class.__name__), + description=kwargs.get("description", task_class.__doc__ or ""), + **{ + k: v + for k, v in kwargs.items() + if k not in ["name", "description"] + }, + ) + else: + # Update metadata with any additional kwargs + for key, value in kwargs.items(): + if hasattr(metadata, key): + setattr(metadata, key, value) + meta = metadata + + # Register the task + cls._jobs[job_enum] = task_class + cls._metadata[job_enum] = meta + + logger.info( + f"Registered job: {job_enum} -> {task_class.__name__} " + f"(enabled: {meta.enabled}, interval: {meta.interval_seconds}s)" + ) + + return task_class + + return decorator + + @classmethod + def get_task_class(cls, job_type: JobType) -> Optional[Type[BaseTask]]: + """Get the task class for a job type.""" + return cls._jobs.get(job_type) + + @classmethod + def get_metadata(cls, job_type: JobType) -> Optional[JobMetadata]: + """Get the metadata for a job type.""" + return cls._metadata.get(job_type) + + @classmethod + def get_instance(cls, job_type: JobType) -> Optional[BaseTask]: + """Get or create a task instance for a job type.""" + if job_type not in cls._instances: + task_class = cls.get_task_class(job_type) + if task_class: + cls._instances[job_type] = task_class() + return cls._instances.get(job_type) + + @classmethod + def list_jobs(cls) -> Dict[JobType, JobMetadata]: + """List all registered jobs and their metadata.""" + return cls._metadata.copy() + + @classmethod + def list_enabled_jobs(cls) -> Dict[JobType, JobMetadata]: + """List only enabled jobs.""" + return { + job_type: metadata + for job_type, metadata in cls._metadata.items() + if metadata.enabled + } + + @classmethod + def get_jobs_by_priority(cls, priority: JobPriority) -> Dict[JobType, JobMetadata]: + """Get jobs filtered by priority.""" + return { + job_type: metadata + for job_type, metadata in cls._metadata.items() + if metadata.priority == priority + } + + @classmethod + def clear_registry(cls) -> None: + """Clear all registered jobs (useful for testing).""" + cls._jobs.clear() + cls._metadata.clear() + cls._instances.clear() + + @classmethod + def validate_dependencies(cls) -> List[str]: + """Validate job dependencies and return any issues.""" + issues = [] + all_job_types = set(cls._jobs.keys()) + + for job_type, metadata in cls._metadata.items(): + for dep in metadata.dependencies: + try: + dep_type = JobType.get_or_create(dep) + if dep_type not in all_job_types: + issues.append( + f"Job {job_type} depends on unregistered job: {dep}" + ) + except Exception: + issues.append(f"Job {job_type} has invalid dependency: {dep}") + + return issues + + @classmethod + def 
get_all_job_types(cls) -> List[str]: + """Get all registered job type strings.""" + return [str(job_type) for job_type in cls._jobs.keys()] + + +# Convenience function for job registration +def job( + job_type: Union[JobType, str], + name: Optional[str] = None, + description: Optional[str] = None, + **kwargs, +) -> Callable[[Type[T]], Type[T]]: + """Convenience decorator for job registration. + + Args: + job_type: The job type (can be string - will auto-create JobType) + name: Human-readable job name + description: Job description + **kwargs: Additional metadata fields + + Example: + @job("my_new_job", name="My New Job", interval_seconds=30) + class MyNewJobTask(BaseTask[MyJobResult]): + pass + """ + return JobRegistry.register( + job_type=job_type, + name=name, + description=description, + **kwargs, + ) + + +# Convenience function for quick job registration with metadata +def scheduled_job( + job_type: Union[JobType, str], + interval_seconds: int, + name: Optional[str] = None, + **kwargs, +) -> Callable[[Type[T]], Type[T]]: + """Decorator for scheduled jobs with interval configuration. + + Args: + job_type: The job type (can be string - will auto-create JobType) + interval_seconds: How often to run the job + name: Human-readable job name + **kwargs: Additional metadata fields + + Example: + @scheduled_job("my_scheduled_job", 120, name="My Scheduled Job") + class MyScheduledJobTask(BaseTask[MyJobResult]): + pass + """ + return JobRegistry.register( + job_type=job_type, + interval_seconds=interval_seconds, + name=name, + **kwargs, + ) diff --git a/services/infrastructure/job_management/executor.py b/services/infrastructure/job_management/executor.py new file mode 100644 index 00000000..79bc20fd --- /dev/null +++ b/services/infrastructure/job_management/executor.py @@ -0,0 +1,430 @@ +"""Enhanced job execution system with scalability features.""" + +import asyncio +import time +from dataclasses import dataclass, field +from datetime import datetime, timedelta +from enum import Enum +from typing import Any, Dict, List, Optional, Set +from uuid import UUID + +from backend.factory import backend +from backend.models import QueueMessage, QueueMessageBase, QueueMessageFilter +from lib.logger import configure_logger + +from .base import JobContext, JobType +from .decorators import JobMetadata, JobPriority, JobRegistry + +logger = configure_logger(__name__) + + +class JobStatus(Enum): + """Job execution status.""" + + PENDING = "pending" + RUNNING = "running" + COMPLETED = "completed" + FAILED = "failed" + RETRYING = "retrying" + DEAD_LETTER = "dead_letter" + + +@dataclass +class JobExecution: + """Track individual job execution.""" + + id: UUID + job_type: JobType + status: JobStatus = JobStatus.PENDING + attempt: int = 1 + max_attempts: int = 3 + started_at: Optional[datetime] = None + completed_at: Optional[datetime] = None + error: Optional[str] = None + result: Optional[Any] = None + retry_after: Optional[datetime] = None + metadata: Dict[str, Any] = field(default_factory=dict) + + +class PriorityQueue: + """Priority-based job queue with concurrency control.""" + + def __init__(self): + self._queues: Dict[JobPriority, asyncio.Queue] = { + priority: asyncio.Queue() for priority in JobPriority + } + self._active_jobs: Dict[JobType, Set[UUID]] = {} + self._semaphores: Dict[JobType, asyncio.Semaphore] = {} + self._executions: Dict[UUID, JobExecution] = {} + + async def enqueue( + self, message: QueueMessage, priority: JobPriority = JobPriority.NORMAL + ) -> UUID: + """Add a job to the priority 
queue.""" + # Convert message type to JobType, handling both DynamicQueueMessageType and string + type_value = ( + message.type.value if hasattr(message.type, "value") else str(message.type) + ) + job_type = JobType.get_or_create(type_value) + execution = JobExecution( + id=message.id, job_type=job_type, metadata={"message": message} + ) + + self._executions[message.id] = execution + await self._queues[priority].put(execution) + + logger.debug(f"Enqueued job {message.id} with priority {priority}") + return message.id + + async def dequeue(self, priority: JobPriority) -> Optional[JobExecution]: + """Get next job from priority queue.""" + try: + # Try to get a job without blocking + execution = self._queues[priority].get_nowait() + return execution + except asyncio.QueueEmpty: + return None + + async def get_next_job(self) -> Optional[JobExecution]: + """Get the next job from highest priority queue.""" + # Check queues in priority order (highest first) + for priority in reversed(list(JobPriority)): + execution = await self.dequeue(priority) + if execution: + return execution + return None + + def set_concurrency_limit(self, job_type: JobType, max_concurrent: int) -> None: + """Set concurrency limit for a job type.""" + self._semaphores[job_type] = asyncio.Semaphore(max_concurrent) + self._active_jobs[job_type] = set() + + async def acquire_slot(self, job_type: JobType, job_id: UUID) -> bool: + """Acquire a concurrency slot for job execution.""" + if job_type not in self._semaphores: + return True # No limit set + + semaphore = self._semaphores[job_type] + try: + await asyncio.wait_for(semaphore.acquire(), timeout=0.1) + self._active_jobs[job_type].add(job_id) + return True + except asyncio.TimeoutError: + return False # No slots available + + def release_slot(self, job_type: JobType, job_id: UUID) -> None: + """Release a concurrency slot.""" + if job_type in self._semaphores: + self._semaphores[job_type].release() + if job_type in self._active_jobs: + self._active_jobs[job_type].discard(job_id) + + def get_execution(self, job_id: UUID) -> Optional[JobExecution]: + """Get job execution by ID.""" + return self._executions.get(job_id) + + def update_execution(self, job_id: UUID, **kwargs) -> None: + """Update job execution status.""" + if job_id in self._executions: + execution = self._executions[job_id] + for key, value in kwargs.items(): + if hasattr(execution, key): + setattr(execution, key, value) + + +class RetryManager: + """Manages job retry logic with exponential backoff.""" + + @staticmethod + def should_retry(execution: JobExecution, metadata: JobMetadata) -> bool: + """Determine if a job should be retried.""" + if execution.attempt >= metadata.max_retries: + return False + + # Check if enough time has passed for retry + if execution.retry_after and datetime.now() < execution.retry_after: + return False + + return True + + @staticmethod + def calculate_retry_delay( + attempt: int, base_delay: int = 30, max_delay: int = 3600 + ) -> int: + """Calculate retry delay with exponential backoff.""" + delay = base_delay * (2 ** (attempt - 1)) + return min(delay, max_delay) + + @staticmethod + def schedule_retry(execution: JobExecution, metadata: JobMetadata) -> None: + """Schedule a job for retry.""" + delay = RetryManager.calculate_retry_delay( + execution.attempt, metadata.retry_delay_seconds + ) + execution.retry_after = datetime.now() + timedelta(seconds=delay) + execution.status = JobStatus.RETRYING + execution.attempt += 1 + + logger.info( + f"Scheduling retry for job {execution.id} " + 
f"(attempt {execution.attempt}) in {delay} seconds" + ) + + +class DeadLetterQueue: + """Handles jobs that have failed all retry attempts.""" + + def __init__(self): + self._dead_jobs: Dict[UUID, JobExecution] = {} + + def add_dead_job(self, execution: JobExecution) -> None: + """Add a job to the dead letter queue.""" + execution.status = JobStatus.DEAD_LETTER + execution.completed_at = datetime.now() + self._dead_jobs[execution.id] = execution + + logger.error( + f"Job {execution.id} moved to dead letter queue after " + f"{execution.attempt} attempts. Error: {execution.error}" + ) + + def get_dead_jobs(self) -> List[JobExecution]: + """Get all jobs in the dead letter queue.""" + return list(self._dead_jobs.values()) + + def remove_dead_job(self, job_id: UUID) -> Optional[JobExecution]: + """Remove a job from the dead letter queue.""" + return self._dead_jobs.pop(job_id, None) + + +class JobExecutor: + """Enhanced job executor with scalability features.""" + + def __init__(self): + self.priority_queue = PriorityQueue() + self.retry_manager = RetryManager() + self.dead_letter_queue = DeadLetterQueue() + self._running = False + self._worker_tasks: List[asyncio.Task] = [] + + async def start(self, num_workers: int = 5) -> None: + """Start the job executor with specified number of workers.""" + if self._running: + logger.warning("JobExecutor is already running") + return + + self._running = True + + # Initialize concurrency limits from job metadata + for job_type, metadata in JobRegistry.list_jobs().items(): + self.priority_queue.set_concurrency_limit(job_type, metadata.max_concurrent) + + # Start worker tasks + for i in range(num_workers): + task = asyncio.create_task(self._worker(f"worker-{i}")) + self._worker_tasks.append(task) + + logger.info(f"Started JobExecutor with {num_workers} workers") + + async def stop(self) -> None: + """Stop the job executor.""" + if not self._running: + return + + self._running = False + + # Cancel all worker tasks + for task in self._worker_tasks: + task.cancel() + + # Wait for tasks to complete + if self._worker_tasks: + await asyncio.gather(*self._worker_tasks, return_exceptions=True) + + self._worker_tasks.clear() + logger.info("Stopped JobExecutor") + + async def _worker(self, worker_name: str) -> None: + """Worker coroutine that processes jobs from the queue.""" + logger.debug(f"Starting worker: {worker_name}") + + while self._running: + try: + # Get next job from priority queue + execution = await self.priority_queue.get_next_job() + if not execution: + await asyncio.sleep(0.1) # Brief pause if no jobs + continue + + # Check if we can acquire a slot for this job type + acquired = await self.priority_queue.acquire_slot( + execution.job_type, execution.id + ) + if not acquired: + # Put job back in queue and try later + metadata = JobRegistry.get_metadata(execution.job_type) + if metadata: + await self.priority_queue.enqueue( + execution.metadata["message"], metadata.priority + ) + await asyncio.sleep(0.5) + continue + + # Execute the job + try: + await self._execute_job(execution, worker_name) + finally: + # Always release the slot + self.priority_queue.release_slot(execution.job_type, execution.id) + + except Exception as e: + logger.error(f"Worker {worker_name} error: {str(e)}", exc_info=True) + await asyncio.sleep(1) # Pause on error + + async def _execute_job(self, execution: JobExecution, worker_name: str) -> None: + """Execute a single job.""" + job_id = execution.id + job_type = execution.job_type + start_time = time.time() + + 
logger.debug(f"{worker_name} executing job {job_id} ({job_type})") + + # Record execution start in metrics + from .monitoring import get_metrics_collector + + metrics = get_metrics_collector() + metrics.record_execution_start(execution, worker_name) + + # Update execution status + self.priority_queue.update_execution( + job_id, status=JobStatus.RUNNING, started_at=datetime.now() + ) + + try: + # Get job metadata and task instance + metadata = JobRegistry.get_metadata(job_type) + task_instance = JobRegistry.get_instance(job_type) + + if not metadata or not task_instance: + raise ValueError(f"Job type {job_type} not properly registered") + + # Create job context + from .base import RunnerConfig + + context = JobContext( + job_type=job_type, + config=RunnerConfig(), + retry_count=execution.attempt - 1, + max_retries=metadata.max_retries, + ) + + # Execute the task with timeout + if metadata.timeout_seconds: + results = await asyncio.wait_for( + task_instance.execute(context), timeout=metadata.timeout_seconds + ) + else: + results = await task_instance.execute(context) + + # Calculate execution duration + duration = time.time() - start_time + + # Update execution with results + self.priority_queue.update_execution( + job_id, + status=JobStatus.COMPLETED, + completed_at=datetime.now(), + result=results, + ) + + # Record successful execution in metrics + metrics.record_execution_completion(execution, duration) + + # Mark message as processed in database + message = execution.metadata["message"] + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(is_processed=True), + ) + + logger.info(f"{worker_name} completed job {job_id} in {duration:.2f}s") + + except Exception as e: + error_msg = str(e) + duration = time.time() - start_time + + logger.error(f"{worker_name} job {job_id} failed: {error_msg}") + + # Record failed execution in metrics + metrics.record_execution_failure(execution, error_msg, duration) + + # Update execution with error + self.priority_queue.update_execution( + job_id, status=JobStatus.FAILED, error=error_msg + ) + + # Handle retry or dead letter + metadata = JobRegistry.get_metadata(job_type) + if metadata and self.retry_manager.should_retry(execution, metadata): + metrics.record_execution_retry(execution) + self.retry_manager.schedule_retry(execution, metadata) + # Re-enqueue for retry + message = execution.metadata["message"] + await self.priority_queue.enqueue(message, metadata.priority) + else: + # Move to dead letter queue + metrics.record_dead_letter(execution) + self.dead_letter_queue.add_dead_job(execution) + + async def enqueue_pending_jobs(self) -> int: + """Load pending jobs from database and enqueue them.""" + enqueued_count = 0 + + for job_type, metadata in JobRegistry.list_enabled_jobs().items(): + try: + # Get pending messages for this job type + filters = QueueMessageFilter(type=job_type.value, is_processed=False) + pending_messages = backend.list_queue_messages(filters=filters) + + # Enqueue each message + for message in pending_messages: + await self.priority_queue.enqueue(message, metadata.priority) + enqueued_count += 1 + + if pending_messages: + logger.debug(f"Enqueued {len(pending_messages)} {job_type} jobs") + + except Exception as e: + logger.error( + f"Error enqueuing jobs for {job_type}: {str(e)}", exc_info=True + ) + + if enqueued_count > 0: + logger.info(f"Enqueued {enqueued_count} pending jobs") + + return enqueued_count + + def get_stats(self) -> Dict[str, Any]: + """Get executor statistics.""" + stats = { + 
"running": self._running, + "worker_count": len(self._worker_tasks), + "dead_letter_count": len(self.dead_letter_queue.get_dead_jobs()), + "active_jobs": { + str(job_type): len(jobs) + for job_type, jobs in self.priority_queue._active_jobs.items() + }, + } + return stats + + +# Global executor instance +_executor: Optional[JobExecutor] = None + + +def get_executor() -> JobExecutor: + """Get the global job executor instance.""" + global _executor + if _executor is None: + _executor = JobExecutor() + return _executor diff --git a/services/infrastructure/job_management/job_manager.py b/services/infrastructure/job_management/job_manager.py new file mode 100644 index 00000000..fbeb5038 --- /dev/null +++ b/services/infrastructure/job_management/job_manager.py @@ -0,0 +1,427 @@ +"""Enhanced Job Manager using the new job queue system.""" + +import uuid +from dataclasses import dataclass +from datetime import datetime +from typing import Any, Dict, List, Optional + +from apscheduler.schedulers.asyncio import AsyncIOScheduler + +from config import config +from lib.logger import configure_logger + +from .auto_discovery import get_task_summary +from .decorators import JobMetadata, JobRegistry +from .executor import get_executor +from .monitoring import get_metrics_collector, get_performance_monitor + +logger = configure_logger(__name__) + + +@dataclass +class JobScheduleConfig: + """Enhanced configuration for scheduled jobs.""" + + job_type: str + metadata: JobMetadata + enabled: bool + scheduler_id: str + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for API responses.""" + return { + "job_type": self.job_type, + "name": self.metadata.name, + "description": self.metadata.description, + "enabled": self.enabled, + "interval_seconds": self.metadata.interval_seconds, + "priority": str(self.metadata.priority), + "max_retries": self.metadata.max_retries, + "max_concurrent": self.metadata.max_concurrent, + "requires_twitter": self.metadata.requires_twitter, + "requires_discord": self.metadata.requires_discord, + "requires_wallet": self.metadata.requires_wallet, + "scheduler_id": self.scheduler_id, + } + + +class JobManager: + """Enhanced manager for scheduled jobs using the new system.""" + + def __init__(self): + self._scheduler: Optional[AsyncIOScheduler] = None + self._executor = get_executor() + self._metrics = get_metrics_collector() + self._performance_monitor = get_performance_monitor() + self._is_running = False + + @property + def is_running(self) -> bool: + """Check if the job manager is running.""" + return self._is_running + + def get_all_jobs(self) -> List[JobScheduleConfig]: + """Get configurations for all registered jobs.""" + configs = [] + + # Get all registered jobs from the new system + registered_jobs = JobRegistry.list_jobs() + + for job_type, metadata in registered_jobs.items(): + # Check if job is enabled (can be overridden by config) + enabled = self._is_job_enabled(job_type, metadata) + + config_item = JobScheduleConfig( + job_type=str(job_type), + metadata=metadata, + enabled=enabled, + scheduler_id=f"{job_type.value}_scheduler", + ) + configs.append(config_item) + + return configs + + def _is_job_enabled(self, job_type, metadata: JobMetadata) -> bool: + """Check if a job is enabled based on metadata and config overrides.""" + # First check the metadata default + if not metadata.enabled: + return False + + # Check for config overrides using dynamic approach + job_type_str = str(job_type).lower() + + # Try config override with standard naming pattern + 
config_attr = f"{job_type_str}_enabled" + if hasattr(config.scheduler, config_attr): + return getattr(config.scheduler, config_attr, metadata.enabled) + + # Try alternative naming pattern for backwards compatibility + alt_config_attr = f"{job_type_str}_runner_enabled" + if hasattr(config.scheduler, alt_config_attr): + return getattr(config.scheduler, alt_config_attr, metadata.enabled) + + # Use metadata default if no config override found + return metadata.enabled + + def _get_job_interval(self, job_type, metadata: JobMetadata) -> int: + """Get job interval, checking config overrides.""" + job_type_str = str(job_type).lower() + + # Try config override with standard naming pattern + config_attr = f"{job_type_str}_interval_seconds" + if hasattr(config.scheduler, config_attr): + return getattr(config.scheduler, config_attr, metadata.interval_seconds) + + # Try alternative naming pattern for backwards compatibility + alt_config_attr = f"{job_type_str}_runner_interval_seconds" + if hasattr(config.scheduler, alt_config_attr): + return getattr(config.scheduler, alt_config_attr, metadata.interval_seconds) + + # Use metadata default if no config override found + return metadata.interval_seconds + + async def _execute_job_via_executor(self, job_type: str) -> None: + """Execute a job through the enhanced executor system with proper concurrency control.""" + try: + from backend.models import QueueMessage, QueueMessageType + + from .base import JobType + from .decorators import JobRegistry + + # Convert job_type string to JobType enum + job_type_enum = JobType.get_or_create(job_type) + + # Get job metadata to check if it should run + metadata = JobRegistry.get_metadata(job_type_enum) + if not metadata: + logger.error(f"No metadata found for job type: {job_type}") + return + + # Create a synthetic queue message for scheduled execution + # This allows the job to go through the proper executor pipeline with concurrency control + synthetic_message = QueueMessage( + id=uuid.uuid4(), + type=QueueMessageType.get_or_create(job_type), + message={ + "scheduled_execution": True, + "triggered_at": str(datetime.now()), + }, + dao_id=None, + tweet_id=None, + conversation_id=None, + is_processed=False, + result=None, + created_at=datetime.now(), + updated_at=datetime.now(), + ) + + # Enqueue the synthetic message with the job's priority + job_id = await self._executor.priority_queue.enqueue( + synthetic_message, metadata.priority + ) + + logger.debug( + f"Enqueued scheduled job {job_type} with ID {job_id} (priority: {metadata.priority})" + ) + + except Exception as e: + logger.error( + f"Error enqueuing scheduled job {job_type}: {str(e)}", exc_info=True + ) + + def schedule_jobs(self, scheduler: AsyncIOScheduler) -> bool: + """Schedule all enabled jobs using the new system.""" + self._scheduler = scheduler + + # Get all job configurations + jobs = self.get_all_jobs() + + # Schedule enabled jobs + any_enabled = False + scheduled_count = 0 + + for job_config in jobs: + if job_config.enabled: + any_enabled = True + + # Get the actual interval (might be overridden by config) + interval_seconds = self._get_job_interval( + job_config.job_type, job_config.metadata + ) + + # Schedule the job + scheduler.add_job( + self._execute_job_via_executor, + "interval", + seconds=interval_seconds, + id=job_config.scheduler_id, + args=[job_config.job_type], + max_instances=1, # Prevent overlapping executions + misfire_grace_time=60, + replace_existing=True, # Allow replacing existing jobs + ) + + scheduled_count += 1 + logger.info( + 
f"Scheduled {job_config.metadata.name} " + f"(priority: {job_config.metadata.priority}, " + f"interval: {interval_seconds}s, " + f"max_concurrent: {job_config.metadata.max_concurrent})" + ) + else: + logger.info(f"{job_config.metadata.name} is disabled") + + if scheduled_count > 0: + logger.info(f"Successfully scheduled {scheduled_count} jobs") + + return any_enabled + + async def start_executor(self, num_workers: int = 5) -> None: + """Start the job executor.""" + await self._executor.start(num_workers) + self._is_running = True + logger.info(f"Job executor started with {num_workers} workers") + + async def stop_executor(self) -> None: + """Stop the job executor.""" + await self._executor.stop() + self._is_running = False + logger.info("Job executor stopped") + + def get_executor_stats(self) -> Dict[str, Any]: + """Get executor statistics.""" + return self._executor.get_stats() + + def get_job_metrics(self, job_type: Optional[str] = None) -> Dict[str, Any]: + """Get job execution metrics.""" + from .base import JobType + + job_type_enum = None + if job_type: + try: + job_type_enum = JobType(job_type) + except ValueError: + pass + + metrics = self._metrics.get_metrics(job_type_enum) + return { + str(jt): { + "total_executions": m.total_executions, + "successful_executions": m.successful_executions, + "failed_executions": m.failed_executions, + "retried_executions": m.retried_executions, + "dead_letter_executions": m.dead_letter_executions, + "avg_execution_time": m.avg_execution_time, + "min_execution_time": m.min_execution_time, + "max_execution_time": m.max_execution_time, + "current_running": m.current_running, + "max_concurrent_reached": m.max_concurrent_reached, + "last_execution": ( + m.last_execution.isoformat() if m.last_execution else None + ), + "last_success": m.last_success.isoformat() if m.last_success else None, + "last_failure": m.last_failure.isoformat() if m.last_failure else None, + } + for jt, m in metrics.items() + } + + def get_system_health(self) -> Dict[str, Any]: + """Get overall system health status.""" + system_metrics = self._metrics.get_system_metrics() + health_status = self._metrics.get_health_status() + performance_summary = self._performance_monitor.get_performance_summary() + task_summary = get_task_summary() + executor_stats = self.get_executor_stats() + + return { + "status": health_status["status"], + "uptime_seconds": system_metrics["uptime_seconds"], + "executor": { + "running": executor_stats["running"], + "worker_count": executor_stats["worker_count"], + "dead_letter_count": executor_stats["dead_letter_count"], + "active_jobs": executor_stats["active_jobs"], + }, + "metrics": { + "total_executions": system_metrics["total_executions"], + "success_rate": system_metrics["success_rate"], + "total_dead_letter": system_metrics["total_dead_letter"], + }, + "tasks": { + "total_registered": task_summary["total_tasks"], + "enabled": task_summary["enabled_tasks"], + "disabled": task_summary["disabled_tasks"], + "dependency_issues": len(task_summary["dependency_issues"]), + }, + "performance": { + "system_health": performance_summary.get("system_health", "unknown"), + "healthy_job_types": performance_summary.get("healthy_job_types", 0), + "problematic_job_types": performance_summary.get( + "problematic_job_types", [] + ), + }, + "issues": health_status["issues"], + "alerts": performance_summary.get("alerts", []), + } + + def get_job_details(self, job_type: str) -> Optional[Dict[str, Any]]: + """Get detailed information about a specific job type.""" + try: + from 
.base import JobType + + job_type_enum = JobType(job_type) + + metadata = JobRegistry.get_metadata(job_type_enum) + if not metadata: + return None + + # Get metrics for this job + metrics = self._metrics.get_metrics(job_type_enum) + job_metrics = metrics.get(job_type_enum) + + # Get recent events + recent_events = self._metrics.get_recent_events(job_type_enum, limit=10) + + return { + "job_type": job_type, + "metadata": { + "name": metadata.name, + "description": metadata.description, + "version": metadata.version, + "enabled": metadata.enabled, + "interval_seconds": metadata.interval_seconds, + "priority": str(metadata.priority), + "max_retries": metadata.max_retries, + "retry_delay_seconds": metadata.retry_delay_seconds, + "timeout_seconds": metadata.timeout_seconds, + "max_concurrent": metadata.max_concurrent, + "batch_size": metadata.batch_size, + "requires_wallet": metadata.requires_wallet, + "requires_twitter": metadata.requires_twitter, + "requires_discord": metadata.requires_discord, + "dependencies": metadata.dependencies, + "enable_dead_letter_queue": metadata.enable_dead_letter_queue, + "preserve_order": metadata.preserve_order, + "idempotent": metadata.idempotent, + }, + "metrics": { + "total_executions": ( + job_metrics.total_executions if job_metrics else 0 + ), + "successful_executions": ( + job_metrics.successful_executions if job_metrics else 0 + ), + "failed_executions": ( + job_metrics.failed_executions if job_metrics else 0 + ), + "retried_executions": ( + job_metrics.retried_executions if job_metrics else 0 + ), + "dead_letter_executions": ( + job_metrics.dead_letter_executions if job_metrics else 0 + ), + "avg_execution_time": ( + job_metrics.avg_execution_time if job_metrics else 0 + ), + "min_execution_time": ( + job_metrics.min_execution_time if job_metrics else None + ), + "max_execution_time": ( + job_metrics.max_execution_time if job_metrics else None + ), + "current_running": ( + job_metrics.current_running if job_metrics else 0 + ), + "max_concurrent_reached": ( + job_metrics.max_concurrent_reached if job_metrics else 0 + ), + "last_execution": ( + job_metrics.last_execution.isoformat() + if job_metrics and job_metrics.last_execution + else None + ), + "last_success": ( + job_metrics.last_success.isoformat() + if job_metrics and job_metrics.last_success + else None + ), + "last_failure": ( + job_metrics.last_failure.isoformat() + if job_metrics and job_metrics.last_failure + else None + ), + }, + "recent_events": [ + { + "execution_id": str(event.execution_id), + "event_type": event.event_type, + "timestamp": event.timestamp.isoformat(), + "duration": event.duration, + "error": event.error, + "attempt": event.attempt, + "metadata": event.metadata, + } + for event in recent_events + ], + } + + except ValueError: + return None + + async def trigger_job_execution(self, job_type: str) -> Dict[str, Any]: + """Manually trigger execution of a specific job type.""" + try: + await self._execute_job_via_executor(job_type) + return { + "success": True, + "message": f"Triggered execution for job type: {job_type}", + "job_type": job_type, + } + except Exception as e: + logger.error(f"Error triggering job {job_type}: {str(e)}", exc_info=True) + return { + "success": False, + "message": f"Failed to trigger job: {str(e)}", + "job_type": job_type, + "error": str(e), + } diff --git a/services/infrastructure/job_management/monitoring.py b/services/infrastructure/job_management/monitoring.py new file mode 100644 index 00000000..e4d1fc00 --- /dev/null +++ 
b/services/infrastructure/job_management/monitoring.py @@ -0,0 +1,548 @@ +"""Job monitoring and observability system.""" + +from dataclasses import dataclass, field +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional +from uuid import UUID + +from lib.logger import configure_logger + +from .base import JobType + +logger = configure_logger(__name__) + + +@dataclass +class JobMetrics: + """Metrics for job execution.""" + + job_type: JobType + total_executions: int = 0 + successful_executions: int = 0 + failed_executions: int = 0 + retried_executions: int = 0 + dead_letter_executions: int = 0 + + # Timing metrics + total_execution_time: float = 0.0 + min_execution_time: Optional[float] = None + max_execution_time: Optional[float] = None + avg_execution_time: float = 0.0 + + # Recent metrics (last hour) + recent_executions: int = 0 + recent_failures: int = 0 + recent_avg_time: float = 0.0 + + # Concurrency metrics + current_running: int = 0 + max_concurrent_reached: int = 0 + + last_execution: Optional[datetime] = None + last_success: Optional[datetime] = None + last_failure: Optional[datetime] = None + + +@dataclass +class ExecutionEvent: + """Individual execution event for detailed tracking.""" + + execution_id: UUID + job_type: JobType + event_type: str # started, completed, failed, retried, dead_letter + timestamp: datetime + duration: Optional[float] = None + error: Optional[str] = None + attempt: int = 1 + metadata: Dict[str, Any] = field(default_factory=dict) + + +class MetricsCollector: + """Collects and aggregates job execution metrics.""" + + def __init__(self, max_events: int = 10000): + self._metrics: Dict[JobType, JobMetrics] = {} + self._events: List[ExecutionEvent] = [] + self._max_events = max_events + self._start_time = datetime.now() + + def record_execution_start(self, execution: Any, worker_name: str = "") -> None: + """Record the start of a job execution.""" + job_type = execution.job_type + + # Initialize metrics if needed + if job_type not in self._metrics: + self._metrics[job_type] = JobMetrics(job_type=job_type) + + metrics = self._metrics[job_type] + metrics.total_executions += 1 + metrics.current_running += 1 + metrics.max_concurrent_reached = max( + metrics.max_concurrent_reached, metrics.current_running + ) + metrics.last_execution = datetime.now() + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="started", + timestamp=datetime.now(), + attempt=execution.attempt, + metadata={"worker": worker_name}, + ) + self._add_event(event) + + logger.debug(f"Started tracking execution {execution.id} ({job_type})") + + def record_execution_completion(self, execution: Any, duration: float) -> None: + """Record the completion of a job execution.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.current_running = max(0, metrics.current_running - 1) + metrics.successful_executions += 1 + metrics.last_success = datetime.now() + + # Update timing metrics + self._update_timing_metrics(metrics, duration) + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="completed", + timestamp=datetime.now(), + duration=duration, + attempt=execution.attempt, + ) + self._add_event(event) + + logger.debug( + f"Completed execution {execution.id} ({job_type}) in {duration:.2f}s" + ) + + def record_execution_failure( + self, 
execution: Any, error: str, duration: float + ) -> None: + """Record a job execution failure.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.current_running = max(0, metrics.current_running - 1) + metrics.failed_executions += 1 + metrics.last_failure = datetime.now() + + # Update timing metrics (even for failures) + self._update_timing_metrics(metrics, duration) + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="failed", + timestamp=datetime.now(), + duration=duration, + error=error, + attempt=execution.attempt, + ) + self._add_event(event) + + logger.debug( + f"Failed execution {execution.id} ({job_type}) after {duration:.2f}s: {error}" + ) + + def record_execution_retry(self, execution: Any) -> None: + """Record a job execution retry.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.retried_executions += 1 + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="retried", + timestamp=datetime.now(), + attempt=execution.attempt, + ) + self._add_event(event) + + logger.debug( + f"Retrying execution {execution.id} ({job_type}), attempt {execution.attempt}" + ) + + def record_dead_letter(self, execution: Any) -> None: + """Record a job being moved to dead letter queue.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.dead_letter_executions += 1 + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="dead_letter", + timestamp=datetime.now(), + error=getattr(execution, "error", None), + attempt=execution.attempt, + ) + self._add_event(event) + + logger.warning( + f"Dead letter execution {execution.id} ({job_type}) after {execution.attempt} attempts" + ) + + def _update_timing_metrics(self, metrics: JobMetrics, duration: float) -> None: + """Update timing metrics with new execution duration.""" + # Update min/max + if metrics.min_execution_time is None or duration < metrics.min_execution_time: + metrics.min_execution_time = duration + if metrics.max_execution_time is None or duration > metrics.max_execution_time: + metrics.max_execution_time = duration + + # Update average + total_time = metrics.total_execution_time + duration + total_count = metrics.successful_executions + metrics.failed_executions + + metrics.total_execution_time = total_time + if total_count > 0: + metrics.avg_execution_time = total_time / total_count + + def _add_event(self, event: ExecutionEvent) -> None: + """Add an event to the event log.""" + self._events.append(event) + + # Trim events if we exceed max + if len(self._events) > self._max_events: + # Remove oldest 20% to avoid frequent trimming + trim_count = int(self._max_events * 0.2) + self._events = self._events[trim_count:] + + def get_metrics( + self, job_type: Optional[JobType] = None + ) -> Dict[JobType, JobMetrics]: + """Get metrics for all job types or a specific type.""" + if job_type: + return { + job_type: self._metrics.get(job_type, JobMetrics(job_type=job_type)) + } + return self._metrics.copy() + + def get_recent_events( + self, job_type: Optional[JobType] = None, limit: int = 100 + ) -> List[ExecutionEvent]: + 
"""Get recent execution events.""" + events = self._events + + if job_type: + events = [e for e in events if e.job_type == job_type] + + # Return most recent events + return sorted(events, key=lambda e: e.timestamp, reverse=True)[:limit] + + def get_system_metrics(self) -> Dict[str, Any]: + """Get overall system metrics.""" + total_executions = sum(m.total_executions for m in self._metrics.values()) + total_successful = sum(m.successful_executions for m in self._metrics.values()) + total_failed = sum(m.failed_executions for m in self._metrics.values()) + total_dead_letter = sum( + m.dead_letter_executions for m in self._metrics.values() + ) + + success_rate = ( + (total_successful / total_executions) if total_executions > 0 else 0 + ) + + return { + "uptime_seconds": (datetime.now() - self._start_time).total_seconds(), + "total_executions": total_executions, + "total_successful": total_successful, + "total_failed": total_failed, + "total_dead_letter": total_dead_letter, + "success_rate": success_rate, + "active_job_types": len(self._metrics), + "total_events": len(self._events), + } + + def get_health_status(self) -> Dict[str, Any]: + """Get system health status.""" + now = datetime.now() + health = {"status": "healthy", "issues": []} + + for job_type, metrics in self._metrics.items(): + # Check failure rate + if metrics.total_executions > 10: + failure_rate = metrics.failed_executions / metrics.total_executions + if failure_rate > 0.5: # More than 50% failures + health["issues"].append( + f"{job_type}: High failure rate ({failure_rate:.1%})" + ) + + # Check if job hasn't run recently (if it should be running) + if metrics.last_execution: + time_since_last = now - metrics.last_execution + if time_since_last > timedelta(hours=2): + health["issues"].append( + f"{job_type}: No executions in {time_since_last}" + ) + + if health["issues"]: + health["status"] = "degraded" if len(health["issues"]) < 3 else "unhealthy" + + return health + + def reset_metrics(self, job_type: Optional[JobType] = None) -> None: + """Reset metrics for a job type or all types.""" + if job_type: + if job_type in self._metrics: + self._metrics[job_type] = JobMetrics(job_type=job_type) + else: + self._metrics.clear() + self._events.clear() + + logger.info(f"Reset metrics for {job_type or 'all job types'}") + + # Legacy compatibility methods for the new executor + def get_job_metrics(self, job_type: Optional[str] = None) -> Dict[str, Any]: + """Get job metrics in the format expected by the new executor.""" + if job_type: + job_type_enum = JobType.get_or_create(job_type) + metrics = self._metrics.get(job_type_enum) + if metrics: + return { + "job_type": str(metrics.job_type), + "total_executions": metrics.total_executions, + "successful_executions": metrics.successful_executions, + "failed_executions": metrics.failed_executions, + "success_rate": ( + metrics.successful_executions / metrics.total_executions + if metrics.total_executions > 0 + else 0.0 + ), + "average_duration_seconds": metrics.avg_execution_time, + "min_duration_seconds": metrics.min_execution_time or 0.0, + "max_duration_seconds": metrics.max_execution_time or 0.0, + "last_execution": ( + metrics.last_execution.isoformat() + if metrics.last_execution + else None + ), + "last_success": ( + metrics.last_success.isoformat() + if metrics.last_success + else None + ), + "last_failure": ( + metrics.last_failure.isoformat() + if metrics.last_failure + else None + ), + "retry_count": metrics.retried_executions, + "dead_letter_count": metrics.dead_letter_executions, 
+ } + else: + return {"error": f"No metrics found for job type: {job_type}"} + else: + # Return all job metrics + return { + str(job_type): { + "job_type": str(metrics.job_type), + "total_executions": metrics.total_executions, + "successful_executions": metrics.successful_executions, + "failed_executions": metrics.failed_executions, + "success_rate": ( + metrics.successful_executions / metrics.total_executions + if metrics.total_executions > 0 + else 0.0 + ), + "average_duration_seconds": metrics.avg_execution_time, + "min_duration_seconds": metrics.min_execution_time or 0.0, + "max_duration_seconds": metrics.max_execution_time or 0.0, + "last_execution": ( + metrics.last_execution.isoformat() + if metrics.last_execution + else None + ), + "last_success": ( + metrics.last_success.isoformat() + if metrics.last_success + else None + ), + "last_failure": ( + metrics.last_failure.isoformat() + if metrics.last_failure + else None + ), + "retry_count": metrics.retried_executions, + "dead_letter_count": metrics.dead_letter_executions, + } + for job_type, metrics in self._metrics.items() + } + + +class SystemMetrics: + """System-wide metrics collector for monitoring system resources.""" + + def __init__(self): + self.monitoring_active = False + + async def start_monitoring(self) -> None: + """Start system monitoring.""" + self.monitoring_active = True + logger.info("System metrics monitoring started") + + async def stop_monitoring(self) -> None: + """Stop system monitoring.""" + self.monitoring_active = False + logger.info("System metrics monitoring stopped") + + def get_current_metrics(self) -> Dict[str, Any]: + """Get current system metrics.""" + try: + import psutil + + return { + "cpu_usage": psutil.cpu_percent(interval=1), + "memory_usage": psutil.virtual_memory().percent, + "disk_usage": psutil.disk_usage("/").percent, + "timestamp": datetime.now().isoformat(), + "monitoring_active": self.monitoring_active, + } + except ImportError: + logger.warning("psutil not available, returning basic metrics") + return { + "cpu_usage": 0, + "memory_usage": 0, + "disk_usage": 0, + "timestamp": datetime.now().isoformat(), + "monitoring_active": self.monitoring_active, + } + + +class PerformanceMonitor: + """Monitors job execution performance and provides alerts.""" + + def __init__(self, metrics_collector: MetricsCollector): + self.metrics = metrics_collector + self._thresholds = { + "max_failure_rate": 0.3, # 30% + "max_avg_execution_time": 300.0, # 5 minutes + "max_dead_letter_rate": 0.1, # 10% + } + + def check_performance_issues(self) -> List[str]: + """Check for performance issues and return alerts.""" + alerts = [] + + for job_type, metrics in self.metrics.get_metrics().items(): + if metrics.total_executions < 5: + continue # Skip jobs with insufficient data + + # Check failure rate + failure_rate = metrics.failed_executions / metrics.total_executions + if failure_rate > self._thresholds["max_failure_rate"]: + alerts.append( + f"HIGH FAILURE RATE: {job_type} has {failure_rate:.1%} failure rate" + ) + + # Check average execution time + if metrics.avg_execution_time > self._thresholds["max_avg_execution_time"]: + alerts.append( + f"SLOW EXECUTION: {job_type} average time is {metrics.avg_execution_time:.1f}s" + ) + + # Check dead letter rate + dead_letter_rate = metrics.dead_letter_executions / metrics.total_executions + if dead_letter_rate > self._thresholds["max_dead_letter_rate"]: + alerts.append( + f"HIGH DEAD LETTER RATE: {job_type} has {dead_letter_rate:.1%} dead letter rate" + ) + + return alerts 
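+
+ # NOTE: get_performance_summary() below folds these same per-job thresholds
+ # into a single system-wide verdict; JobManager.get_system_health() consumes
+ # that summary and surfaces check_performance_issues() output as "alerts".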
+ + def get_performance_summary(self) -> Dict[str, Any]: + """Get a performance summary across all job types.""" + metrics_data = self.metrics.get_metrics() + + if not metrics_data: + return {"message": "No job execution data available"} + + # Calculate overall statistics + total_jobs = len(metrics_data) + healthy_jobs = 0 + problematic_jobs = [] + + for job_type, metrics in metrics_data.items(): + if metrics.total_executions < 5: + continue + + failure_rate = metrics.failed_executions / metrics.total_executions + dead_letter_rate = metrics.dead_letter_executions / metrics.total_executions + + is_healthy = ( + failure_rate <= self._thresholds["max_failure_rate"] + and metrics.avg_execution_time + <= self._thresholds["max_avg_execution_time"] + and dead_letter_rate <= self._thresholds["max_dead_letter_rate"] + ) + + if is_healthy: + healthy_jobs += 1 + else: + problematic_jobs.append(str(job_type)) + + return { + "total_job_types": total_jobs, + "healthy_job_types": healthy_jobs, + "problematic_job_types": problematic_jobs, + "system_health": ( + "good" if len(problematic_jobs) == 0 else "needs_attention" + ), + "alerts": self.check_performance_issues(), + } + + +# Global metrics collector +_metrics_collector: Optional[MetricsCollector] = None +_performance_monitor: Optional[PerformanceMonitor] = None + + +def get_metrics_collector() -> MetricsCollector: + """Get the global metrics collector instance.""" + global _metrics_collector + if _metrics_collector is None: + _metrics_collector = MetricsCollector() + return _metrics_collector + + +def get_performance_monitor() -> PerformanceMonitor: + """Get the global performance monitor instance.""" + global _performance_monitor + if _performance_monitor is None: + _performance_monitor = PerformanceMonitor(get_metrics_collector()) + return _performance_monitor + + +def reset_metrics_collector() -> None: + """Reset the global metrics collector (useful for testing).""" + global _metrics_collector + _metrics_collector = MetricsCollector() diff --git a/services/runner/registry.py b/services/infrastructure/job_management/registry.py similarity index 64% rename from services/runner/registry.py rename to services/infrastructure/job_management/registry.py index 175889e8..60fd0727 100644 --- a/services/runner/registry.py +++ b/services/infrastructure/job_management/registry.py @@ -25,6 +25,17 @@ def get_runner(cls, job_type: JobType) -> Optional[Type[BaseTask]]: """Get runner for a job type.""" return cls._runners.get(job_type) + @classmethod + def get_all_jobs(cls) -> Dict[str, Type[BaseTask]]: + """Get all registered jobs.""" + return {str(job_type): runner for job_type, runner in cls._runners.items()} + + @classmethod + def clear_registry(cls) -> None: + """Clear all registered jobs (useful for testing).""" + cls._runners.clear() + logger.debug("Cleared job registry") + async def execute_runner_job( job_type: str, parameters: Optional[Dict[str, str]] = None @@ -56,30 +67,41 @@ async def execute_runner_job( # Create context context = JobContext( - job_type=job_enum, config=RunnerConfig.from_env(), parameters=parameters + job_type=job_enum, config=RunnerConfig(), parameters=parameters ) # Create runner instance - runner = runner_class(context.config) + runner = runner_class() # Validate and execute - logger.info(f"Starting {job_type} runner") + logger.debug(f"Starting {job_type} runner") if await runner.validate(context): results = await runner.execute(context) - logger.info(f"Completed {job_type} runner") + logger.debug(f"Completed {job_type} runner") 
return results else: logger.warning(f"Validation failed for {job_type} runner") + result_class = runner_class.get_result_class() return [ - runner_class.get_result_class()( + result_class( success=False, message=f"Validation failed for {job_type} runner" ) ] except Exception as e: logger.error(f"Error in runner job: {str(e)}", exc_info=True) - return [ - runner_class.get_result_class()( - success=False, message=f"Error in runner job: {str(e)}", error=e + try: + result_class = runner_class.get_result_class() + return [ + result_class( + success=False, message=f"Error in runner job: {str(e)}", error=e + ) + ] + except Exception as inner_e: + logger.critical( + f"Could not create result object: {str(inner_e)}", exc_info=True ) - ] + # Fallback to basic RunnerResult if all else fails + from .base import RunnerResult + + return [RunnerResult(success=False, message=f"Critical error: {str(e)}")] diff --git a/services/infrastructure/job_management/tasks/__init__.py b/services/infrastructure/job_management/tasks/__init__.py new file mode 100644 index 00000000..3ea9f828 --- /dev/null +++ b/services/infrastructure/job_management/tasks/__init__.py @@ -0,0 +1,29 @@ +"""Task runners for scheduled and on-demand jobs. + +Tasks are automatically discovered and registered using the @job decorator. +To create a new task: + +1. Create a new .py file in this directory +2. Import the @job decorator: from ..decorators import job +3. Decorate your task class with @job("your_job_type", ...) +4. That's it! The task will be automatically discovered and registered. + +Example: + @job( + "my_new_job", + name="My New Job", + description="Does something useful", + interval_seconds=120, + priority=JobPriority.NORMAL, + max_concurrent=1, + ) + class MyNewJobTask(BaseTask[MyJobResult]): + async def _execute_impl(self, context: JobContext) -> List[MyJobResult]: + # Implementation here + pass +""" + +# Auto-discovery handles all task imports and registrations +# No manual imports needed here anymore! 
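+ # Discovery is expected to work by the auto_discovery module importing every
+ # module in this package so each @job decorator runs and registers its task
+ # with JobRegistry (see job_manager's use of get_task_summary).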
+ +__all__ = [] # Auto-discovery populates the registry diff --git a/services/infrastructure/job_management/tasks/agent_account_deployer.py b/services/infrastructure/job_management/tasks/agent_account_deployer.py new file mode 100644 index 00000000..e936541a --- /dev/null +++ b/services/infrastructure/job_management/tasks/agent_account_deployer.py @@ -0,0 +1,324 @@ +"""Agent account deployment task implementation.""" + +import json +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from backend.factory import backend +from backend.models import ( + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, +) +from config import config +from lib.logger import configure_logger +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerConfig, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job +from tools.agent_account import AgentAccountDeployTool + +logger = configure_logger(__name__) + + +@dataclass +class AgentAccountDeployResult(RunnerResult): + """Result of agent account deployment operation.""" + + accounts_processed: int = 0 + accounts_deployed: int = 0 + errors: List[str] = None + + def __post_init__(self): + self.errors = self.errors or [] + + +@job( + job_type="agent_account_deployer", + name="Agent Account Deployer", + description="Deploys agent account contracts with enhanced monitoring and error handling", + interval_seconds=300, # 5 minutes + priority=JobPriority.MEDIUM, + max_retries=2, + retry_delay_seconds=180, + timeout_seconds=120, + max_concurrent=1, + requires_blockchain=True, + batch_size=5, + enable_dead_letter_queue=True, +) +class AgentAccountDeployerTask(BaseTask[AgentAccountDeployResult]): + """Task runner for deploying agent account contracts with enhanced capabilities.""" + + QUEUE_TYPE = QueueMessageType.get_or_create("agent_account_deploy") + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if backend wallet configuration is available + if not config.backend_wallet or not config.backend_wallet.seed_phrase: + logger.error( + "Backend wallet seed phrase not configured for agent account deployment" + ) + return False + return True + except Exception as e: + logger.error( + f"Error validating agent account deployer config: {str(e)}", + exc_info=True, + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + # Test agent account deploy tool initialization + tool = AgentAccountDeployTool(seed_phrase=config.backend_wallet.seed_phrase) + if not tool: + logger.error("Cannot initialize AgentAccountDeployTool") + return False + + return True + except Exception as e: + logger.error(f"Resource validation failed: {str(e)}") + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + try: + # Get pending messages from the queue + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug( + f"Found {message_count} pending agent account deployment messages" + ) + + if message_count == 0: + logger.debug("No pending agent account deployment messages found") + return False + + # Validate that at least one message has valid deployment data + for message in pending_messages: + message_data = 
self._parse_message_data(message.message) + if self._validate_message_data(message_data): + logger.debug("Found valid agent account deployment message") + return True + + logger.warning("No valid deployment data found in pending messages") + return False + + except Exception as e: + logger.error( + f"Error validating agent account deployment task: {str(e)}", + exc_info=True, + ) + return False + + def _parse_message_data(self, message: Any) -> Dict[str, Any]: + """Parse message data from either string or dictionary format.""" + if message is None: + return {} + + if isinstance(message, dict): + return message + + try: + # Try to parse as JSON string + return json.loads(message) + except (json.JSONDecodeError, TypeError): + logger.error(f"Failed to parse message data: {message}") + return {} + + def _validate_message_data(self, message_data: Dict[str, Any]) -> bool: + """Validate the message data contains required fields.""" + required_fields = [ + "owner_address", + "dao_token_contract", + "dao_token_dex_contract", + ] + return all(field in message_data for field in required_fields) + + async def process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single agent account deployment message.""" + message_id = message.id + message_data = self._parse_message_data(message.message) + + logger.debug(f"Processing agent account deployment message {message_id}") + + try: + # Validate message data + if not self._validate_message_data(message_data): + error_msg = f"Invalid message data in message {message_id}" + logger.error(error_msg) + result = {"success": False, "error": error_msg} + + # Store result and mark as processed + update_data = QueueMessageBase(is_processed=True, result=result) + backend.update_queue_message(message_id, update_data) + + return result + + # Initialize the AgentAccountDeployTool with seed phrase + logger.debug("Preparing to deploy agent account") + deploy_tool = AgentAccountDeployTool( + seed_phrase=config.backend_wallet.seed_phrase + ) + + owner_address = message_data["owner_address"] + + # Execute the deployment + logger.debug("Executing deployment...") + deployment_result = await deploy_tool._arun( + owner_address=owner_address, + agent_address=message_data["owner_address"], + dao_token_contract=message_data["dao_token_contract"], + dao_token_dex_contract=message_data["dao_token_dex_contract"], + ) + logger.debug(f"Deployment result: {deployment_result}") + + result = {"success": True, "deployed": True, "result": deployment_result} + + # Store result and mark as processed + update_data = QueueMessageBase(is_processed=True, result=result) + backend.update_queue_message(message_id, update_data) + + logger.info(f"Successfully deployed agent account for message {message_id}") + + return result + + except Exception as e: + error_msg = f"Error processing message {message_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + result = {"success": False, "error": error_msg} + + # Store result even for failed processing + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + + return result + + async def get_pending_messages(self) -> List[QueueMessage]: + """Get all unprocessed messages from the queue.""" + filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) + messages = backend.list_queue_messages(filters=filters) + + # Messages are already parsed by the backend, but we log them for debugging + for message in messages: + logger.debug(f"Queue message raw data: {message.message!r}") + 
+ return messages + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors + if "invalid message data" in str(error).lower(): + return False + if "missing" in str(error).lower() and "required" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[AgentAccountDeployResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "contract" in str(error).lower(): + logger.warning(f"Blockchain/contract error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For validation errors, don't retry + return [ + AgentAccountDeployResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[AgentAccountDeployResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("Agent account deployer task cleanup completed") + + async def _execute_impl( + self, context: JobContext + ) -> List[AgentAccountDeployResult]: + """Run the agent account deployment task with batch processing.""" + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending agent account deployment messages") + + if not pending_messages: + return [ + AgentAccountDeployResult( + success=True, + message="No pending messages found", + accounts_processed=0, + accounts_deployed=0, + ) + ] + + # Process each message in batches + processed_count = 0 + deployed_count = 0 + errors = [] + batch_size = getattr(context, "batch_size", 5) + + logger.info(f"Processing {message_count} agent account deployment messages") + + # Process messages in batches + for i in range(0, len(pending_messages), batch_size): + batch = pending_messages[i : i + batch_size] + + for message in batch: + try: + result = await self.process_message(message) + processed_count += 1 + + if result.get("success"): + if result.get("deployed", False): + deployed_count += 1 + else: + errors.append(result.get("error", "Unknown error")) + + except Exception as e: + error_msg = f"Exception processing message {message.id}: {str(e)}" + errors.append(error_msg) + logger.error(error_msg, exc_info=True) + + logger.info( + f"Agent account deployment completed - Processed: {processed_count}, " + f"Deployed: {deployed_count}, Errors: {len(errors)}" + ) + + return [ + AgentAccountDeployResult( + success=True, + message=f"Processed {processed_count} account(s), deployed {deployed_count} account(s)", + accounts_processed=processed_count, + accounts_deployed=deployed_count, + errors=errors, + ) + ] + + +# Create instance for auto-registration +agent_account_deployer = AgentAccountDeployerTask() diff --git a/services/infrastructure/job_management/tasks/chain_state_monitor.py b/services/infrastructure/job_management/tasks/chain_state_monitor.py new file mode 100644 index 00000000..8a221b21 --- /dev/null +++ b/services/infrastructure/job_management/tasks/chain_state_monitor.py @@ -0,0 +1,879 @@ +"""Chain state monitoring task implementation.""" + +import uuid +from dataclasses 
import dataclass +from datetime import datetime +from typing import Any, Dict, List, Optional + +from backend.factory import backend +from config import config +from services.integrations.hiro.hiro_api import HiroApi +from lib.logger import configure_logger +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerConfig, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job +from services.integrations.webhooks.chainhook import ChainhookService +from services.integrations.webhooks.chainhook.models import ( + Apply, + BlockIdentifier, + BlockMetadata, + ChainHookData, + ChainHookInfo, + Predicate, + TransactionIdentifier, + TransactionWithReceipt, +) + +logger = configure_logger(__name__) + + +@dataclass +class ChainStateMonitorResult(RunnerResult): + """Result of chain state monitoring operation.""" + + network: str = None + is_stale: bool = False + last_updated: Optional[datetime] = None + elapsed_minutes: float = 0 + blocks_behind: int = 0 + blocks_processed: Optional[List[int]] = None + + def __post_init__(self): + """Initialize default values after dataclass creation.""" + if self.network is None: + self.network = config.network.network + if self.blocks_processed is None: + self.blocks_processed = [] + + +@job( + job_type="chain_state_monitor", + name="Chain State Monitor", + description="Monitors blockchain state for synchronization with enhanced monitoring and error handling", + interval_seconds=90, # 1.5 minutes + priority=JobPriority.MEDIUM, + max_retries=3, + retry_delay_seconds=120, + timeout_seconds=300, + max_concurrent=1, + requires_blockchain=True, + batch_size=20, + enable_dead_letter_queue=True, +) +class ChainStateMonitorTask(BaseTask[ChainStateMonitorResult]): + """Task for monitoring blockchain state and syncing with database with enhanced capabilities.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self.hiro_api = HiroApi() + self.chainhook_service = ChainhookService() + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Chain state monitor doesn't require wallet configuration + # It only reads from the blockchain, no transactions needed + return True + except Exception as e: + logger.error( + f"Error validating chain state monitor config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for blockchain monitoring.""" + try: + # Test HiroApi initialization and connectivity + hiro_api = HiroApi() + api_info = await hiro_api.aget_info() + if not api_info: + logger.error("Cannot connect to Hiro API") + return False + + return True + except Exception as e: + logger.error(f"Resource validation failed: {str(e)}") + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + try: + # Always valid to run - we want to check chain state freshness + # even when there's no new data + return True + except Exception as e: + logger.error( + f"Error validating chain state monitor task: {str(e)}", exc_info=True + ) + return False + + def _convert_to_chainhook_format( + self, + block_height: int, + block_hash: str, + parent_hash: str, + transactions: Any, + burn_block_height: Optional[int] = None, + ) -> Dict[str, Any]: + """Convert block transactions to chainhook format. 
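+
+ The returned dict mirrors a Chainhook webhook payload: an "apply" list with
+ a single block entry (identifier, metadata, transactions with receipts and
+ operations) plus "chainhook" predicate info with streaming disabled, so the
+ block can be replayed through ChainhookService.process() as if it had been
+ delivered by a live chainhook.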
+ + Args: + block_height: Height of the block + block_hash: Hash of the block + parent_hash: Hash of the parent block + transactions: Block transactions from Hiro API + burn_block_height: Bitcoin burn block height (optional) + + Returns: + Dict formatted as a chainhook webhook payload + """ + # Get detailed block information from API + try: + block_data = self.hiro_api.get_block_by_height(block_height) + logger.debug( + f"Retrieved block data for height {block_height}: {block_data}" + ) + except Exception as e: + logger.warning( + f"Could not fetch detailed block data for height {block_height}: {e}" + ) + block_data = {} + + # Create block identifier + block_identifier = BlockIdentifier(hash=block_hash, index=block_height) + + # Create parent block identifier + parent_block_identifier = BlockIdentifier( + hash=parent_hash, index=block_height - 1 + ) + + # Extract block time from block data or transaction data, fallback to current time + block_time = None + if isinstance(block_data, dict): + block_time = block_data.get("block_time") + elif hasattr(block_data, "block_time"): + block_time = block_data.block_time + + # If block_time not available from block data, try from first transaction + if not block_time and transactions.results: + tx = transactions.results[0] + if isinstance(tx, dict): + block_time = tx.get("block_time") + else: + block_time = getattr(tx, "block_time", None) + + # Fallback to current timestamp if still not found + if not block_time: + block_time = int(datetime.now().timestamp()) + logger.warning( + f"Using current timestamp for block {block_height} as block_time was not available" + ) + + # Create comprehensive metadata with all available fields + metadata = BlockMetadata( + block_time=block_time, + stacks_block_hash=block_hash, + ) + + # Extract additional metadata from block data if available + if isinstance(block_data, dict): + # Bitcoin anchor block identifier with proper hash + bitcoin_anchor_info = block_data.get("bitcoin_anchor_block_identifier", {}) + bitcoin_anchor_hash = ( + bitcoin_anchor_info.get("hash", "") + if isinstance(bitcoin_anchor_info, dict) + else "" + ) + if burn_block_height is not None: + metadata.bitcoin_anchor_block_identifier = BlockIdentifier( + hash=bitcoin_anchor_hash, index=burn_block_height + ) + + # PoX cycle information + pox_cycle_index = block_data.get("pox_cycle_index") + if pox_cycle_index is not None: + metadata.pox_cycle_index = pox_cycle_index + + pox_cycle_length = block_data.get("pox_cycle_length") + if pox_cycle_length is not None: + metadata.pox_cycle_length = pox_cycle_length + + pox_cycle_position = block_data.get("pox_cycle_position") + if pox_cycle_position is not None: + metadata.pox_cycle_position = pox_cycle_position + + cycle_number = block_data.get("cycle_number") + if cycle_number is not None: + metadata.cycle_number = cycle_number + + # Signer information + signer_bitvec = block_data.get("signer_bitvec") + if signer_bitvec is not None: + metadata.signer_bitvec = signer_bitvec + + signer_public_keys = block_data.get("signer_public_keys") + if signer_public_keys is not None: + metadata.signer_public_keys = signer_public_keys + + signer_signature = block_data.get("signer_signature") + if signer_signature is not None: + metadata.signer_signature = signer_signature + + # Other metadata + tenure_height = block_data.get("tenure_height") + if tenure_height is not None: + metadata.tenure_height = tenure_height + + confirm_microblock_identifier = block_data.get( + "confirm_microblock_identifier" + ) + if 
confirm_microblock_identifier is not None: + metadata.confirm_microblock_identifier = confirm_microblock_identifier + + reward_set = block_data.get("reward_set") + if reward_set is not None: + metadata.reward_set = reward_set + elif burn_block_height is not None: + # Fallback: create basic bitcoin anchor block identifier without hash + metadata.bitcoin_anchor_block_identifier = BlockIdentifier( + hash="", index=burn_block_height + ) + + # Convert transactions to chainhook format with enhanced data + chainhook_transactions = [] + for tx in transactions.results: + # Handle tx as either dict or object + if isinstance(tx, dict): + tx_id = tx.get("tx_id", "") + exec_cost_read_count = tx.get("execution_cost_read_count", 0) + exec_cost_read_length = tx.get("execution_cost_read_length", 0) + exec_cost_runtime = tx.get("execution_cost_runtime", 0) + exec_cost_write_count = tx.get("execution_cost_write_count", 0) + exec_cost_write_length = tx.get("execution_cost_write_length", 0) + fee_rate = tx.get("fee_rate", "0") + nonce = tx.get("nonce", 0) + tx_index = tx.get("tx_index", 0) + sender_address = tx.get("sender_address", "") + sponsor_address = tx.get("sponsor_address", None) + tx.get("sponsored", False) + tx_status = tx.get("tx_status", "") + tx_type = tx.get("tx_type", "") + tx_result_repr = ( + tx.get("tx_result", {}).get("repr", "") + if isinstance(tx.get("tx_result"), dict) + else "" + ) + # Extract events and additional transaction data + events = tx.get("events", []) + raw_tx = tx.get("raw_tx", "") + + # Create better description based on transaction type and data + description = self._create_transaction_description(tx) + + # Extract token transfer data if available + token_transfer = tx.get("token_transfer") + else: + tx_id = tx.tx_id + exec_cost_read_count = tx.execution_cost_read_count + exec_cost_read_length = tx.execution_cost_read_length + exec_cost_runtime = tx.execution_cost_runtime + exec_cost_write_count = tx.execution_cost_write_count + exec_cost_write_length = tx.execution_cost_write_length + fee_rate = tx.fee_rate + nonce = tx.nonce + tx_index = tx.tx_index + sender_address = tx.sender_address + sponsor_address = tx.sponsor_address if tx.sponsored else None + tx_status = tx.tx_status + tx_type = tx.tx_type + tx_result_repr = ( + tx.tx_result.repr if hasattr(tx.tx_result, "repr") else "" + ) + events = getattr(tx, "events", []) + raw_tx = getattr(tx, "raw_tx", "") + + # Create better description + description = self._create_transaction_description(tx) + + # Extract token transfer data + token_transfer = getattr(tx, "token_transfer", None) + + # Create transaction identifier + tx_identifier = TransactionIdentifier(hash=tx_id) + + # Convert events to proper format + receipt_events = [] + for event in events: + if isinstance(event, dict): + receipt_events.append( + { + "data": event.get("data", {}), + "position": {"index": event.get("event_index", 0)}, + "type": event.get("event_type", ""), + } + ) + else: + receipt_events.append( + { + "data": getattr(event, "data", {}), + "position": {"index": getattr(event, "event_index", 0)}, + "type": getattr(event, "event_type", ""), + } + ) + + # Create transaction metadata with proper receipt + tx_metadata = { + "description": description, + "execution_cost": { + "read_count": exec_cost_read_count, + "read_length": exec_cost_read_length, + "runtime": exec_cost_runtime, + "write_count": exec_cost_write_count, + "write_length": exec_cost_write_length, + }, + "fee": ( + int(fee_rate) + if isinstance(fee_rate, str) and fee_rate.isdigit() + else 
int(fee_rate) + if isinstance(fee_rate, (int, float)) + else 0 + ), + "kind": {"type": tx_type}, + "nonce": nonce, + "position": {"index": tx_index}, + "raw_tx": raw_tx, + "receipt": { + "contract_calls_stack": [], + "events": receipt_events, + "mutated_assets_radius": [], + "mutated_contracts_radius": [], + }, + "result": tx_result_repr, + "sender": sender_address, + "sponsor": sponsor_address, + "success": tx_status == "success", + } + + # Generate operations based on transaction type and data + operations = self._create_transaction_operations(tx, token_transfer) + + # Create transaction with receipt + tx_with_receipt = TransactionWithReceipt( + transaction_identifier=tx_identifier, + metadata=tx_metadata, + operations=operations, + ) + + chainhook_transactions.append(tx_with_receipt) + + # Create apply block + apply_block = Apply( + block_identifier=block_identifier, + parent_block_identifier=parent_block_identifier, + metadata=metadata, + timestamp=block_time, + transactions=chainhook_transactions, + ) + + # Create predicate + predicate = Predicate(scope="block_height", higher_than=block_height - 1) + + # Create chainhook info + chainhook_info = ChainHookInfo( + is_streaming_blocks=False, predicate=predicate, uuid=str(uuid.uuid4()) + ) + + # Create full chainhook data + ChainHookData( + apply=[apply_block], chainhook=chainhook_info, events=[], rollback=[] + ) + + # Convert to dict for webhook processing with complete metadata + metadata_dict = { + "block_time": apply_block.metadata.block_time, + "stacks_block_hash": apply_block.metadata.stacks_block_hash, + } + + # Add all available metadata fields + if apply_block.metadata.bitcoin_anchor_block_identifier: + metadata_dict["bitcoin_anchor_block_identifier"] = { + "hash": apply_block.metadata.bitcoin_anchor_block_identifier.hash, + "index": apply_block.metadata.bitcoin_anchor_block_identifier.index, + } + + # Add optional metadata fields if they exist + optional_fields = [ + "pox_cycle_index", + "pox_cycle_length", + "pox_cycle_position", + "cycle_number", + "signer_bitvec", + "signer_public_keys", + "signer_signature", + "tenure_height", + "confirm_microblock_identifier", + "reward_set", + ] + + for field in optional_fields: + value = getattr(apply_block.metadata, field, None) + if value is not None: + metadata_dict[field] = value + + return { + "apply": [ + { + "block_identifier": { + "hash": apply_block.block_identifier.hash, + "index": apply_block.block_identifier.index, + }, + "metadata": metadata_dict, + "parent_block_identifier": { + "hash": apply_block.parent_block_identifier.hash, + "index": apply_block.parent_block_identifier.index, + }, + "timestamp": apply_block.timestamp, + "transactions": [ + { + "transaction_identifier": { + "hash": tx.transaction_identifier.hash + }, + "metadata": tx.metadata, + "operations": tx.operations, + } + for tx in apply_block.transactions + ], + } + ], + "chainhook": { + "is_streaming_blocks": chainhook_info.is_streaming_blocks, + "predicate": { + "scope": chainhook_info.predicate.scope, + "higher_than": chainhook_info.predicate.higher_than, + }, + "uuid": chainhook_info.uuid, + }, + "events": [], + "rollback": [], + } + + def _create_transaction_description(self, tx) -> str: + """Create a meaningful transaction description based on transaction data. 
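+
+ Examples of the strings produced below: STX transfers yield
+ "transfered: 1000 µSTX from <sender> to <recipient>", contract calls yield
+ "contract call: <contract_id>::<function_name>", and anything else falls
+ back to "Transaction <tx_id>".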
+ + Args: + tx: Transaction data (dict or object) + + Returns: + str: Human-readable transaction description + """ + if isinstance(tx, dict): + tx_type = tx.get("tx_type", "") + token_transfer = tx.get("token_transfer") + else: + tx_type = getattr(tx, "tx_type", "") + token_transfer = getattr(tx, "token_transfer", None) + + if ( + tx_type in ["token_transfer", "stx_transfer", "NativeTokenTransfer"] + and token_transfer + ): + if isinstance(token_transfer, dict): + amount = token_transfer.get("amount", "0") + recipient = token_transfer.get("recipient_address", "") + sender = ( + tx.get("sender_address", "") + if isinstance(tx, dict) + else getattr(tx, "sender_address", "") + ) + else: + amount = getattr(token_transfer, "amount", "0") + recipient = getattr(token_transfer, "recipient_address", "") + sender = ( + tx.get("sender_address", "") + if isinstance(tx, dict) + else getattr(tx, "sender_address", "") + ) + + return f"transfered: {amount} µSTX from {sender} to {recipient}" + elif tx_type == "coinbase": + return "coinbase transaction" + elif tx_type == "contract_call": + if isinstance(tx, dict): + contract_call = tx.get("contract_call", {}) + if isinstance(contract_call, dict): + contract_id = contract_call.get("contract_id", "") + function_name = contract_call.get("function_name", "") + return f"contract call: {contract_id}::{function_name}" + else: + contract_call = getattr(tx, "contract_call", None) + if contract_call: + contract_id = getattr(contract_call, "contract_id", "") + function_name = getattr(contract_call, "function_name", "") + return f"contract call: {contract_id}::{function_name}" + + # Fallback description + tx_id = ( + tx.get("tx_id", "") if isinstance(tx, dict) else getattr(tx, "tx_id", "") + ) + return f"Transaction {tx_id}" + + def _create_transaction_operations( + self, tx, token_transfer=None + ) -> List[Dict[str, Any]]: + """Create transaction operations based on transaction type and data. 
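+
+ Only native STX transfers are mapped at the moment: each transfer yields a
+ DEBIT operation for the sender and a linked CREDIT operation for the
+ recipient (µSTX amounts, 6 decimals); all other transaction types return an
+ empty list.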
+ + Args: + tx: Transaction data (dict or object) + token_transfer: Token transfer data if available + + Returns: + List[Dict[str, Any]]: List of operations for the transaction + """ + operations = [] + + if isinstance(tx, dict): + tx_type = tx.get("tx_type", "") + sender_address = tx.get("sender_address", "") + else: + tx_type = getattr(tx, "tx_type", "") + sender_address = getattr(tx, "sender_address", "") + + # Handle token transfers + if ( + tx_type in ["token_transfer", "stx_transfer", "NativeTokenTransfer"] + and token_transfer + ): + if isinstance(token_transfer, dict): + amount = int(token_transfer.get("amount", "0")) + recipient = token_transfer.get("recipient_address", "") + else: + amount = int(getattr(token_transfer, "amount", "0")) + recipient = getattr(token_transfer, "recipient_address", "") + + # Debit operation (sender) + operations.append( + { + "account": {"address": sender_address}, + "amount": { + "currency": {"decimals": 6, "symbol": "STX"}, + "value": amount, + }, + "operation_identifier": {"index": 0}, + "related_operations": [{"index": 1}], + "status": "SUCCESS", + "type": "DEBIT", + } + ) + + # Credit operation (recipient) + operations.append( + { + "account": {"address": recipient}, + "amount": { + "currency": {"decimals": 6, "symbol": "STX"}, + "value": amount, + }, + "operation_identifier": {"index": 1}, + "related_operations": [{"index": 0}], + "status": "SUCCESS", + "type": "CREDIT", + } + ) + + return operations + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain RPC issues + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on configuration errors + if "not configured" in str(error).lower(): + return False + if "invalid contract" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[ChainStateMonitorResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "rpc" in str(error).lower(): + logger.warning(f"Blockchain/RPC error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For configuration errors, don't retry + return [ + ChainStateMonitorResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[ChainStateMonitorResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("Chain state monitor task cleanup completed") + + async def _execute_impl(self, context: JobContext) -> List[ChainStateMonitorResult]: + """Execute chain state monitoring task with blockchain synchronization.""" + # Use the configured network + network = config.network.network + + try: + results = [] + + # Get the latest chain state for this network + latest_chain_state = backend.get_latest_chain_state(network) + + if not latest_chain_state: + logger.warning(f"No chain state found for network {network}") + results.append( + ChainStateMonitorResult( + success=False, + message=f"No chain state found for network {network}", + network=network, + is_stale=True, + ) + ) + return results + + # Calculate how old the chain state is + now = datetime.now() + last_updated = latest_chain_state.updated_at + + # Convert 
last_updated to naive datetime if it has timezone info + if last_updated.tzinfo is not None: + last_updated = last_updated.replace(tzinfo=None) + + time_difference = now - last_updated + minutes_difference = time_difference.total_seconds() / 60 + + # Get current chain height from API + try: + logger.debug("Fetching current chain info from API") + api_info = self.hiro_api.get_info() + + # Handle different response types + if isinstance(api_info, dict): + # Try to access chain_tip from dictionary + if "chain_tip" in api_info: + chain_tip = api_info["chain_tip"] + current_api_block_height = chain_tip.get("block_height", 0) + else: + logger.error(f"Missing chain_tip in API response: {api_info}") + raise ValueError( + "Invalid API response format - missing chain_tip" + ) + else: + # We have a HiroApiInfo object but chain_tip is still a dict + # Access it as a dictionary + if isinstance(api_info.chain_tip, dict): + current_api_block_height = api_info.chain_tip.get( + "block_height", 0 + ) + else: + current_api_block_height = api_info.chain_tip.block_height + + logger.info(f"Current API block height: {current_api_block_height}") + db_block_height = latest_chain_state.block_height + logger.info(f"Current DB block height: {db_block_height}") + + blocks_behind = current_api_block_height - db_block_height + + # Consider stale if more than 10 blocks behind + stale_threshold_blocks = 10 + is_stale = blocks_behind > stale_threshold_blocks + + logger.info( + f"Chain state is {blocks_behind} blocks behind the current chain tip. " + f"DB height: {db_block_height}, API height: {current_api_block_height}" + ) + + # Process missing blocks if we're behind + if blocks_behind > 0 and is_stale: + logger.warning( + f"Chain state is {blocks_behind} blocks behind, which exceeds the threshold of {stale_threshold_blocks}. 
" + f"DB height: {db_block_height}, API height: {current_api_block_height}" + ) + + blocks_processed = [] + + # Process each missing block + for height in range( + db_block_height + 1, current_api_block_height + 1 + ): + logger.info( + f"Processing transactions for block height {height}" + ) + + try: + # Get all transactions for this block + transactions = self.hiro_api.get_all_transactions_by_block( + height + ) + + # Log transaction count and details + logger.info( + f"Block {height}: Found {transactions.total} transactions" + ) + + # Get block details and burn block height + burn_block_height = None + if transactions.results: + # Handle transactions.results as either dict or object + tx = transactions.results[0] + if isinstance(tx, dict): + block_hash = tx.get("block_hash") + parent_hash = tx.get("parent_block_hash") + burn_block_height = tx.get("burn_block_height") + else: + block_hash = tx.block_hash + parent_hash = tx.parent_block_hash + burn_block_height = getattr( + tx, "burn_block_height", None + ) + else: + # If no transactions, fetch the block directly + try: + block = self.hiro_api.get_block_by_height(height) + + # Handle different response formats + if isinstance(block, dict): + block_hash = block.get("hash") + parent_hash = block.get("parent_block_hash") + burn_block_height = block.get( + "burn_block_height" + ) + else: + block_hash = block.hash + parent_hash = block.parent_block_hash + burn_block_height = getattr( + block, "burn_block_height", None + ) + + if not block_hash or not parent_hash: + raise ValueError( + f"Missing hash or parent_hash in block data: {block}" + ) + except Exception as e: + logger.error( + f"Error fetching block {height}: {str(e)}" + ) + raise + + logger.debug( + f"Block {height}: burn_block_height={burn_block_height}" + ) + + # Convert to chainhook format + chainhook_data = self._convert_to_chainhook_format( + height, + block_hash, + parent_hash, + transactions, + burn_block_height, + ) + + # Process through chainhook service + result = await self.chainhook_service.process( + chainhook_data + ) + logger.info( + f"Block {height} processed with result: {result}" + ) + + blocks_processed.append(height) + + except Exception as e: + logger.error( + f"Error processing block {height}: {str(e)}", + exc_info=True, + ) + # Continue with next block instead of failing the entire process + + results.append( + ChainStateMonitorResult( + success=True, + message=f"Chain state is {blocks_behind} blocks behind. Processed {len(blocks_processed)} blocks.", + network=network, + is_stale=is_stale, + last_updated=last_updated, + elapsed_minutes=minutes_difference, + blocks_behind=blocks_behind, + blocks_processed=blocks_processed, + ) + ) + return results + else: + logger.info( + f"Chain state for network {network} is {'stale' if is_stale else 'fresh'}. " + f"{blocks_behind} blocks behind (threshold: {stale_threshold_blocks})." 
+ ) + + # Return result based on blocks_behind check + results.append( + ChainStateMonitorResult( + success=True, + message=f"Chain state for network {network} is {blocks_behind} blocks behind", + network=network, + is_stale=is_stale, + last_updated=last_updated, + elapsed_minutes=minutes_difference, + blocks_behind=blocks_behind, + ) + ) + + return results + + except Exception as e: + logger.error( + f"Error getting current chain info: {str(e)}", exc_info=True + ) + # Fall back to legacy time-based staleness check if API call fails + logger.warning("Falling back to time-based staleness check") + stale_threshold_minutes = 5 + is_stale = minutes_difference > stale_threshold_minutes + + results.append( + ChainStateMonitorResult( + success=False, + message=f"Error checking chain height, using time-based check instead: {str(e)}", + network=network, + is_stale=is_stale, + last_updated=last_updated, + elapsed_minutes=minutes_difference, + ) + ) + return results + + except Exception as e: + logger.error( + f"Error executing chain state monitoring task: {str(e)}", exc_info=True + ) + return [ + ChainStateMonitorResult( + success=False, + message=f"Error executing chain state monitoring task: {str(e)}", + network=network, + is_stale=True, + ) + ] + + +# Create instance for auto-registration +chain_state_monitor = ChainStateMonitorTask() diff --git a/services/infrastructure/job_management/tasks/dao_deployment_task.py b/services/infrastructure/job_management/tasks/dao_deployment_task.py new file mode 100644 index 00000000..23be10e1 --- /dev/null +++ b/services/infrastructure/job_management/tasks/dao_deployment_task.py @@ -0,0 +1,382 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Optional +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + DAOFilter, + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, +) +from lib.logger import configure_logger +from services.ai.workflows import execute_workflow_stream +from tools.tools_factory import filter_tools_by_names, initialize_tools + +from ..base import BaseTask, JobContext, RunnerConfig, RunnerResult +from ..decorators import JobPriority, job + +logger = configure_logger(__name__) + + +@dataclass +class DAODeploymentResult(RunnerResult): + """Result of DAO deployment operation.""" + + dao_id: Optional[UUID] = None + deployment_data: Optional[Dict[str, Any]] = None + daos_processed: int = 0 + deployments_successful: int = 0 + + +@job( + job_type="dao_deployment", + name="DAO Deployment Processor", + description="Processes DAO deployment requests with enhanced monitoring and error handling", + interval_seconds=60, + priority=JobPriority.HIGH, + max_retries=2, + retry_delay_seconds=120, + timeout_seconds=600, + max_concurrent=1, + requires_blockchain=True, + batch_size=1, + enable_dead_letter_queue=True, +) +class DAODeploymentTask(BaseTask[DAODeploymentResult]): + """Task for processing DAO deployments with enhanced capabilities.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._pending_messages = None + self.tools_map_all = initialize_tools(None, None) + self.tools_map = filter_tools_by_names( + ["contract_deploy_dao"], self.tools_map_all + ) + logger.debug(f"Initialized {len(self.tools_map)} DAO deployment tools") + + async def _validate_config(self, context: JobContext) -> bool: + """Validate DAO deployment task configuration.""" + try: + if not self.tools_map: + logger.error("No DAO deployment tools available") 
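+                # tools_map is built in __init__ from the "contract_deploy_dao" tool,
+                # so an empty map means tool initialization failed.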
+ return False + + if not self.tools_map_all: + logger.error("Tools not properly initialized") + return False + + # Configuration validation passed + logger.debug("DAO deployment task configuration validation passed") + + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for DAO deployment.""" + try: + # Check if we have required tools initialized + if not self.tools_map: + logger.error("DAO deployment tools not available") + return False + + return True + except Exception as e: + logger.error(f"DAO deployment resource validation failed: {str(e)}") + return False + + async def _validate_prerequisites(self, context: JobContext) -> bool: + """Validate DAO deployment task prerequisites.""" + try: + # Check for pending DAOs first + pending_daos = backend.list_daos( + filters=DAOFilter( + is_deployed=False, + is_broadcasted=True, + ) + ) + if pending_daos: + logger.info( + f"Found {len(pending_daos)} pending Twitter DAO(s), skipping queue processing" + ) + return False + + # Cache pending messages for later use + self._pending_messages = backend.list_queue_messages( + filters=QueueMessageFilter( + type=QueueMessageType.get_or_create("dao_deployment"), + is_processed=False, + ) + ) + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment prerequisites: {str(e)}", + exc_info=True, + ) + self._pending_messages = None + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate DAO deployment task-specific conditions.""" + try: + if not self._pending_messages: + logger.debug("No pending DAO deployment messages found") + return False + + # Validate each message has required parameters + valid_messages = [] + for message in self._pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + + self._pending_messages = valid_messages + message_count = len(valid_messages) + + if message_count > 0: + logger.debug(f"Found {message_count} valid DAO deployment messages") + return True + + logger.debug("No valid DAO deployment messages to process") + return False + + except Exception as e: + logger.error( + f"Error in DAO deployment task validation: {str(e)}", exc_info=True + ) + return False + + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a message has valid DAO deployment parameters.""" + try: + if not message.message or not isinstance(message.message, dict): + return False + + params = message.message.get("parameters", {}) + required_params = [ + "token_symbol", + "token_name", + "token_description", + "token_max_supply", + "token_decimals", + "origin_address", + "mission", + ] + + # Check all required parameters exist and are not empty + for param in required_params: + if param not in params or not params[param]: + logger.debug( + f"Message {message.id} missing required param: {param}" + ) + return False + + return True + except Exception: + return False + + async def _validate_message( + self, message: QueueMessage + ) -> Optional[DAODeploymentResult]: + """Validate a single DAO deployment message before processing.""" + try: + params = message.message.get("parameters", {}) + required_params = [ + "token_symbol", + "token_name", + "token_description", + "token_max_supply", + "token_decimals", + "origin_address", + "mission", + ] + + missing_params = [p for p in 
required_params if p not in params] + if missing_params: + return DAODeploymentResult( + success=False, + message=f"Missing required parameters: {', '.join(missing_params)}", + ) + + return None # Validation passed + + except Exception as e: + logger.error( + f"Error validating DAO deployment message {message.id}: {str(e)}", + exc_info=True, + ) + return DAODeploymentResult( + success=False, + message=f"Error validating message: {str(e)}", + error=e, + ) + + def _get_dao_deployment_parameters(self, message: QueueMessage) -> Optional[str]: + """Extract and format DAO deployment parameters from message.""" + try: + params = message.message["parameters"] + return ( + f"Please deploy a DAO with the following parameters:\n" + f"Token Symbol: {params['token_symbol']}\n" + f"Token Name: {params['token_name']}\n" + f"Token Description: {params['token_description']}\n" + f"Token Max Supply: {params['token_max_supply']}\n" + f"Token Decimals: {params['token_decimals']}\n" + f"Origin Address: {params['origin_address']}\n" + f"Tweet Origin: {message.tweet_id}\n" + f"Mission: {params['mission']}" + ) + except KeyError as e: + logger.error(f"Missing required parameter in DAO deployment message: {e}") + return None + + async def _process_dao_deployment_message( + self, message: QueueMessage + ) -> DAODeploymentResult: + """Process a single DAO deployment message with enhanced error handling.""" + try: + # Validate message first + validation_result = await self._validate_message(message) + if validation_result: + return validation_result + + tool_input = self._get_dao_deployment_parameters(message) + if not tool_input: + return DAODeploymentResult( + success=False, + message="Failed to extract DAO deployment parameters from message", + ) + + logger.info(f"Processing DAO deployment for message {message.id}") + logger.debug(f"DAO deployment parameters: {tool_input}") + + deployment_data = {} + async for chunk in execute_workflow_stream( + history=[], input_str=tool_input, tools_map=self.tools_map + ): + if chunk["type"] == "result": + deployment_data = chunk["content"] + logger.info("DAO deployment completed successfully") + logger.debug(f"Deployment data: {deployment_data}") + elif chunk["type"] == "tool": + logger.debug(f"Executing tool: {chunk}") + + # Extract DAO ID if available from deployment data + dao_id = None + if isinstance(deployment_data, dict): + dao_id = deployment_data.get("dao_id") + + return DAODeploymentResult( + success=True, + message="Successfully processed DAO deployment", + deployment_data=deployment_data, + dao_id=dao_id, + daos_processed=1, + deployments_successful=1, + ) + + except Exception as e: + logger.error( + f"Error processing DAO deployment message: {str(e)}", exc_info=True + ) + return DAODeploymentResult( + success=False, + message=f"Error processing DAO deployment: {str(e)}", + error=e, + daos_processed=1, + deployments_successful=0, + ) + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if DAO deployment error should trigger retry.""" + # Retry on network errors, temporary blockchain issues + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors or tool configuration issues + if "Missing required parameter" in str(error): + return False + if "Tools not properly initialized" in str(error): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAODeploymentResult]]: + 
"""Handle DAO deployment execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "network" in str(error).lower(): + logger.warning( + f"Blockchain/network error during DAO deployment: {str(error)}, will retry" + ) + return None # Let default retry handling take over + + # For validation errors, don't retry + return [ + DAODeploymentResult( + success=False, + message=f"Unrecoverable DAO deployment error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAODeploymentResult] + ) -> None: + """Cleanup after DAO deployment task execution.""" + # Clear cached pending messages + self._pending_messages = None + logger.debug("DAO deployment task cleanup completed") + + async def _execute_impl(self, context: JobContext) -> List[DAODeploymentResult]: + """Execute DAO deployment task with enhanced processing.""" + results: List[DAODeploymentResult] = [] + try: + if not self._pending_messages: + return results + + # Process one message at a time for DAO deployments (they're resource intensive) + message = self._pending_messages[0] + logger.debug(f"Processing DAO deployment message: {message.id}") + + result = await self._process_dao_deployment_message(message) + results.append(result) + + if result.success: + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase( + is_processed=True, + result=result.model_dump(), + ), + ) + logger.debug(f"Marked DAO deployment message {message.id} as processed") + logger.info("DAO deployment task completed successfully") + else: + logger.error(f"DAO deployment failed: {result.message}") + + return results + + except Exception as e: + logger.error(f"Error in DAO deployment task: {str(e)}", exc_info=True) + results.append( + DAODeploymentResult( + success=False, + message=f"Error in DAO deployment task: {str(e)}", + error=e, + daos_processed=1, + deployments_successful=0, + ) + ) + return results + + +# Create instance for auto-registration +dao_deployment_task = DAODeploymentTask() diff --git a/services/infrastructure/job_management/tasks/dao_deployment_tweet_task.py b/services/infrastructure/job_management/tasks/dao_deployment_tweet_task.py new file mode 100644 index 00000000..17eec4e3 --- /dev/null +++ b/services/infrastructure/job_management/tasks/dao_deployment_tweet_task.py @@ -0,0 +1,370 @@ +from dataclasses import dataclass +from typing import Any, List, Optional +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + QueueMessageBase, + QueueMessageCreate, + QueueMessageFilter, + QueueMessageType, + TokenFilter, +) +from lib.logger import configure_logger +from services.ai.workflows import generate_dao_tweet + +from ..base import BaseTask, JobContext, RunnerConfig, RunnerResult +from ..decorators import JobPriority, job + +logger = configure_logger(__name__) + + +@dataclass +class DAODeploymentTweetResult(RunnerResult): + """Result of DAO deployment tweet processing operation.""" + + dao_id: Optional[UUID] = None + tweet_id: Optional[str] = None + congratulatory_tweets_generated: int = 0 + tweet_messages_created: int = 0 + + +@job( + job_type="dao_deployment_tweet", + name="DAO Deployment Tweet Generator", + description="Generates congratulatory tweets for successfully deployed DAOs with enhanced monitoring and error handling", + interval_seconds=45, + priority=JobPriority.MEDIUM, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=180, + max_concurrent=1, + requires_ai=True, + 
batch_size=5, + enable_dead_letter_queue=True, +) +class DAODeploymentTweetTask(BaseTask[DAODeploymentTweetResult]): + """Task for generating congratulatory tweets for successfully deployed DAOs with enhanced capabilities.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._pending_messages = None + + async def _validate_config(self, context: JobContext) -> bool: + """Validate DAO deployment tweet task configuration.""" + try: + # Check if generate_dao_tweet workflow is available for deployment congratulations + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment tweet task config: {str(e)}", + exc_info=True, + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for DAO deployment tweet generation.""" + try: + return True + except Exception as e: + logger.error(f"Backend not available for DAO deployment tweets: {str(e)}") + return False + + async def _validate_prerequisites(self, context: JobContext) -> bool: + """Validate DAO deployment tweet task prerequisites.""" + try: + # Cache pending deployment tweet messages for later use + self._pending_messages = backend.list_queue_messages( + filters=QueueMessageFilter( + type=QueueMessageType.get_or_create("dao_deployment_tweet"), + is_processed=False, + ) + ) + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment tweet prerequisites: {str(e)}", + exc_info=True, + ) + self._pending_messages = None + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate DAO deployment tweet task-specific conditions.""" + try: + if not self._pending_messages: + logger.debug("No pending DAO deployment tweet messages found") + return False + + # Validate each message has valid deployed DAO data + valid_messages = [] + for message in self._pending_messages: + if await self._is_deployment_message_valid(message): + valid_messages.append(message) + + self._pending_messages = valid_messages + + if valid_messages: + logger.debug( + f"Found {len(valid_messages)} valid DAO deployment tweet messages" + ) + return True + + logger.debug("No valid DAO deployment tweet messages to process") + return False + + except Exception as e: + logger.error( + f"Error in DAO deployment tweet task validation: {str(e)}", + exc_info=True, + ) + return False + + async def _is_deployment_message_valid(self, message: Any) -> bool: + """Check if a DAO deployment tweet message is valid for processing.""" + try: + if not message.dao_id: + return False + + # Validate DAO exists and is successfully deployed + dao = backend.get_dao(message.dao_id) + if not dao or not dao.is_deployed: + return False + + # Validate token exists for the deployed DAO + token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id)) + if not token: + return False + + return True + except Exception: + return False + + async def _validate_deployment_message( + self, message: Any + ) -> Optional[DAODeploymentTweetResult]: + """Validate a single DAO deployment message before processing.""" + try: + if not message.dao_id: + return DAODeploymentTweetResult( + success=False, + message="DAO deployment message has no dao_id", + dao_id=None, + ) + + # Validate DAO exists and is successfully deployed + dao = backend.get_dao(message.dao_id) + if not dao: + return DAODeploymentTweetResult( + success=False, + message=f"No DAO found for deployment tweet: {message.dao_id}", + 
dao_id=message.dao_id, + ) + + if not dao.is_deployed: + return DAODeploymentTweetResult( + success=False, + message=f"DAO is not yet deployed, cannot tweet congratulations: {message.dao_id}", + dao_id=message.dao_id, + ) + + # Validate token exists for the deployed DAO + token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id)) + if not token: + return DAODeploymentTweetResult( + success=False, + message=f"No token found for deployed DAO: {message.dao_id}", + dao_id=message.dao_id, + ) + + return None # Validation passed + + except Exception as e: + logger.error( + f"Error validating DAO deployment message {message.id}: {str(e)}", + exc_info=True, + ) + return DAODeploymentTweetResult( + success=False, + message=f"Error validating deployment message: {str(e)}", + error=e, + dao_id=message.dao_id if hasattr(message, "dao_id") else None, + ) + + async def _process_dao_deployment_message( + self, message: Any + ) -> DAODeploymentTweetResult: + """Process a single DAO deployment message to generate congratulatory tweet.""" + try: + # Validate deployment message first + validation_result = await self._validate_deployment_message(message) + if validation_result: + return validation_result + + # Get the validated deployed DAO and token info + dao = backend.get_dao(message.dao_id) + token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id))[0] + + logger.info( + f"Generating congratulatory tweet for deployed DAO: {dao.name} ({dao.id})" + ) + logger.debug( + f"Deployed DAO details - Symbol: {token.symbol}, Mission: {dao.mission[:100]}..." + ) + + # Generate congratulatory tweet for the deployment + generated_congratulatory_tweet = await generate_dao_tweet( + dao_name=dao.name, + dao_symbol=token.symbol, + dao_mission=dao.mission, + dao_id=dao.id, + ) + + if ( + not generated_congratulatory_tweet + or not generated_congratulatory_tweet.get("tweet_text") + ): + return DAODeploymentTweetResult( + success=False, + message="Failed to generate congratulatory tweet content for DAO deployment", + dao_id=dao.id, + tweet_id=message.tweet_id, + ) + + # Create a new congratulatory tweet message in the queue + congratulatory_tweet_message = backend.create_queue_message( + QueueMessageCreate( + type="tweet", + dao_id=dao.id, + message={"message": generated_congratulatory_tweet["tweet_text"]}, + tweet_id=message.tweet_id, + conversation_id=message.conversation_id, + ) + ) + + logger.info( + f"Created congratulatory tweet message for deployed DAO: {dao.name}" + ) + logger.debug( + f"Congratulatory tweet message ID: {congratulatory_tweet_message.id}" + ) + logger.debug( + f"Generated congratulatory tweet content: {generated_congratulatory_tweet['tweet_text'][:100]}..." 
+ ) + + return DAODeploymentTweetResult( + success=True, + message="Successfully generated congratulatory tweet for DAO deployment", + dao_id=dao.id, + tweet_id=message.tweet_id, + congratulatory_tweets_generated=1, + tweet_messages_created=1, + ) + + except Exception as e: + logger.error( + f"Error processing DAO deployment message {message.id}: {str(e)}", + exc_info=True, + ) + return DAODeploymentTweetResult( + success=False, + message=f"Error processing DAO deployment tweet: {str(e)}", + error=e, + dao_id=message.dao_id if hasattr(message, "dao_id") else None, + ) + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if DAO deployment tweet error should trigger retry.""" + # Retry on network errors, AI service timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on DAO deployment validation errors + if "DAO is not yet deployed" in str(error): + return False + if "No DAO found" in str(error): + return False + if "No token found for deployed DAO" in str(error): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAODeploymentTweetResult]]: + """Handle DAO deployment tweet execution errors with recovery logic.""" + if "ai" in str(error).lower() or "openai" in str(error).lower(): + logger.warning( + f"AI service error during congratulatory tweet generation: {str(error)}, will retry" + ) + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning( + f"Network error during DAO deployment tweet: {str(error)}, will retry" + ) + return None + + # For DAO deployment validation errors, don't retry + return [ + DAODeploymentTweetResult( + success=False, + message=f"Unrecoverable DAO deployment tweet error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAODeploymentTweetResult] + ) -> None: + """Cleanup after DAO deployment tweet task execution.""" + # Clear cached pending messages + self._pending_messages = None + logger.debug("DAO deployment tweet task cleanup completed") + + async def _execute_impl( + self, context: JobContext + ) -> List[DAODeploymentTweetResult]: + """Execute DAO deployment tweet processing task with batch processing.""" + results: List[DAODeploymentTweetResult] = [] + + if not self._pending_messages: + logger.debug("No pending DAO deployment tweet messages to process") + return results + + processed_count = 0 + success_count = 0 + batch_size = getattr(context, "batch_size", 5) + + # Process deployment tweet messages in batches + for i in range(0, len(self._pending_messages), batch_size): + batch = self._pending_messages[i : i + batch_size] + + for message in batch: + logger.debug(f"Processing DAO deployment tweet message: {message.id}") + result = await self._process_dao_deployment_message(message) + results.append(result) + processed_count += 1 + + if result.success: + success_count += 1 + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(is_processed=True), + ) + logger.debug( + f"Marked DAO deployment tweet message {message.id} as processed" + ) + + logger.info( + f"DAO deployment tweet task completed - Processed: {processed_count}, " + f"Successful: {success_count}, Failed: {processed_count - success_count}" + ) + + return results + + +# Create instance for auto-registration +dao_deployment_tweet_task = DAODeploymentTweetTask() diff --git 
a/services/infrastructure/job_management/tasks/dao_proposal_concluder.py b/services/infrastructure/job_management/tasks/dao_proposal_concluder.py new file mode 100644 index 00000000..3ae3f81d --- /dev/null +++ b/services/infrastructure/job_management/tasks/dao_proposal_concluder.py @@ -0,0 +1,325 @@ +"""DAO proposal conclusion task implementation.""" + +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from backend.factory import backend +from backend.models import ( + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, + TokenFilter, +) +from config import config +from lib.logger import configure_logger +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job +from tools.dao_ext_action_proposals import ConcludeActionProposalTool + +logger = configure_logger(__name__) + + +@dataclass +class DAOProposalConcludeResult(RunnerResult): + """Result of DAO proposal conclusion operation.""" + + proposals_processed: int = 0 + proposals_concluded: int = 0 + conclusions_successful: int = 0 + errors: List[str] = None + + def __post_init__(self): + self.errors = self.errors or [] + + +@job( + job_type="dao_proposal_conclude", + name="DAO Proposal Concluder", + description="Processes and concludes DAO proposals with enhanced monitoring and error handling", + interval_seconds=60, + priority=JobPriority.MEDIUM, + max_retries=2, + retry_delay_seconds=90, + timeout_seconds=240, + max_concurrent=1, + requires_blockchain=True, + batch_size=2, + enable_dead_letter_queue=True, +) +class DAOProposalConcluderTask(BaseTask[DAOProposalConcludeResult]): + """Task runner for processing and concluding DAO proposals with enhanced capabilities.""" + + QUEUE_TYPE = QueueMessageType.get_or_create("dao_proposal_conclude") + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if backend wallet configuration is available + if not config.backend_wallet or not config.backend_wallet.seed_phrase: + logger.error("Backend wallet seed phrase not configured") + return False + return True + except Exception as e: + logger.error( + f"Error validating proposal concluder config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + return True + except Exception as e: + logger.error(f"Backend not available: {str(e)}") + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + try: + # Get pending messages from the queue + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending proposal conclusion messages") + + if message_count == 0: + logger.info("No pending proposal conclusion messages found") + return False + + # Validate each message has valid proposal data + valid_messages = [] + for message in pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + + if valid_messages: + logger.info( + f"Found {len(valid_messages)} valid proposal conclusion messages" + ) + return True + + logger.warning("No valid proposals found in pending messages") + return False + + except Exception as e: + logger.error( + f"Error validating proposal conclusion task: {str(e)}", exc_info=True + ) + return False + 
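+    # A conclusion request on the "dao_proposal_conclude" queue is expected to carry
+    # the proposal UUID in its payload, roughly (illustrative values only):
+    #
+    #     QueueMessage(
+    #         dao_id=UUID("..."),
+    #         message={"proposal_id": "<proposal UUID>"},
+    #     )
+    #
+    # _is_message_valid() and _process_message() below reject messages that lack a
+    # dao_id, a proposal_id, or a matching proposal row in the database.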
+ async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a proposal conclusion message is valid for processing.""" + try: + if not message.message or not message.dao_id: + return False + + proposal_id = message.message.get("proposal_id") + if not proposal_id: + return False + + # Check if the proposal exists in the database + proposal = backend.get_proposal(proposal_id) + if not proposal: + return False + + return True + except Exception: + return False + + async def _process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single DAO proposal conclusion message with enhanced error handling.""" + message_id = message.id + message_data = message.message or {} + dao_id = message.dao_id + + logger.debug(f"Processing proposal conclusion message {message_id}") + + # Get the proposal ID from the message + proposal_id = message_data.get("proposal_id") + if not proposal_id: + error_msg = f"Missing proposal_id in message {message_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + try: + # Get the proposal details from the database + proposal = backend.get_proposal(proposal_id) + if not proposal: + error_msg = f"Proposal {proposal_id} not found in database" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Get the DAO information + dao = backend.get_dao(dao_id) if dao_id else None + if not dao: + error_msg = f"DAO not found for proposal {proposal_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Get the DAO token information + tokens = backend.list_tokens(filters=TokenFilter(dao_id=dao_id)) + if not tokens: + error_msg = f"No token found for DAO: {dao_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Use the first token as the DAO token + dao_token = tokens[0] + + logger.info( + f"Preparing to conclude proposal {proposal.proposal_id} for DAO {dao.name}" + ) + + # Initialize the ConcludeActionProposalTool + conclude_tool = ConcludeActionProposalTool( + seed_phrase=config.backend_wallet.seed_phrase + ) + + # Execute the conclusion + logger.debug("Executing conclusion...") + conclusion_result = await conclude_tool._arun( + action_proposals_voting_extension=proposal.contract_principal, # This is the voting extension contract + proposal_id=proposal.proposal_id, # This is the on-chain proposal ID + action_proposal_contract_to_execute=proposal.action, # This is the contract that will be executed + dao_token_contract_address=dao_token.contract_principal, # This is the DAO token contract + ) + logger.debug(f"Conclusion result: {conclusion_result}") + + result = {"success": True, "concluded": True, "result": conclusion_result} + + # Store result and mark the message as processed + update_data = QueueMessageBase(is_processed=True, result=result) + backend.update_queue_message(message_id, update_data) + + logger.info(f"Successfully concluded proposal {proposal.proposal_id}") + + return result + + except Exception as e: + error_msg = f"Error processing message {message_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + result = {"success": False, "error": error_msg} + + # Store result even for failed processing + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + + return result + + async def get_pending_messages(self) -> List[QueueMessage]: + """Get all unprocessed messages from the queue.""" + filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) + return 
backend.list_queue_messages(filters=filters) + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors + if "not found" in str(error).lower(): + return False + if "missing" in str(error).lower() and "proposal_id" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAOProposalConcludeResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "contract" in str(error).lower(): + logger.warning(f"Blockchain/contract error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For validation errors, don't retry + return [ + DAOProposalConcludeResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAOProposalConcludeResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("DAO proposal concluder task cleanup completed") + + async def _execute_impl( + self, context: JobContext + ) -> List[DAOProposalConcludeResult]: + """Run the DAO proposal conclusion task with batch processing.""" + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending proposal conclusion messages") + + if not pending_messages: + return [ + DAOProposalConcludeResult( + success=True, + message="No pending messages found", + proposals_processed=0, + proposals_concluded=0, + ) + ] + + # Process each message + processed_count = 0 + concluded_count = 0 + successful_conclusions = 0 + errors = [] + batch_size = getattr(context, "batch_size", 2) + + # Process messages in batches + for i in range(0, len(pending_messages), batch_size): + batch = pending_messages[i : i + batch_size] + + for message in batch: + try: + result = await self._process_message(message) + processed_count += 1 + + if result.get("success"): + if result.get("concluded", False): + concluded_count += 1 + successful_conclusions += 1 + else: + errors.append(result.get("error", "Unknown error")) + + except Exception as e: + error_msg = f"Exception processing message {message.id}: {str(e)}" + errors.append(error_msg) + logger.error(error_msg, exc_info=True) + + logger.info( + f"DAO proposal concluder task completed - Processed: {processed_count}, " + f"Concluded: {concluded_count}, Errors: {len(errors)}" + ) + + return [ + DAOProposalConcludeResult( + success=True, + message=f"Processed {processed_count} proposal(s), concluded {concluded_count} proposal(s)", + proposals_processed=processed_count, + proposals_concluded=concluded_count, + conclusions_successful=successful_conclusions, + errors=errors, + ) + ] + + +# Create instance for auto-registration +dao_proposal_concluder = DAOProposalConcluderTask() diff --git a/services/infrastructure/job_management/tasks/dao_proposal_embedder.py b/services/infrastructure/job_management/tasks/dao_proposal_embedder.py new file mode 100644 index 00000000..ba301812 --- /dev/null +++ b/services/infrastructure/job_management/tasks/dao_proposal_embedder.py @@ -0,0 +1,365 @@ +"""DAO proposal 
embedder task implementation.""" + +from dataclasses import dataclass +from typing import List, Optional + +import openai + +from backend.factory import backend +from backend.models import Proposal, ProposalBase, ProposalFilter +from config import config +from lib.logger import configure_logger +from services.ai.workflows.mixins.vector_mixin import create_embedding_model +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerConfig, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job + +logger = configure_logger(__name__) + +PROPOSAL_COLLECTION_NAME = "dao_proposals" + + +@dataclass +class DAOProposalEmbeddingResult(RunnerResult): + """Result of DAO proposal embedding operation.""" + + proposals_checked: int = 0 + proposals_embedded: int = 0 + errors: List[str] = None + + def __post_init__(self): + self.errors = self.errors or [] + + +@job( + job_type="dao_proposal_embedder", + name="DAO Proposal Embedder", + description="Generates embeddings for new DAO proposals using vector store with delta processing", + interval_seconds=120, # 2 minutes + priority=JobPriority.LOW, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=180, + max_concurrent=1, + requires_ai=True, + batch_size=10, + enable_dead_letter_queue=True, +) +class DAOProposalEmbedderTask(BaseTask[DAOProposalEmbeddingResult]): + """Task for generating embeddings for new DAO proposals with vector store storage.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._proposals_to_embed = None + + async def _validate_config(self, context: JobContext) -> bool: + """Validate DAO proposal embedder task configuration.""" + try: + if not config.embedding.api_key: + logger.error( + "Embedding API key not configured for DAO proposal embeddings" + ) + return False + if not backend.vecs_client: + logger.error( + "Vector client (vecs) not initialized for DAO proposal embeddings" + ) + return False + return True + except Exception as e: + logger.error( + f"Error validating DAO proposal embedder config: {str(e)}", + exc_info=True, + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for DAO proposal embeddings.""" + try: + # Test embeddings using configured model + try: + embeddings_model = create_embedding_model() + test_embedding = await embeddings_model.aembed_query( + "test dao proposal" + ) + if not test_embedding: + logger.error("Embeddings test failed for DAO proposals") + return False + except Exception as e: + logger.error( + f"DAO proposal embeddings service validation failed: {str(e)}" + ) + return False + + return True + except Exception as e: + logger.error(f"DAO proposal embedding resource validation failed: {str(e)}") + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate DAO proposal embedder task-specific conditions (delta processing).""" + try: + # Get DAO proposals that haven't been embedded (delta processing) + logger.info("Checking for DAO proposals that need embeddings...") + dao_proposals = backend.list_proposals( + filters=ProposalFilter(has_embedding=False) + ) + + logger.info(f"Found {len(dao_proposals)} DAO proposals without embeddings") + + # Filter DAO proposals that have actual content to embed + proposals_to_embed = [] + for proposal in dao_proposals: + if proposal.content and proposal.content.strip(): + proposals_to_embed.append(proposal) + else: + 
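+                    # Content-less proposals keep has_embedding=False and are
+                    # re-checked on the next run instead of being embedded empty.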
logger.debug( + f"Skipping DAO proposal {proposal.id} - no content to embed" + ) + + self._proposals_to_embed = proposals_to_embed + + if proposals_to_embed: + logger.info( + f"Found {len(proposals_to_embed)} DAO proposals with content needing embeddings (delta processing)" + ) + return True + + logger.info( + "No DAO proposals needing embeddings found - all are up to date" + ) + return False + + except Exception as e: + logger.error( + f"Error validating DAO proposal embedder task: {str(e)}", exc_info=True + ) + self._proposals_to_embed = None + return False + + def _format_proposal_for_embedding(self, proposal: Proposal) -> str: + """Format proposal data into a string for embedding.""" + parts = [ + f"DAO Proposal Title: {proposal.title or 'N/A'}", + f"DAO Proposal Content: {proposal.content or 'N/A'}", + f"DAO Proposal Type: {proposal.type.value if proposal.type else 'N/A'}", + ] + if proposal.action: + parts.append(f"DAO Proposal Action: {proposal.action}") + if proposal.summary: + parts.append(f"DAO Proposal Summary: {proposal.summary}") + return "\n".join(parts) + + async def _get_embeddings(self, texts: List[str]) -> Optional[List[List[float]]]: + """Get embeddings for a list of texts using configured embedding model.""" + try: + embeddings_model = create_embedding_model() + embeddings = await embeddings_model.aembed_documents(texts) + return embeddings + except Exception as e: + logger.error( + f"Error getting embeddings for DAO proposals: {str(e)}", + exc_info=True, + ) + return None + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if DAO proposal embedding error should trigger retry.""" + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on DAO proposal content validation errors + if "empty" in str(error).lower() or "no content" in str(error).lower(): + return False + if "api key" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAOProposalEmbeddingResult]]: + """Handle DAO proposal embedding execution errors with recovery logic.""" + if "openai" in str(error).lower() or "embedding" in str(error).lower(): + logger.warning( + f"OpenAI/embedding service error for DAO proposals: {str(error)}, will retry" + ) + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning( + f"Network error during DAO proposal embedding: {str(error)}, will retry" + ) + return None + + return [ + DAOProposalEmbeddingResult( + success=False, + message=f"Unrecoverable DAO proposal embedding error: {str(error)}", + errors=[str(error)], + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAOProposalEmbeddingResult] + ) -> None: + """Cleanup after DAO proposal embedding task execution.""" + self._proposals_to_embed = None + logger.debug("DAO proposal embedder task cleanup completed") + + async def _execute_impl( + self, context: JobContext + ) -> List[DAOProposalEmbeddingResult]: + """Execute DAO proposal embedding task with vector store storage (delta processing only).""" + logger.info("Starting DAO proposal embedding task...") + errors: List[str] = [] + proposals_checked = 0 + proposals_embedded = 0 + + try: + if not self._proposals_to_embed: + logger.info( + "No DAO proposals needing embeddings to process - all proposals up to date" + ) + return [ + DAOProposalEmbeddingResult( + success=True, + message="No DAO proposals require 
embedding generation - all up to date", + proposals_checked=0, + proposals_embedded=0, + ) + ] + + # Ensure OpenAI client is configured + openai.api_key = config.embedding.api_key + + # Ensure the vector collection exists + try: + collection = backend.get_vector_collection(PROPOSAL_COLLECTION_NAME) + logger.debug( + f"Using existing vector collection: {PROPOSAL_COLLECTION_NAME}" + ) + except Exception: + logger.info( + f"Collection '{PROPOSAL_COLLECTION_NAME}' not found, creating..." + ) + collection = backend.create_vector_collection( + PROPOSAL_COLLECTION_NAME, dimensions=config.embedding.dimensions + ) + backend.create_vector_index(PROPOSAL_COLLECTION_NAME) + logger.info( + f"Created new vector collection: {PROPOSAL_COLLECTION_NAME}" + ) + + proposals_to_embed = self._proposals_to_embed + proposals_checked = len(proposals_to_embed) + + logger.info( + f"DELTA PROCESSING: Processing {proposals_checked} DAO proposals requiring embeddings (only new/missing ones)" + ) + + # Prepare data for embedding only for new proposals + texts_to_embed = [] + metadata_list = [] + proposal_ids = [] + + for proposal in proposals_to_embed: + proposal_text = self._format_proposal_for_embedding(proposal) + texts_to_embed.append(proposal_text) + metadata_list.append( + { + "proposal_id": str(proposal.id), + "title": proposal.title or "", + "dao_id": str(proposal.dao_id) if proposal.dao_id else "", + "type": proposal.type.value if proposal.type else "", + "created_at": ( + proposal.created_at.isoformat() + if hasattr(proposal, "created_at") + else "" + ), + } + ) + proposal_ids.append(str(proposal.id)) + + # Get embeddings + logger.info( + f"Requesting embeddings for {len(texts_to_embed)} NEW DAO proposals using model: {config.embedding.default_model}" + ) + embeddings_list = await self._get_embeddings(texts_to_embed) + + if embeddings_list is None: + errors.append("Failed to retrieve embeddings for DAO proposals") + else: + logger.info( + f"Successfully retrieved {len(embeddings_list)} embeddings for DAO proposals" + ) + + # Prepare records for upsert + records_to_upsert = [] + for i, proposal_id in enumerate(proposal_ids): + records_to_upsert.append( + ( + proposal_id, # Use proposal UUID as the vector ID + embeddings_list[i], # Use the retrieved embeddings + metadata_list[i], + ) + ) + + # Upsert into the vector collection + try: + collection.upsert(records=records_to_upsert) + proposals_embedded = len(records_to_upsert) + logger.info( + f"Successfully upserted {proposals_embedded} DAO proposal embeddings to vector store" + ) + + # Update proposals to mark them as embedded + for proposal in proposals_to_embed: + try: + update_data = ProposalBase(has_embedding=True) + backend.update_proposal(proposal.id, update_data) + logger.debug( + f"Marked DAO proposal {proposal.id} as embedded" + ) + except Exception as e: + logger.error( + f"Failed to update has_embedding flag for proposal {proposal.id}: {str(e)}" + ) + # Don't fail the entire task for this + + except Exception as e: + error_msg = f"Failed to upsert DAO proposal embeddings to vector store: {str(e)}" + logger.error(error_msg, exc_info=True) + errors.append(error_msg) + + except Exception as e: + error_msg = f"Error during DAO proposal embedding task: {str(e)}" + logger.error(error_msg, exc_info=True) + errors.append(error_msg) + + success = not errors + message = ( + f"DELTA PROCESSING COMPLETE - Checked {proposals_checked} DAO proposals, embedded {proposals_embedded} new ones in vector store" + if success + else f"DAO proposal embedding task failed. 
Errors: {'; '.join(errors)}" + ) + + return [ + DAOProposalEmbeddingResult( + success=success, + message=message, + proposals_checked=proposals_checked, + proposals_embedded=proposals_embedded, + errors=errors, + ) + ] + + +# Create instance for auto-registration +dao_proposal_embedder = DAOProposalEmbedderTask() diff --git a/services/infrastructure/job_management/tasks/dao_proposal_evaluation.py b/services/infrastructure/job_management/tasks/dao_proposal_evaluation.py new file mode 100644 index 00000000..e6babcf6 --- /dev/null +++ b/services/infrastructure/job_management/tasks/dao_proposal_evaluation.py @@ -0,0 +1,451 @@ +"""DAO proposal evaluation task implementation.""" + +import asyncio +import time +from dataclasses import dataclass +from typing import Any, Dict, List + +from backend.factory import backend +from backend.models import ( + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, + VoteCreate, + VoteFilter, +) +from lib.logger import configure_logger +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job +from services.ai.workflows.comprehensive_evaluation import ( + evaluate_proposal_comprehensive, +) + +logger = configure_logger(__name__) + + +@dataclass +class DAOProposalEvaluationResult(RunnerResult): + """Result of DAO proposal evaluation operation.""" + + proposals_processed: int = 0 + proposals_evaluated: int = 0 + errors: List[str] = None + + def __post_init__(self): + self.errors = self.errors or [] + + +@job( + job_type="dao_proposal_evaluation", + name="DAO Proposal Evaluator", + description="Evaluates DAO proposals using AI analysis with concurrent processing", + interval_seconds=30, + priority=JobPriority.HIGH, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=300, + max_concurrent=1, + requires_ai=True, + batch_size=5, + enable_dead_letter_queue=True, +) +class DAOProposalEvaluationTask(BaseTask[DAOProposalEvaluationResult]): + """Task runner for evaluating DAO proposals with concurrent processing. + + This task processes multiple DAO proposal evaluation messages concurrently + instead of sequentially. 
Key features: + - Uses asyncio.gather() for concurrent execution + - Semaphore controls maximum concurrent operations to prevent resource exhaustion + - Configurable concurrency limit (default: 5) + - Graceful error handling that doesn't stop the entire batch + - Performance timing and detailed logging + """ + + QUEUE_TYPE = QueueMessageType.get_or_create("dao_proposal_evaluation") + DEFAULT_SCORE_THRESHOLD = 70.0 + DEFAULT_AUTO_VOTE = False + DEFAULT_MAX_CONCURRENT_EVALUATIONS = ( + 5 # Limit concurrent evaluations to avoid rate limits + ) + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + try: + # Get pending messages from the queue + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending proposal evaluation messages") + + if message_count == 0: + logger.info("No pending proposal evaluation messages found") + return False + + # Validate that at least one message has a valid proposal + for message in pending_messages: + message_data = message.message or {} + proposal_id = message_data.get("proposal_id") + + if not proposal_id: + logger.warning(f"Message {message.id} missing proposal_id") + continue + + # Check if the proposal exists in the database + proposal = backend.get_proposal(proposal_id) + if proposal: + logger.info(f"Found valid proposal {proposal_id} to process") + return True + else: + logger.warning(f"Proposal {proposal_id} not found in database") + + logger.warning("No valid proposals found in pending messages") + return False + + except Exception as e: + logger.error( + f"Error validating proposal evaluation task: {str(e)}", exc_info=True + ) + return False + + def _has_proposal_been_evaluated( + self, proposal_id: str, wallet_id: str = None + ) -> bool: + """Check if a proposal has already been evaluated by looking at the votes table. 
+ + Args: + proposal_id: The UUID of the proposal to check + wallet_id: Optional wallet ID to check for specific wallet evaluation + + Returns: + bool: True if the proposal has been evaluated, False otherwise + """ + try: + # Create filter to look for existing votes for this proposal + vote_filter = VoteFilter(proposal_id=proposal_id) + + # If wallet_id is provided, check for evaluation by that specific wallet + if wallet_id: + vote_filter.wallet_id = wallet_id + + # Get existing votes for this proposal + existing_votes = backend.list_votes(filters=vote_filter) + + # Check if any votes have evaluation data (indicating evaluation was performed) + for vote in existing_votes: + if ( + vote.evaluation_score + or vote.evaluation + or vote.reasoning + or vote.confidence is not None + ): + logger.debug( + f"Found existing evaluation for proposal {proposal_id}" + ) + return True + + return False + + except Exception as e: + logger.error( + f"Error checking if proposal {proposal_id} was evaluated: {str(e)}" + ) + return False + + async def process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single DAO proposal evaluation message.""" + message_id = message.id + message_data = message.message or {} + wallet_id = message.wallet_id + dao_id = message.dao_id + + logger.debug( + f"Processing proposal evaluation message {message_id} for wallet {wallet_id}" + ) + + # Get the proposal ID from the message + proposal_id = message_data.get("proposal_id") + if not proposal_id: + error_msg = f"Missing proposal_id in message {message_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + try: + # Get the proposal details from the database + proposal = backend.get_proposal(proposal_id) + if not proposal: + error_msg = f"Proposal {proposal_id} not found in database" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Check if this proposal has already been evaluated by this wallet + if self._has_proposal_been_evaluated(proposal_id, wallet_id): + logger.info( + f"Proposal {proposal_id} already evaluated by wallet {wallet_id}, skipping..." 
+ ) + return {"success": True, "skipped": True, "reason": "Already evaluated"} + + # Get the DAO information + dao = backend.get_dao(dao_id) if dao_id else None + if not dao: + error_msg = f"DAO not found for proposal {proposal_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Execute the proposal evaluation workflow + logger.info(f"Evaluating proposal {proposal.id} for DAO {dao.name}") + + # Get proposal data + proposal_content = proposal.content or "No content provided" + + # Set up config for evaluation + config = { + "debug_level": 0, # Normal debug level + } + + evaluation = await evaluate_proposal_comprehensive( + proposal_id=str(proposal.id), + proposal_content=proposal_content, + config=config, + dao_id=str(dao_id) if dao_id else None, + agent_id=None, # No specific agent for job processing + profile_id=None, # No specific profile for job processing + ) + + # Extract evaluation results from the Pydantic model + approval = evaluation.decision + overall_score = evaluation.final_score + reasoning = evaluation.explanation + formatted_prompt = "" # Not available in the new model structure + total_cost = ( + evaluation.token_usage.get("total_cost", 0.0) + if evaluation.token_usage + else 0.0 + ) + model = ( + evaluation.token_usage.get("model", "Unknown") + if evaluation.token_usage + else "Unknown" + ) + evaluation_scores = { + "categories": [ + { + "category": cat.category, + "score": cat.score, + "weight": cat.weight, + "reasoning": cat.reasoning, + } + for cat in evaluation.categories + ], + "final_score": evaluation.final_score, + } # Convert categories to scores format + evaluation_flags = evaluation.flags + + logger.info( + f"Proposal {proposal.id} ({dao.name}): Evaluated with result " + f"{'FOR' if approval else 'AGAINST'} with score {overall_score}" + ) + + wallet = backend.get_wallet(wallet_id) + + # Create a vote record with the evaluation results + vote_data = VoteCreate( + wallet_id=wallet_id, + dao_id=dao_id, + agent_id=( + wallet.agent_id if wallet else None + ), # This will be set from the wallet if it exists + proposal_id=proposal_id, + answer=approval, + reasoning=reasoning, + confidence=overall_score + / 100.0, # Convert score to 0-1 range for compatibility + prompt=formatted_prompt, + cost=total_cost, + model=model, + profile_id=wallet.profile_id if wallet else None, + evaluation_score=evaluation_scores, # Store the complete evaluation scores + flags=evaluation_flags, # Store the evaluation flags + evaluation=evaluation.model_dump(), # Convert Pydantic model to dict for storage + ) + + # Create the vote record + vote = backend.create_vote(vote_data) + if not vote: + logger.error("Failed to create vote record") + return {"success": False, "error": "Failed to create vote record"} + + logger.info(f"Created vote record {vote.id} for proposal {proposal_id}") + + # Mark the evaluation message as processed + update_data = QueueMessageBase( + is_processed=True, + result={ + "success": True, + "vote_id": str(vote.id), + "approve": approval, + "overall_score": overall_score, + }, + ) + backend.update_queue_message(message_id, update_data) + + return { + "success": True, + "vote_id": str(vote.id), + "approve": approval, + "overall_score": overall_score, + } + + except Exception as e: + error_msg = f"Error processing message {message_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + update_data = QueueMessageBase( + is_processed=True, + result={ + "success": False, + "error": error_msg, + }, + ) + backend.update_queue_message(message_id, 
update_data) + + return {"success": False, "error": error_msg} + + async def get_pending_messages(self) -> List[QueueMessage]: + """Get all unprocessed messages from the queue.""" + filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) + return backend.list_queue_messages(filters=filters) + + async def process_message_with_semaphore( + self, semaphore: asyncio.Semaphore, message: QueueMessage + ) -> Dict[str, Any]: + """Process a message with concurrency control using semaphore. + + This wrapper ensures that each message processing is controlled by the + semaphore to limit concurrent operations and prevent resource exhaustion. + """ + async with semaphore: + try: + return await self.process_message(message) + except Exception as e: + # Log the error and return a failure result instead of raising + # This prevents one failed message from crashing the entire batch + error_msg = f"Failed to process message {message.id}: {str(e)}" + logger.error(error_msg, exc_info=True) + return {"success": False, "error": error_msg} + + def get_max_concurrent_evaluations(self, context: JobContext) -> int: + """Get the maximum number of concurrent evaluations from context or default. + + This allows for dynamic configuration of concurrency limits based on: + - Context configuration + - Environment variables + - System load considerations + """ + # Allow context to override the default concurrency limit + context_limit = getattr(context, "max_concurrent_evaluations", None) + + if context_limit is not None: + logger.debug(f"Using context-provided concurrency limit: {context_limit}") + return context_limit + + # Could also check environment variables or system resources here + # import os + # env_limit = os.getenv("DAO_EVAL_MAX_CONCURRENT") + # if env_limit: + # return int(env_limit) + + return self.DEFAULT_MAX_CONCURRENT_EVALUATIONS + + async def _execute_impl( + self, context: JobContext + ) -> List[DAOProposalEvaluationResult]: + """Run the DAO proposal evaluation task with concurrent processing. + + This method processes multiple proposal evaluation messages concurrently + instead of sequentially, which significantly improves performance when + dealing with multiple proposals. The concurrency is controlled by a + semaphore to avoid overwhelming the system or hitting rate limits. 
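+
+        A minimal, self-contained sketch of the same pattern (illustrative
+        only; `handle` here stands in for this task's real per-message
+        processing and the numbers are arbitrary):
+
+            import asyncio
+
+            async def handle(msg):
+                # Placeholder for the real per-message work.
+                await asyncio.sleep(0)
+                return {"success": True, "id": msg}
+
+            async def run_bounded(messages, limit=5):
+                semaphore = asyncio.Semaphore(limit)
+
+                async def bounded(msg):
+                    async with semaphore:  # at most `limit` coroutines run at once
+                        return await handle(msg)
+
+                # return_exceptions=True keeps one failure from cancelling the batch.
+                return await asyncio.gather(
+                    *(bounded(m) for m in messages), return_exceptions=True
+                )
+
+            # results = asyncio.run(run_bounded(range(20), limit=5))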
+ """ + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending proposal evaluation messages") + + if not pending_messages: + return [ + DAOProposalEvaluationResult( + success=True, + message="No pending messages found", + proposals_processed=0, + proposals_evaluated=0, + ) + ] + + # Process messages concurrently with semaphore to limit concurrent operations + max_concurrent = min( + self.get_max_concurrent_evaluations(context), len(pending_messages) + ) + semaphore = asyncio.Semaphore(max_concurrent) + + logger.info( + f"Processing {len(pending_messages)} messages with max {max_concurrent} concurrent evaluations" + ) + + # Create tasks for concurrent processing + tasks = [ + self.process_message_with_semaphore(semaphore, message) + for message in pending_messages + ] + + # Execute all tasks concurrently and collect results + start_time = time.time() + results = await asyncio.gather(*tasks, return_exceptions=True) + execution_time = time.time() - start_time + + logger.info( + f"Completed concurrent processing of {len(pending_messages)} messages in {execution_time:.2f} seconds" + ) + + # Process results + processed_count = len(results) + evaluated_count = 0 + errors = [] + + for i, result in enumerate(results): + if isinstance(result, Exception): + error_msg = f"Exception processing message {pending_messages[i].id}: {str(result)}" + logger.error(error_msg, exc_info=True) + errors.append(error_msg) + elif isinstance(result, dict): + if result.get("success"): + if not result.get( + "skipped", False + ): # Don't count skipped as evaluated + evaluated_count += 1 + else: + errors.append(result.get("error", "Unknown error")) + else: + error_msg = f"Unexpected result type for message {pending_messages[i].id}: {type(result)}" + logger.error(error_msg) + errors.append(error_msg) + + logger.debug( + f"Task metrics - Processed: {processed_count}, " + f"Evaluated: {evaluated_count}, Errors: {len(errors)}" + ) + + return [ + DAOProposalEvaluationResult( + success=True, + message=f"Processed {processed_count} proposal(s), evaluated {evaluated_count} proposal(s)", + proposals_processed=processed_count, + proposals_evaluated=evaluated_count, + errors=errors, + ) + ] + + +# Instantiate the task for use in the registry +dao_proposal_evaluation = DAOProposalEvaluationTask() diff --git a/services/infrastructure/job_management/tasks/dao_proposal_voter.py b/services/infrastructure/job_management/tasks/dao_proposal_voter.py new file mode 100644 index 00000000..22ecbed8 --- /dev/null +++ b/services/infrastructure/job_management/tasks/dao_proposal_voter.py @@ -0,0 +1,494 @@ +"""DAO proposal voter task implementation.""" + +import json +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from backend.factory import backend +from backend.models import ( + UUID, + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, + VoteBase, + VoteFilter, +) +from config import config +from lib.logger import configure_logger +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job +from tools.dao_ext_action_proposals import VoteOnActionProposalTool + +logger = configure_logger(__name__) + + +@dataclass +class DAOProposalVoteResult(RunnerResult): + """Result of DAO proposal voting operation.""" + + proposals_processed: int = 0 + proposals_voted: int = 0 + votes_cast: int = 
0 + errors: List[str] = None + + def __post_init__(self): + self.errors = self.errors or [] + + +@job( + job_type="dao_proposal_vote", + name="DAO Proposal Voter", + description="Processes and votes on DAO proposals with enhanced monitoring and error handling", + interval_seconds=30, + priority=JobPriority.HIGH, + max_retries=2, + retry_delay_seconds=60, + timeout_seconds=300, + max_concurrent=1, + requires_blockchain=True, + batch_size=3, + enable_dead_letter_queue=True, +) +class DAOProposalVoterTask(BaseTask[DAOProposalVoteResult]): + """Task runner for processing and voting on DAO proposals with enhanced capabilities.""" + + QUEUE_TYPE = QueueMessageType.get_or_create("dao_proposal_vote") + + async def get_pending_messages(self) -> List[QueueMessage]: + """Get all unprocessed DAO proposal vote messages from the queue.""" + filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) + return backend.list_queue_messages(filters=filters) + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if voting tool can be initialized + if not config.scheduler: + logger.error("Scheduler config not available") + return False + return True + except Exception as e: + logger.error( + f"Error validating proposal voter config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + return True + except Exception as e: + logger.error(f"Backend not available: {str(e)}") + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate that we have pending messages to process.""" + try: + pending_messages = await self.get_pending_messages() + + if not pending_messages: + logger.info("No pending DAO proposal vote messages to process") + return False + + # Validate each message has required data + valid_messages = [] + for message in pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + + if valid_messages: + logger.info( + f"Found {len(valid_messages)} valid DAO proposal vote messages" + ) + return True + + logger.info("No valid DAO proposal vote messages to process") + return False + + except Exception as e: + logger.error( + f"Error validating proposal voter task: {str(e)}", exc_info=True + ) + return False + + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a proposal vote message is valid for processing.""" + try: + if not message.wallet_id or not message.message: + return False + + proposal_id = message.message.get("proposal_id") + if not proposal_id: + return False + + # Check if proposal exists + try: + proposal_uuid = UUID(proposal_id) + proposal = backend.get_proposal(proposal_uuid) + if not proposal: + return False + except (ValueError, Exception): + return False + + return True + except Exception: + return False + + async def _process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single DAO proposal voting message with enhanced error handling.""" + message_id = message.id + message_data = message.message or {} + wallet_id = message.wallet_id + + logger.debug( + f"Processing proposal voting message {message_id} for wallet {wallet_id}" + ) + + # Get the proposal ID from the message (this should be the database UUID) + proposal_id = message_data.get("proposal_id") + if not proposal_id: + error_msg = f"Missing proposal_id in message {message_id}" + logger.error(error_msg) + return 
{"success": False, "error": error_msg} + + try: + # Convert string UUID to UUID object + try: + proposal_uuid = UUID(proposal_id) + except ValueError: + error_msg = ( + f"Invalid proposal_id format {proposal_id} in message {message_id}" + ) + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Get the proposal by its database ID + proposal = backend.get_proposal(proposal_uuid) + if not proposal: + error_msg = f"Proposal {proposal_id} not found in database" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Get the wallet + wallet = backend.get_wallet(wallet_id) + if not wallet: + error_msg = f"Wallet {wallet_id} not found" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Get unvoted votes for this specific proposal and wallet + votes = backend.list_votes( + VoteFilter(proposal_id=proposal_uuid, wallet_id=wallet_id) + ) + if not votes: + error_msg = ( + f"No votes found for proposal {proposal_id} and wallet {wallet_id}" + ) + logger.warning(error_msg) + return { + "success": True, + "message": "No votes to process", + "votes_processed": 0, + } + + # Filter out already voted votes + unvoted_votes = [vote for vote in votes if not vote.voted] + + if not unvoted_votes: + error_msg = f"No unvoted votes found for proposal {proposal_id} and wallet {wallet_id}" + logger.warning(error_msg) + return { + "success": True, + "message": "No votes to process", + "votes_processed": 0, + } + + logger.info( + f"Found {len(unvoted_votes)} unvoted votes for proposal {proposal_id} and wallet {wallet_id}" + ) + + # Initialize the voting tool + voting_tool = VoteOnActionProposalTool(wallet_id=wallet_id) + + # Process each unvoted vote + results = [] + for vote in unvoted_votes: + # Submit the vote + vote_result = await voting_tool._arun( + dao_action_proposal_voting_contract=proposal.contract_principal, + proposal_id=proposal.proposal_id, + vote_for=vote.answer, + ) + + if not vote_result.get("success", False): + error_msg = f"Failed to submit vote {vote.id}: {vote_result.get('message', 'Unknown error')}" + logger.error(error_msg) + results.append( + {"success": False, "error": error_msg, "vote_id": vote.id} + ) + continue + + try: + # Parse the output JSON string + output_data = ( + json.loads(vote_result["output"]) + if isinstance(vote_result["output"], str) + else vote_result["output"] + ) + # Get the transaction ID from the nested data structure + tx_id = output_data.get("data", {}).get("txid") + + if not tx_id: + logger.warning(f"No txid found in parsed output: {output_data}") + results.append( + { + "success": False, + "error": "No transaction ID found in response", + "vote_id": vote.id, + "vote_result": vote_result, + } + ) + continue + + except (json.JSONDecodeError, KeyError) as e: + logger.error(f"Error parsing vote result output: {str(e)}") + results.append( + { + "success": False, + "error": f"Failed to parse vote result: {str(e)}", + "vote_id": vote.id, + "vote_result": vote_result, + } + ) + continue + + # Get the correct address based on network configuration + address = ( + wallet.mainnet_address + if config.network.network == "mainnet" + else wallet.testnet_address + ) + + vote_data = VoteBase( + tx_id=tx_id, + voted=True, + address=address, + profile_id=wallet.profile_id, + ) + + try: + updated_vote = backend.update_vote(vote.id, vote_data) + if updated_vote: + logger.info( + f"Successfully updated vote {vote.id} with transaction ID {tx_id}" + ) + results.append( + { + "success": True, + "vote_id": vote.id, + 
"tx_id": tx_id, + } + ) + else: + logger.error( + f"Failed to update vote {vote.id} - update_vote returned None" + ) + results.append( + { + "success": False, + "error": "Failed to update vote in database", + "vote_id": vote.id, + } + ) + except Exception as e: + logger.error( + f"Error updating vote {vote.id}: {str(e)}", exc_info=True + ) + results.append( + { + "success": False, + "error": f"Failed to update vote: {str(e)}", + "vote_id": vote.id, + } + ) + + # Mark the message as processed ONLY if ALL votes were handled successfully + successful_votes = len([r for r in results if r["success"]]) + if successful_votes == len(results) and successful_votes > 0: + result = { + "success": True, + "votes_processed": successful_votes, + "votes_failed": len(results) - successful_votes, + "results": results, + } + update_data = QueueMessageBase(is_processed=True, result=result) + backend.update_queue_message(message_id, update_data) + logger.info( + f"Successfully processed all {successful_votes} votes for message {message_id} - marking as processed" + ) + elif successful_votes > 0: + result = { + "success": False, + "votes_processed": successful_votes, + "votes_failed": len(results) - successful_votes, + "results": results, + "message": "Partial success - some votes failed", + } + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + logger.warning( + f"Only {successful_votes}/{len(results)} votes succeeded for message {message_id} - leaving unprocessed for retry" + ) + else: + result = { + "success": False, + "votes_processed": 0, + "votes_failed": len(results), + "results": results, + "message": "All votes failed", + } + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + logger.error( + f"No votes succeeded for message {message_id} - leaving unprocessed for retry" + ) + + return { + "success": True, + "votes_processed": successful_votes, + "votes_failed": len(results) - successful_votes, + "results": results, + } + + except Exception as e: + error_msg = f"Error processing message {message_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + result = {"success": False, "error": error_msg} + + # Store result even for failed processing + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + + return result + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors + if "not found" in str(error).lower(): + return False + if "invalid" in str(error).lower() and "format" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAOProposalVoteResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "proposal" in str(error).lower(): + logger.warning(f"Blockchain/proposal error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For validation errors, don't retry + return [ + DAOProposalVoteResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + 
self, context: JobContext, results: List[DAOProposalVoteResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("DAO proposal voter task cleanup completed") + + async def _execute_impl(self, context: JobContext) -> List[DAOProposalVoteResult]: + """Run the DAO proposal voter task by processing each message with batch processing.""" + # Get pending messages + pending_messages = await self.get_pending_messages() + + if not pending_messages: + return [ + DAOProposalVoteResult( + success=True, + message="No pending messages found", + proposals_processed=0, + proposals_voted=0, + ) + ] + + message_count = len(pending_messages) + logger.info(f"Processing {message_count} pending proposal voting messages") + + # Process each message + processed_count = 0 + total_votes_processed = 0 + total_votes_cast = 0 + errors = [] + batch_size = getattr(context, "batch_size", 3) + + # Process messages in batches + for i in range(0, len(pending_messages), batch_size): + batch = pending_messages[i : i + batch_size] + + for message in batch: + try: + result = await self._process_message(message) + processed_count += 1 + + if result.get("success"): + votes_processed = result.get("votes_processed", 0) + total_votes_processed += votes_processed + if votes_processed > 0: + total_votes_cast += votes_processed + logger.debug( + f"Message {message.id}: processed {votes_processed} votes" + ) + else: + error_msg = result.get("error", "Unknown error") + errors.append(f"Message {message.id}: {error_msg}") + logger.error( + f"Failed to process message {message.id}: {error_msg}" + ) + + except Exception as e: + error_msg = f"Exception processing message {message.id}: {str(e)}" + errors.append(error_msg) + logger.error(error_msg, exc_info=True) + + logger.info( + f"DAO proposal voter task completed - Processed: {processed_count}/{message_count} messages, " + f"Votes cast: {total_votes_cast}, Errors: {len(errors)}" + ) + + return [ + DAOProposalVoteResult( + success=True, + message=f"Processed {processed_count} message(s), voted on {total_votes_cast} vote(s)", + proposals_processed=processed_count, + proposals_voted=total_votes_processed, + votes_cast=total_votes_cast, + errors=errors, + ) + ] + + +# Create instance for auto-registration +dao_proposal_voter = DAOProposalVoterTask() diff --git a/services/infrastructure/job_management/tasks/dao_token_holders_monitor.py b/services/infrastructure/job_management/tasks/dao_token_holders_monitor.py new file mode 100644 index 00000000..2bdcfa88 --- /dev/null +++ b/services/infrastructure/job_management/tasks/dao_token_holders_monitor.py @@ -0,0 +1,382 @@ +"""DAO token holders monitoring task implementation.""" + +from dataclasses import dataclass +from datetime import datetime +from typing import List, Optional + +from backend.factory import backend +from backend.models import ( + HolderBase, + HolderCreate, + HolderFilter, + WalletFilter, +) +from services.integrations.hiro.hiro_api import HiroApi +from lib.logger import configure_logger +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerConfig, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job + +logger = configure_logger(__name__) + + +@dataclass +class DaoTokenHoldersMonitorResult(RunnerResult): + """Result of DAO token holders monitoring operation.""" + + tokens_processed: int = 0 + holders_created: int = 0 + holders_updated: int = 0 + holders_removed: int = 0 + errors: List[str] = None + + def __post_init__(self): + 
"""Initialize default values after dataclass creation.""" + if self.errors is None: + self.errors = [] + + +@job( + job_type="dao_token_holders_monitor", + name="DAO Token Holders Monitor", + description="Monitors and syncs DAO token holders with blockchain data every 5 minutes", + interval_seconds=300, # 5 minutes + priority=JobPriority.MEDIUM, + max_retries=3, + retry_delay_seconds=120, + timeout_seconds=600, # 10 minutes timeout + max_concurrent=1, + requires_blockchain=True, + batch_size=10, + enable_dead_letter_queue=True, +) +class DaoTokenHoldersMonitorTask(BaseTask[DaoTokenHoldersMonitorResult]): + """Task for monitoring and syncing DAO token holders with blockchain data.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self.hiro_api = HiroApi() + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Token holders monitor doesn't require wallet configuration + # It only reads from the blockchain and updates database + return True + except Exception as e: + logger.error( + f"Error validating DAO token holders monitor config: {str(e)}", + exc_info=True, + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for blockchain monitoring.""" + try: + # Test HiroApi initialization and connectivity + hiro_api = HiroApi() + api_info = await hiro_api.aget_info() + if not api_info: + logger.error("Cannot connect to Hiro API") + return False + + return True + except Exception as e: + logger.error(f"Resource validation failed: {str(e)}") + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + try: + # Always valid to run - we want to keep holders data fresh + return True + except Exception as e: + logger.error( + f"Error validating DAO token holders monitor task: {str(e)}", + exc_info=True, + ) + return False + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain RPC issues + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on configuration errors + if "not configured" in str(error).lower(): + return False + if "invalid token" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DaoTokenHoldersMonitorResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "rpc" in str(error).lower(): + logger.warning(f"Blockchain/RPC error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For configuration errors, don't retry + return [ + DaoTokenHoldersMonitorResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DaoTokenHoldersMonitorResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("DAO token holders monitor task cleanup completed") + + def _parse_token_identifier(self, token) -> Optional[str]: + """Parse token identifier for Hiro API call.""" + if hasattr(token, "contract_principal") and token.contract_principal: + return token.contract_principal + elif 
hasattr(token, "symbol") and token.symbol: + return token.symbol + elif hasattr(token, "name") and token.name: + return token.name + else: + logger.warning(f"Could not determine token identifier for token {token.id}") + return None + + def _get_wallet_for_address(self, address: str): + """Get existing wallet for a given address. Returns None if no wallet exists.""" + try: + # Try to find existing wallet by address + mainnet_wallets = backend.list_wallets( + filters=WalletFilter(mainnet_address=address) + ) + if mainnet_wallets: + return mainnet_wallets[0] + + testnet_wallets = backend.list_wallets( + filters=WalletFilter(testnet_address=address) + ) + if testnet_wallets: + return testnet_wallets[0] + + # If no wallet found, return None (don't create new wallet) + logger.debug(f"No existing wallet found for address {address}") + return None + + except Exception as e: + logger.error(f"Error getting wallet for address {address}: {str(e)}") + return None + + async def _sync_token_holders( + self, token, result: DaoTokenHoldersMonitorResult + ) -> None: + """Sync holders for a specific token.""" + try: + token_identifier = self._parse_token_identifier(token) + if not token_identifier: + error_msg = f"Could not parse token identifier for token {token.id}" + logger.error(error_msg) + result.errors.append(error_msg) + return + + logger.info(f"Syncing holders for token {token.name} ({token_identifier})") + + # Get all current holders from Hiro API (with pagination) + try: + api_holders_response = self.hiro_api.get_all_token_holders( + token_identifier + ) + logger.debug( + f"API response for token {token_identifier}: {api_holders_response}" + ) + except Exception as e: + error_msg = f"Error fetching holders from API for token {token_identifier}: {str(e)}" + logger.error(error_msg) + result.errors.append(error_msg) + return + + # Parse API response + api_holders = [] + if ( + isinstance(api_holders_response, dict) + and "results" in api_holders_response + ): + api_holders = api_holders_response["results"] + elif isinstance(api_holders_response, list): + api_holders = api_holders_response + else: + logger.warning( + f"Unexpected API response format for token {token_identifier}" + ) + return + + logger.info( + f"Found {len(api_holders)} holders from API for token {token.name}" + ) + + # Get current holders from database + db_holders = backend.list_holders(HolderFilter(token_id=token.id)) + logger.info( + f"Found {len(db_holders)} existing holders in database for token {token.name}" + ) + + # Create lookup maps + db_holders_by_wallet = {holder.wallet_id: holder for holder in db_holders} + api_holders_by_address = {} + + # Process API holders + for api_holder in api_holders: + try: + # Parse holder data from Hiro API response format + address = api_holder.get("address") + balance = api_holder.get("balance", "0") + + if not address: + logger.warning( + f"No address found in API holder data: {api_holder}" + ) + continue + + if not balance: + logger.warning( + f"No balance found for address {address}, defaulting to 0" + ) + balance = "0" + + api_holders_by_address[address] = balance + + # Get existing wallet for this address + wallet = self._get_wallet_for_address(address) + if not wallet: + logger.debug( + f"No existing wallet found for address {address}, skipping holder record" + ) + continue + + # Check if we already have this holder in the database + if wallet.id in db_holders_by_wallet: + # Update existing holder + existing_holder = db_holders_by_wallet[wallet.id] + if existing_holder.amount != 
str(balance): + logger.info( + f"Updating holder {address}: {existing_holder.amount} -> {balance}" + ) + update_data = HolderBase( + amount=str(balance), + updated_at=datetime.now(), + address=address, + ) + backend.update_holder(existing_holder.id, update_data) + result.holders_updated += 1 + else: + # Create new holder + logger.info( + f"Creating new holder {address} with balance {balance}" + ) + holder_create = HolderCreate( + wallet_id=wallet.id, + token_id=token.id, + dao_id=token.dao_id, + amount=str(balance), + updated_at=datetime.now(), + address=address, + ) + backend.create_holder(holder_create) + result.holders_created += 1 + + except Exception as e: + error_msg = f"Error processing API holder {api_holder}: {str(e)}" + logger.error(error_msg) + result.errors.append(error_msg) + + # Check for holders that are no longer in the API response (removed holders) + for db_holder in db_holders: + wallet = backend.get_wallet(db_holder.wallet_id) + if wallet: + address = wallet.mainnet_address or wallet.testnet_address + if address and address not in api_holders_by_address: + # This holder is no longer holding tokens, remove from database + logger.info( + f"Removing holder {address} (no longer holds tokens)" + ) + backend.delete_holder(db_holder.id) + result.holders_removed += 1 + + except Exception as e: + error_msg = f"Error syncing holders for token {token.id}: {str(e)}" + logger.error(error_msg, exc_info=True) + result.errors.append(error_msg) + + async def _execute_impl( + self, context: JobContext + ) -> List[DaoTokenHoldersMonitorResult]: + """Execute DAO token holders monitoring task.""" + logger.info("Starting DAO token holders monitoring task") + + result = DaoTokenHoldersMonitorResult( + success=True, message="DAO token holders sync completed" + ) + + try: + # Get all tokens from the database + all_tokens = backend.list_tokens() + logger.info(f"Found {len(all_tokens)} tokens to process") + + if not all_tokens: + result.message = "No tokens found to process" + return [result] + + # Process each token + for token in all_tokens: + try: + logger.info(f"Processing token: {token.name} (ID: {token.id})") + await self._sync_token_holders(token, result) + result.tokens_processed += 1 + + except Exception as e: + error_msg = f"Error processing token {token.id}: {str(e)}" + logger.error(error_msg, exc_info=True) + result.errors.append(error_msg) + # Continue processing other tokens even if one fails + + # Update result message with summary + summary = ( + f"Processed {result.tokens_processed} tokens. " + f"Created {result.holders_created}, updated {result.holders_updated}, " + f"removed {result.holders_removed} holders." + ) + + if result.errors: + summary += f" Encountered {len(result.errors)} errors." 
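+                # Any per-token or per-holder error marks the whole run as
+                # unsuccessful so it is visible in the returned result.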
+ result.success = False + + result.message = summary + logger.info(summary) + + return [result] + + except Exception as e: + error_msg = f"Error executing DAO token holders monitoring task: {str(e)}" + logger.error(error_msg, exc_info=True) + return [ + DaoTokenHoldersMonitorResult( + success=False, message=error_msg, error=e, errors=[error_msg] + ) + ] + + +# Create instance for auto-registration +dao_token_holders_monitor = DaoTokenHoldersMonitorTask() diff --git a/services/infrastructure/job_management/tasks/discord_task.py b/services/infrastructure/job_management/tasks/discord_task.py new file mode 100644 index 00000000..480d87cf --- /dev/null +++ b/services/infrastructure/job_management/tasks/discord_task.py @@ -0,0 +1,428 @@ +from dataclasses import dataclass +from typing import List, Optional +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, +) +from config import config +from lib.logger import configure_logger +from services.communication.discord.discord_factory import create_discord_service +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerConfig, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job + +logger = configure_logger(__name__) + + +@dataclass +class DiscordProcessingResult(RunnerResult): + """Result of Discord message processing operation.""" + + queue_message_id: Optional[UUID] = None + dao_id: Optional[UUID] = None + messages_sent: int = 0 + webhook_url_used: Optional[str] = None + + +@job( + job_type="discord", + name="Discord Message Sender", + description="Sends Discord messages from queue with webhook support and enhanced error handling", + interval_seconds=20, + priority=JobPriority.MEDIUM, + max_retries=3, + retry_delay_seconds=30, + timeout_seconds=120, + max_concurrent=1, + requires_discord=True, + batch_size=10, + enable_dead_letter_queue=True, +) +class DiscordTask(BaseTask[DiscordProcessingResult]): + """Task for sending Discord messages from the queue with enhanced capabilities.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._pending_messages: Optional[List[QueueMessage]] = None + self._discord_services: dict[str, object] = {} + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if at least one webhook URL is configured + if ( + not config.discord.webhook_url_passed + and not config.discord.webhook_url_failed + ): + logger.error("No Discord webhook URLs configured") + return False + return True + except Exception as e: + logger.error(f"Error validating Discord config: {str(e)}", exc_info=True) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + # Test Discord service creation + test_webhook = ( + config.discord.webhook_url_passed or config.discord.webhook_url_failed + ) + discord_service = create_discord_service(webhook_url=test_webhook) + if not discord_service: + logger.error("Cannot create Discord service") + return False + return True + except Exception as e: + logger.error(f"Discord resource validation failed: {str(e)}") + return False + + async def _validate_prerequisites(self, context: JobContext) -> bool: + """Validate task prerequisites.""" + try: + self._pending_messages = backend.list_queue_messages( + filters=QueueMessageFilter( + 
type=QueueMessageType.get_or_create("discord"), is_processed=False + ) + ) + return True + except Exception as e: + logger.error( + f"Error validating Discord prerequisites: {str(e)}", exc_info=True + ) + self._pending_messages = None + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + if not self._pending_messages: + logger.debug("No pending Discord messages found") + return False + + # Validate each message has required content + valid_messages = [] + for message in self._pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + + self._pending_messages = valid_messages + + if valid_messages: + logger.debug(f"Found {len(valid_messages)} valid Discord messages") + return True + + logger.debug("No valid Discord messages to process") + return False + + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a Discord message is valid for processing.""" + try: + if not message.message or not isinstance(message.message, dict): + logger.debug( + f"Message {message.id} invalid: message field is not a dict" + ) + return False + + content = message.message.get("content") + if not content or not str(content).strip(): + logger.debug(f"Message {message.id} invalid: content field is empty") + return False + + # Check for required Discord message structure + # Content should be a string + if not isinstance(content, str): + logger.debug(f"Message {message.id} invalid: content is not a string") + return False + + # Optional fields should have correct types if present + embeds = message.message.get("embeds") + if embeds is not None and not isinstance(embeds, list): + logger.debug(f"Message {message.id} invalid: embeds is not a list") + return False + + tts = message.message.get("tts") + if tts is not None and not isinstance(tts, bool): + logger.debug(f"Message {message.id} invalid: tts is not a boolean") + return False + + proposal_status = message.message.get("proposal_status") + if proposal_status is not None and not isinstance(proposal_status, str): + logger.debug( + f"Message {message.id} invalid: proposal_status is not a string" + ) + return False + + return True + except Exception as e: + logger.debug(f"Message {message.id} validation error: {str(e)}") + return False + + def _get_webhook_url(self, message: QueueMessage) -> str: + """Get the appropriate webhook URL for the message.""" + # Allow message-level webhook override + webhook_url = message.message.get("webhook_url") + if webhook_url: + return webhook_url + + # Select based on proposal status + proposal_status = message.message.get("proposal_status") + if proposal_status == "passed": + return config.discord.webhook_url_passed + elif proposal_status == "failed": + return config.discord.webhook_url_failed + elif proposal_status in ["veto_window_open", "veto_window_closed"]: + # Veto window notifications go to passed webhook (info/updates channel) + return config.discord.webhook_url_passed + else: + # Default to passed webhook for backwards compatibility + return config.discord.webhook_url_passed + + def _get_discord_service(self, webhook_url: str): + """Get or create Discord service with caching.""" + if webhook_url in self._discord_services: + return self._discord_services[webhook_url] + + discord_service = create_discord_service(webhook_url=webhook_url) + if discord_service: + self._discord_services[webhook_url] = discord_service + + return discord_service + + async def _process_discord_message( + 
self, message: QueueMessage + ) -> DiscordProcessingResult: + """Process a single Discord queue message with enhanced error handling.""" + try: + # Extract content and optional parameters from message.message + if not message.message: + logger.warning(f"Discord message {message.id} has empty message field") + return DiscordProcessingResult( + success=False, + message="Discord message is empty", + queue_message_id=message.id, + dao_id=message.dao_id, + ) + + if not isinstance(message.message, dict): + logger.warning( + f"Discord message {message.id} message field is not a dict: {type(message.message)}" + ) + return DiscordProcessingResult( + success=False, + message=f"Discord message format invalid: expected dict, got {type(message.message)}", + queue_message_id=message.id, + dao_id=message.dao_id, + ) + + content = message.message.get("content") + embeds = message.message.get("embeds") + tts = message.message.get("tts", False) + + # Validate content exists and is not empty + if not content or not str(content).strip(): + logger.warning(f"Discord message {message.id} has empty content field") + return DiscordProcessingResult( + success=False, + message="Discord message content is empty", + queue_message_id=message.id, + dao_id=message.dao_id, + ) + + # Get appropriate webhook URL + webhook_url = self._get_webhook_url(message) + if not webhook_url: + return DiscordProcessingResult( + success=False, + message="No webhook URL available for Discord message", + queue_message_id=message.id, + dao_id=message.dao_id, + ) + + # Get Discord service + discord_service = self._get_discord_service(webhook_url) + if not discord_service: + return DiscordProcessingResult( + success=False, + message="Failed to initialize Discord service", + queue_message_id=message.id, + dao_id=message.dao_id, + ) + + logger.info(f"Sending Discord message for queue {message.id}") + logger.debug(f"Content: {content[:100]}..." 
if content else "No content") + logger.debug( + f"Proposal status: {message.message.get('proposal_status', 'none')}" + ) + logger.debug(f"Webhook URL used: {webhook_url}") + + # Send the message + result = discord_service.send_message(content, embeds=embeds, tts=tts) + + if result.get("success"): + logger.info(f"Successfully sent Discord message for queue {message.id}") + return DiscordProcessingResult( + success=True, + message="Successfully sent Discord message", + queue_message_id=message.id, + dao_id=message.dao_id, + messages_sent=1, + webhook_url_used=webhook_url, + ) + else: + logger.error(f"Failed to send Discord message: {result}") + return DiscordProcessingResult( + success=False, + message=f"Failed to send Discord message: {result}", + queue_message_id=message.id, + dao_id=message.dao_id, + ) + + except Exception as e: + logger.error( + f"Error processing Discord message {message.id}: {str(e)}", + exc_info=True, + ) + return DiscordProcessingResult( + success=False, + message=f"Error sending Discord message: {str(e)}", + error=e, + queue_message_id=message.id, + dao_id=message.dao_id, + ) + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, API timeouts, webhook issues + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on configuration errors + if "webhook" in str(error).lower() and "not configured" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DiscordProcessingResult]]: + """Handle execution errors with recovery logic.""" + if "webhook" in str(error).lower() or "discord" in str(error).lower(): + logger.warning(f"Discord service error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For configuration errors, don't retry + return [ + DiscordProcessingResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DiscordProcessingResult] + ) -> None: + """Cleanup after task execution.""" + # Clear cached pending messages + self._pending_messages = None + + # Keep Discord services cached for reuse + logger.debug( + f"Discord task cleanup completed. 
Cached services: {len(self._discord_services)}" + ) + + async def _execute_impl(self, context: JobContext) -> List[DiscordProcessingResult]: + """Execute Discord message sending task with batch processing.""" + results: List[DiscordProcessingResult] = [] + + if not self._pending_messages: + logger.debug("No pending Discord messages to process") + return results + + processed_count = 0 + success_count = 0 + batch_size = getattr(context, "batch_size", 10) + + # Process messages in batches + for i in range(0, len(self._pending_messages), batch_size): + batch = self._pending_messages[i : i + batch_size] + + for message in batch: + logger.debug(f"Processing Discord message: {message.id}") + result = await self._process_discord_message(message) + results.append(result) + processed_count += 1 + + if result.success: + success_count += 1 + # Mark message as processed with result + result_dict = { + "success": result.success, + "message": result.message, + "queue_message_id": ( + str(result.queue_message_id) + if result.queue_message_id + else None + ), + "dao_id": str(result.dao_id) if result.dao_id else None, + "messages_sent": result.messages_sent, + "webhook_url_used": result.webhook_url_used, + "error": str(result.error) if result.error else None, + } + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase( + is_processed=True, result=result_dict + ), + ) + logger.debug( + f"Marked Discord message {message.id} as processed with result" + ) + else: + # Store result for failed processing + result_dict = { + "success": result.success, + "message": result.message, + "queue_message_id": ( + str(result.queue_message_id) + if result.queue_message_id + else None + ), + "dao_id": str(result.dao_id) if result.dao_id else None, + "messages_sent": result.messages_sent, + "webhook_url_used": result.webhook_url_used, + "error": str(result.error) if result.error else None, + } + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(result=result_dict), + ) + logger.debug( + f"Stored result for failed Discord message {message.id}" + ) + + logger.info( + f"Discord task completed - Processed: {processed_count}, " + f"Successful: {success_count}, Failed: {processed_count - success_count}" + ) + + return results + + +# Create instance for auto-registration +discord_task = DiscordTask() diff --git a/services/infrastructure/job_management/tasks/tweet_task.py b/services/infrastructure/job_management/tasks/tweet_task.py new file mode 100644 index 00000000..f626a4d6 --- /dev/null +++ b/services/infrastructure/job_management/tasks/tweet_task.py @@ -0,0 +1,795 @@ +"""Enhanced Tweet Task using the new job queue system.""" + +from dataclasses import dataclass +from typing import List, Optional +from uuid import UUID + + +from backend.factory import backend +from backend.models import ( + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, + XCredsFilter, +) +from config import config +from lib.logger import configure_logger +from lib.utils import extract_image_urls +from services.communication.twitter_service import TwitterService +from services.infrastructure.job_management.base import ( + BaseTask, + JobContext, + RunnerConfig, + RunnerResult, +) +from services.infrastructure.job_management.decorators import JobPriority, job +import re + +logger = configure_logger(__name__) + + +@dataclass +class TweetProcessingResult(RunnerResult): + """Result of tweet processing operation.""" + + tweet_id: Optional[str] = None + dao_id: 
Optional[UUID] = None + tweets_sent: int = 0 + chunks_processed: int = 0 + + +@job( + job_type="tweet", + name="Tweet Processor", + description="Processes and sends tweets for DAOs with automatic retry and error handling", + interval_seconds=30, + priority=JobPriority.HIGH, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=300, + max_concurrent=1, + requires_twitter=True, + batch_size=5, + enable_dead_letter_queue=True, +) +class TweetTask(BaseTask[TweetProcessingResult]): + """Enhanced task for sending tweets with improved error handling and monitoring.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._pending_messages: Optional[List[QueueMessage]] = None + self._twitter_services: dict[UUID, TwitterService] = {} + + async def _get_twitter_service(self, dao_id: UUID) -> Optional[TwitterService]: + """Get or create Twitter service for a DAO with caching.""" + if dao_id in self._twitter_services: + return self._twitter_services[dao_id] + + try: + # Get Twitter credentials for the DAO + creds = backend.list_x_creds(filters=XCredsFilter(dao_id=dao_id)) + if not creds: + logger.error(f"No Twitter credentials found for DAO {dao_id}") + return None + + # Initialize Twitter service with the credentials + twitter_service = TwitterService( + consumer_key=creds[0].consumer_key, + consumer_secret=creds[0].consumer_secret, + client_id=creds[0].client_id, + client_secret=creds[0].client_secret, + access_token=creds[0].access_token, + access_secret=creds[0].access_secret, + bearer_token=creds[0].bearer_token, + ) + await twitter_service._ainitialize() + + # Cache the service + self._twitter_services[dao_id] = twitter_service + logger.debug(f"Initialized and cached Twitter service for DAO {dao_id}") + return twitter_service + + except Exception as e: + logger.error( + f"Error initializing Twitter service for DAO {dao_id}: {str(e)}", + exc_info=True, + ) + return None + + async def _initialize_twitter_service(self, dao_id: UUID) -> bool: + """Initialize Twitter service with credentials from config.""" + try: + # Check if Twitter is enabled in config + if not config.twitter.enabled: + logger.error("Twitter service is disabled in configuration") + return False + + # Validate that required Twitter credentials are configured + if not all( + [ + config.twitter.consumer_key, + config.twitter.consumer_secret, + config.twitter.client_id, + config.twitter.client_secret, + config.twitter.access_token, + config.twitter.access_secret, + ] + ): + logger.error("Missing required Twitter credentials in configuration") + return False + + # Initialize Twitter service with credentials from config + self.twitter_service = TwitterService( + consumer_key=config.twitter.consumer_key, + consumer_secret=config.twitter.consumer_secret, + client_id=config.twitter.client_id, + client_secret=config.twitter.client_secret, + access_token=config.twitter.access_token, + access_secret=config.twitter.access_secret, + ) + await self.twitter_service._ainitialize() + logger.debug(f"Initialized Twitter service for DAO {dao_id}") + return True + + except Exception as e: + logger.error(f"Failed to initialize Twitter service: {str(e)}") + return False + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + # Enhanced validation with timeout check + if context.timeout_seconds and context.timeout_seconds < 60: + logger.warning("Tweet task timeout should be at least 60 seconds") + return False + return True + + async def 
_validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + # Validate Twitter configuration + if not config.twitter.enabled: + logger.debug("Twitter service is disabled") + return False + + if not all( + [ + config.twitter.consumer_key, + config.twitter.consumer_secret, + config.twitter.client_id, + config.twitter.client_secret, + config.twitter.access_token, + config.twitter.access_secret, + ] + ): + logger.error("Missing required Twitter credentials in configuration") + return False + + return True + except Exception as e: + logger.error(f"Backend not available: {str(e)}") + return False + + async def _validate_prerequisites(self, context: JobContext) -> bool: + """Validate task prerequisites.""" + try: + # Cache pending messages for later use + self._pending_messages = backend.list_queue_messages( + filters=QueueMessageFilter( + type=QueueMessageType.get_or_create("tweet"), is_processed=False + ) + ) + logger.debug( + f"Found {len(self._pending_messages)} unprocessed tweet messages" + ) + + # Log some details about the messages for debugging + if self._pending_messages: + for idx, msg in enumerate(self._pending_messages[:3]): # Log first 3 + logger.debug( + f"Tweet message {idx + 1}: ID={msg.id}, DAO={msg.dao_id}, " + f"Message type={type(msg.message)}, Content preview: {str(msg.message)[:100]}" + ) + + return True + except Exception as e: + logger.error(f"Error loading pending tweets: {str(e)}", exc_info=True) + self._pending_messages = None + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + if not self._pending_messages: + logger.debug("No pending tweet messages found") + return False + + # Validate each message before processing + valid_messages = [] + invalid_count = 0 + + for message in self._pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + else: + invalid_count += 1 + + self._pending_messages = valid_messages + + logger.info( + f"Tweet validation complete: {len(valid_messages)} valid, {invalid_count} invalid messages" + ) + + if valid_messages: + logger.debug(f"Found {len(valid_messages)} valid tweet messages") + return True + + logger.warning( + f"No valid tweet messages to process (found {invalid_count} invalid messages)" + ) + return False + + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a message is valid for processing.""" + try: + if not message.message: + logger.debug( + f"Tweet message {message.id} invalid: message field is empty" + ) + return False + + if not message.dao_id: + logger.debug(f"Tweet message {message.id} invalid: dao_id is missing") + return False + + if not isinstance(message.message, dict): + logger.debug( + f"Tweet message {message.id} invalid: message field is not a dict, got {type(message.message)}" + ) + return False + + # Check for new chunked format + if "chunks" in message.message: + chunks = message.message["chunks"] + if not isinstance(chunks, list): + logger.debug( + f"Tweet message {message.id} invalid: 'chunks' is not a list, got {type(chunks)}" + ) + return False + + if not chunks: + logger.debug( + f"Tweet message {message.id} invalid: 'chunks' array is empty" + ) + return False + + # Validate each chunk + for i, chunk in enumerate(chunks): + if not isinstance(chunk, str): + logger.debug( + f"Tweet message {message.id} invalid: chunk {i} is not a string, got {type(chunk)}" + ) + return False + if not chunk.strip(): + logger.debug( + 
f"Tweet message {message.id} invalid: chunk {i} is empty or whitespace" + ) + return False + + logger.debug( + f"Tweet message {message.id} is valid with {len(chunks)} chunks" + ) + return True + + # Check for legacy format (backward compatibility) + elif "message" in message.message: + tweet_text = message.message["message"] + if not tweet_text: + logger.debug( + f"Tweet message {message.id} invalid: tweet text is None or empty" + ) + return False + + if not isinstance(tweet_text, str): + logger.debug( + f"Tweet message {message.id} invalid: tweet text is not a string, got {type(tweet_text)}" + ) + return False + + if not tweet_text.strip(): + logger.debug( + f"Tweet message {message.id} invalid: tweet text is only whitespace" + ) + return False + + logger.debug( + f"Tweet message {message.id} is valid (legacy format) with content: {tweet_text[:50]}..." + ) + return True + else: + logger.debug( + f"Tweet message {message.id} invalid: neither 'chunks' nor 'message' key found. Keys: {list(message.message.keys())}" + ) + return False + + except Exception as e: + logger.debug(f"Tweet message {message.id} validation error: {str(e)}") + return False + + async def _process_tweet_message( + self, message: QueueMessage + ) -> TweetProcessingResult: + """Process a single tweet message with enhanced error handling and threading support. + + Now supports the new chunked message format: + { + "chunks": ["chunk1", "chunk2", "chunk3", ...], + "total_chunks": 3 + } + + Also maintains backward compatibility with legacy format: + { + "message": "Main tweet content", + "reply_to_tweet_id": "optional_tweet_id_to_reply_to", + "follow_up_message": "optional_follow_up_content" + } + """ + try: + # Validate message structure first + if not message.message or not isinstance(message.message, dict): + logger.warning( + f"Tweet message {message.id} has invalid message structure" + ) + return TweetProcessingResult( + success=False, + message="Tweet message structure is invalid", + dao_id=message.dao_id, + ) + + # Get Twitter service for this DAO + twitter_service = await self._get_twitter_service(message.dao_id) + if not twitter_service: + return TweetProcessingResult( + success=False, + message=f"Failed to get Twitter service for DAO: {message.dao_id}", + dao_id=message.dao_id, + ) + + # Check for new chunked format + if "chunks" in message.message: + chunks = message.message["chunks"] + + logger.info( + f"Processing chunked tweet message for DAO {message.dao_id} with {len(chunks)} chunks" + ) + logger.debug( + f"First chunk preview: {chunks[0][:100]}..." 
+ if chunks + else "No chunks" + ) + + return await self._process_chunked_message( + message, twitter_service, chunks + ) + + # Handle legacy format for backward compatibility + elif "message" in message.message: + logger.info( + f"Processing legacy format tweet message for DAO {message.dao_id}" + ) + return await self._process_legacy_message(message, twitter_service) + + else: + logger.warning(f"Tweet message {message.id} has unrecognized format") + return TweetProcessingResult( + success=False, + message="Tweet message format not recognized", + dao_id=message.dao_id, + ) + + except Exception as e: + logger.error( + f"Error processing tweet message {message.id}: {str(e)}", exc_info=True + ) + return TweetProcessingResult( + success=False, + message=f"Error sending tweet: {str(e)}", + error=e, + tweet_id=getattr(message, "tweet_id", None), + dao_id=message.dao_id, + ) + + async def _process_chunked_message( + self, message: QueueMessage, twitter_service: TwitterService, chunks: List[str] + ) -> TweetProcessingResult: + """Process a message with pre-chunked content.""" + previous_tweet_id = message.tweet_id # Use existing tweet_id if threading + tweets_sent = 0 + + # Check if chunks already have thread indices (e.g., "(1/3)") + has_indices = len(chunks) > 1 and any( + "(" in chunk and "/" in chunk and ")" in chunk for chunk in chunks + ) + + logger.info( + f"Processing {len(chunks)} pre-chunked tweets for DAO {message.dao_id}" + f"{' with thread indices' if has_indices else ''}" + ) + + for index, chunk in enumerate(chunks): + try: + # Check for image URLs in the chunk + image_urls = extract_image_urls(chunk) + image_url = image_urls[0] if image_urls else None + + if image_url: + # Remove image URL from text + chunk = re.sub(re.escape(image_url), "", chunk).strip() + chunk = re.sub(r"\s+", " ", chunk) + + # Post the tweet + if index == 0 and image_url: + tweet_response = await twitter_service.post_tweet_with_media( + image_url=image_url, + text=chunk, + reply_id=previous_tweet_id, + ) + else: + tweet_response = await twitter_service._apost_tweet( + text=chunk, + reply_in_reply_to_tweet_id=previous_tweet_id, + ) + + if tweet_response: + tweets_sent += 1 + previous_tweet_id = tweet_response.id + logger.info( + f"Successfully posted tweet chunk {index + 1}/{len(chunks)}: {tweet_response.id}" + f"{f' - {chunk[:50]}...' 
if len(chunk) > 50 else f' - {chunk}'}" + ) + else: + logger.error( + f"Failed to send tweet chunk {index + 1}/{len(chunks)}" + ) + if index == 0: # If first chunk fails, whole message fails + return TweetProcessingResult( + success=False, + message="Failed to send first tweet chunk", + dao_id=message.dao_id, + tweet_id=previous_tweet_id, + chunks_processed=index, + ) + # For subsequent chunks, we can continue + + except Exception as chunk_error: + logger.error( + f"Error sending chunk {index + 1}/{len(chunks)}: {str(chunk_error)}" + ) + if index == 0: # Critical failure on first chunk + raise chunk_error + + return TweetProcessingResult( + success=tweets_sent > 0, + message=f"Successfully sent {tweets_sent}/{len(chunks)} tweet chunks" + f"{' with thread indices' if has_indices else ''}", + tweet_id=previous_tweet_id, + dao_id=message.dao_id, + tweets_sent=tweets_sent, + chunks_processed=len(chunks), + ) + + async def _process_legacy_message( + self, message: QueueMessage, twitter_service: TwitterService + ) -> TweetProcessingResult: + """Process a message in the legacy format for backward compatibility.""" + # Extract tweet text + tweet_text = message.message["message"] + if not isinstance(tweet_text, str): + logger.warning( + f"Tweet message {message.id} content is not a string: {type(tweet_text)}" + ) + return TweetProcessingResult( + success=False, + message=f"Tweet content is not a string: {type(tweet_text)}", + dao_id=message.dao_id, + ) + + # Check for threading information + reply_to_tweet_id = message.message.get("reply_to_tweet_id") + if reply_to_tweet_id: + logger.info( + f"Tweet will be threaded as reply to tweet ID: {reply_to_tweet_id}" + ) + + logger.info(f"Sending legacy format tweet for DAO {message.dao_id}") + logger.debug(f"Tweet content: {tweet_text[:100]}...") + + # Look for image URLs in the text + image_urls = extract_image_urls(tweet_text) + image_url = image_urls[0] if image_urls else None + + if image_url: + # Remove image URL from text + tweet_text = re.sub(re.escape(image_url), "", tweet_text).strip() + tweet_text = re.sub(r"\s+", " ", tweet_text) + + # Split tweet text if necessary + chunks = self._split_text_into_chunks(tweet_text) + # Use reply_to_tweet_id as initial thread ID, or message.tweet_id for continuation + previous_tweet_id = reply_to_tweet_id or message.tweet_id + tweets_sent = 0 + + for index, chunk in enumerate(chunks): + try: + if index == 0 and image_url: + tweet_response = await twitter_service.post_tweet_with_media( + image_url=image_url, + text=chunk, + reply_id=previous_tweet_id, + ) + else: + tweet_response = await twitter_service._apost_tweet( + text=chunk, + reply_in_reply_to_tweet_id=previous_tweet_id, + ) + + if tweet_response: + tweets_sent += 1 + previous_tweet_id = tweet_response.id + logger.info( + f"Successfully posted tweet chunk {index + 1}: {tweet_response.id}" + ) + else: + logger.error(f"Failed to send tweet chunk {index + 1}") + if index == 0: + return TweetProcessingResult( + success=False, + message="Failed to send first tweet chunk", + dao_id=message.dao_id, + tweet_id=previous_tweet_id, + chunks_processed=index, + ) + + except Exception as chunk_error: + logger.error(f"Error sending chunk {index + 1}: {str(chunk_error)}") + if index == 0: + raise chunk_error + + result = TweetProcessingResult( + success=tweets_sent > 0, + message=f"Successfully sent {tweets_sent}/{len(chunks)} tweet chunks", + tweet_id=previous_tweet_id, + dao_id=message.dao_id, + tweets_sent=tweets_sent, + chunks_processed=len(chunks), + ) + + # Check if 
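# Sketch of the image-handling step used in both code paths: the first detected
# image URL is removed from the tweet text and whitespace is collapsed. The URL
# regex is a simplified stand-in for the project's extract_image_urls helper.
import re
from typing import Optional, Tuple

def strip_first_image_url(text: str) -> Tuple[str, Optional[str]]:
    match = re.search(r"https?://\S+\.(?:png|jpe?g|gif)", text, re.IGNORECASE)
    image_url = match.group(0) if match else None
    if image_url:
        text = re.sub(re.escape(image_url), "", text).strip()
        text = re.sub(r"\s+", " ", text)
    return text, image_url

# strip_first_image_url("Vote passed! https://example.com/chart.png details soon")
# -> ("Vote passed! details soon", "https://example.com/chart.png")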
there's a follow-up message to create as a thread + if result.success and result.tweet_id: + follow_up_tweet_id = await self._create_follow_up_tweet( + message, result.tweet_id + ) + if follow_up_tweet_id: + result.tweets_sent += 1 + result.tweet_id = follow_up_tweet_id + result.message += " with follow-up thread" + logger.info( + f"Successfully created follow-up tweet thread: {follow_up_tweet_id}" + ) + + return result + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Import tweepy exceptions for retry logic + try: + import tweepy + + retry_errors = ( + ConnectionError, + TimeoutError, + tweepy.TooManyRequests, + tweepy.ServiceUnavailable, + ) + return isinstance(error, retry_errors) + except ImportError: + # Fallback if tweepy not available + retry_errors = (ConnectionError, TimeoutError) + return isinstance(error, retry_errors) + + def _split_text_into_chunks(self, text: str, limit: int = 280) -> List[str]: + """Split text into chunks not exceeding the limit without cutting words.""" + words = text.split() + chunks = [] + current = "" + for word in words: + if len(current) + len(word) + (1 if current else 0) <= limit: + current = f"{current} {word}".strip() + else: + if current: + chunks.append(current) + current = word + if current: + chunks.append(current) + return chunks + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[TweetProcessingResult]]: + """Handle execution errors with recovery logic.""" + try: + import tweepy + + if isinstance(error, tweepy.TooManyRequests): + logger.warning("Twitter API rate limit reached, will retry later") + return None # Let default retry handling take over + + if isinstance(error, tweepy.ServiceUnavailable): + logger.warning("Twitter service unavailable, will retry later") + return None + except ImportError: + pass + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For other errors, don't retry + return [ + TweetProcessingResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[TweetProcessingResult] + ) -> None: + """Cleanup after task execution.""" + # Clear cached pending messages + self._pending_messages = None + + # Don't clear Twitter services cache as they can be reused + logger.debug( + f"Cleanup completed. 
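# Worked example of the word-boundary splitter above: words are packed greedily
# into chunks of at most `limit` characters and a word is never broken (note
# that a single word longer than `limit` still produces an oversized chunk).
# split_text_into_chunks mirrors _split_text_into_chunks as a free function.
from typing import List

def split_text_into_chunks(text: str, limit: int = 280) -> List[str]:
    words = text.split()
    chunks: List[str] = []
    current = ""
    for word in words:
        if len(current) + len(word) + (1 if current else 0) <= limit:
            current = f"{current} {word}".strip()
        else:
            if current:
                chunks.append(current)
            current = word
    if current:
        chunks.append(current)
    return chunks

# split_text_into_chunks("one two three four", limit=9) -> ["one two", "three", "four"]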
Cached Twitter services: {len(self._twitter_services)}" + ) + + async def _execute_impl(self, context: JobContext) -> List[TweetProcessingResult]: + """Execute tweet sending task with batch processing.""" + results: List[TweetProcessingResult] = [] + + if not self._pending_messages: + logger.debug("No pending tweet messages to process") + return results + + processed_count = 0 + success_count = 0 + batch_size = getattr(context, "batch_size", 5) + + # Process messages in batches + for i in range(0, len(self._pending_messages), batch_size): + batch = self._pending_messages[i : i + batch_size] + + for message in batch: + logger.debug(f"Processing tweet message: {message.id}") + result = await self._process_tweet_message(message) + results.append(result) + processed_count += 1 + + if result.success: + success_count += 1 + # Mark message as processed with result + result_dict = { + "success": result.success, + "message": result.message, + "tweet_id": result.tweet_id, + "dao_id": str(result.dao_id) if result.dao_id else None, + "tweets_sent": result.tweets_sent, + "chunks_processed": result.chunks_processed, + "error": str(result.error) if result.error else None, + } + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase( + is_processed=True, result=result_dict + ), + ) + logger.debug( + f"Marked message {message.id} as processed with result" + ) + else: + # Store result for failed processing + result_dict = { + "success": result.success, + "message": result.message, + "tweet_id": result.tweet_id, + "dao_id": str(result.dao_id) if result.dao_id else None, + "tweets_sent": result.tweets_sent, + "chunks_processed": result.chunks_processed, + "error": str(result.error) if result.error else None, + } + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(result=result_dict), + ) + logger.debug(f"Stored result for failed message {message.id}") + + logger.info( + f"Tweet task completed - Processed: {processed_count}, " + f"Successful: {success_count}, Failed: {processed_count - success_count}" + ) + + return results + + async def _create_follow_up_tweet( + self, message: QueueMessage, original_tweet_id: str + ) -> Optional[str]: + """Create a follow-up tweet as a thread to the original tweet (legacy format only).""" + try: + follow_up_content = message.message.get("follow_up_message") + if not follow_up_content: + return None + + logger.info(f"Creating follow-up tweet as thread to {original_tweet_id}") + + # Get Twitter service for this DAO + twitter_service = await self._get_twitter_service(message.dao_id) + if not twitter_service: + logger.error("Failed to get Twitter service for follow-up tweet") + return None + + # Check for image URLs in the follow-up text + image_urls = extract_image_urls(follow_up_content) + image_url = image_urls[0] if image_urls else None + + if image_url: + # Remove image URL from text + follow_up_content = re.sub( + re.escape(image_url), "", follow_up_content + ).strip() + follow_up_content = re.sub(r"\s+", " ", follow_up_content) + + # Split follow-up text if necessary + chunks = self._split_text_into_chunks(follow_up_content) + previous_tweet_id = original_tweet_id + + for index, chunk in enumerate(chunks): + try: + if index == 0 and image_url: + tweet_response = await twitter_service.post_tweet_with_media( + image_url=image_url, + text=chunk, + reply_id=previous_tweet_id, + ) + else: + tweet_response = await twitter_service._apost_tweet( + text=chunk, + 
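# Sketch of the batch slicing used by _execute_impl: pending messages are
# processed in fixed-size windows, with batch_size defaulting to 5 when the job
# context does not provide one.
messages = list(range(12))                # stand-ins for queued messages
batch_size = 5
batches = [messages[i : i + batch_size] for i in range(0, len(messages), batch_size)]
# -> [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11]]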
reply_in_reply_to_tweet_id=previous_tweet_id, + ) + + if tweet_response: + previous_tweet_id = tweet_response.id + logger.info( + f"Successfully posted follow-up tweet chunk {index + 1}: {tweet_response.id}" + ) + else: + logger.error( + f"Failed to send follow-up tweet chunk {index + 1}" + ) + break + + except Exception as chunk_error: + logger.error( + f"Error sending follow-up tweet chunk {index + 1}: {str(chunk_error)}" + ) + break + + return previous_tweet_id + + except Exception as e: + logger.error(f"Error creating follow-up tweet: {str(e)}", exc_info=True) + return None + + +# Create instance for auto-registration +tweet_task = TweetTask() diff --git a/services/schedule.py b/services/infrastructure/scheduler_service.py similarity index 98% rename from services/schedule.py rename to services/infrastructure/scheduler_service.py index 0147033a..a5fd2f32 100644 --- a/services/schedule.py +++ b/services/infrastructure/scheduler_service.py @@ -10,7 +10,7 @@ from backend.models import JobBase, JobCreate, StepCreate, Task, TaskFilter from lib.logger import configure_logger from lib.persona import generate_persona -from services.workflows import execute_langgraph_stream +from services.ai.workflows import execute_workflow_stream from tools.tools_factory import exclude_tools_by_names, initialize_tools logger = configure_logger(__name__) @@ -136,13 +136,13 @@ async def _process_job_stream( "content": "Sure, what exactly would you like to know?", } ] - persona = generate_persona(agent) + persona = generate_persona() tools_map = initialize_tools(profile, agent.id) tools_map_filtered = exclude_tools_by_names( ["db_update_scheduled_task", "db_add_scheduled_task"], tools_map ) - stream_generator = execute_langgraph_stream( + stream_generator = execute_workflow_stream( history=history, input_str=task.prompt, persona=persona, diff --git a/services/infrastructure/startup_service.py b/services/infrastructure/startup_service.py new file mode 100644 index 00000000..c1eb707e --- /dev/null +++ b/services/infrastructure/startup_service.py @@ -0,0 +1,311 @@ +"""Enhanced startup service with auto-discovery and comprehensive monitoring.""" + +import asyncio +import signal +import sys +from typing import Any, Dict, Optional + +from apscheduler.schedulers.asyncio import AsyncIOScheduler + +from config import config +from lib.logger import configure_logger +from services.communication.telegram_bot_service import start_application +from services.infrastructure.job_management.auto_discovery import ( + discover_and_register_tasks, +) +from services.infrastructure.job_management.job_manager import JobManager +from services.infrastructure.job_management.monitoring import ( + MetricsCollector, + SystemMetrics, +) +from services.communication.websocket_service import websocket_manager + +logger = configure_logger(__name__) + +# Global enhanced job manager instance +job_manager: Optional[JobManager] = None +shutdown_event = asyncio.Event() +metrics_collector = MetricsCollector() +system_metrics = SystemMetrics() + + +def signal_handler(signum, frame): + """Handle shutdown signals gracefully.""" + logger.info(f"Received signal {signum}, initiating graceful shutdown...") + shutdown_event.set() + + +class EnhancedStartupService: + """Enhanced service to manage application startup with auto-discovery and monitoring.""" + + def __init__(self, scheduler: Optional[AsyncIOScheduler] = None): + self.scheduler = scheduler or AsyncIOScheduler() + self.cleanup_task: Optional[asyncio.Task] = None + self.bot_application: Optional[Any] 
= None + self.job_manager: Optional[JobManager] = None + + async def initialize_job_system(self): + """Initialize the enhanced job system with auto-discovery.""" + try: + # Initialize enhanced job manager + self.job_manager = JobManager() + + # Auto-discover and register all jobs (this populates JobRegistry) + discover_and_register_tasks() + + # Get registered jobs from JobRegistry + from services.infrastructure.job_management.decorators import JobRegistry + + registered_jobs = JobRegistry.list_jobs() + + logger.info( + f"Enhanced job system initialized with {len(registered_jobs)} jobs discovered" + ) + return True + + except Exception as e: + logger.error( + f"Failed to initialize enhanced job system: {e}", exc_info=True + ) + return False + + async def start_websocket_cleanup(self) -> None: + """Start the WebSocket cleanup task.""" + try: + await websocket_manager.start_cleanup_task() + logger.info("WebSocket cleanup task started") + except Exception as e: + logger.error(f"Error starting WebSocket cleanup task: {str(e)}") + raise + + async def start_bot(self) -> Any: + """Start the Telegram bot in the background.""" + if not config.telegram.enabled: + logger.info("Telegram bot disabled. Skipping initialization.") + return None + + try: + self.bot_application = await start_application() + logger.info("Telegram bot started successfully") + return self.bot_application + except Exception as e: + logger.error(f"Failed to start Telegram bot: {e}") + raise + + async def start_enhanced_job_system(self) -> None: + """Start the enhanced job system.""" + if not await self.initialize_job_system(): + logger.error("Failed to initialize enhanced job system") + raise RuntimeError("Job system initialization failed") + + # Schedule jobs with the scheduler + any_jobs_scheduled = self.job_manager.schedule_jobs(self.scheduler) + if any_jobs_scheduled: + # Start the scheduler + self.scheduler.start() + logger.info("Job scheduler started successfully") + else: + logger.warning("No jobs were scheduled") + + # Start the job executor + await self.job_manager.start_executor() + logger.info("Enhanced job manager executor started successfully") + + # Start system metrics collection + await system_metrics.start_monitoring() + logger.info("System metrics monitoring started") + + async def init_background_tasks(self) -> asyncio.Task: + """Initialize all enhanced background tasks.""" + logger.info("Starting Enhanced AIBTC Background Services...") + + try: + # Start enhanced job system + await self.start_enhanced_job_system() + + # Start websocket cleanup task + self.cleanup_task = asyncio.create_task(self.start_websocket_cleanup()) + + # Start bot if enabled + await self.start_bot() + + logger.info("All enhanced background services started successfully") + return self.cleanup_task + + except Exception as e: + logger.error(f"Failed to start background services: {e}", exc_info=True) + raise + + async def shutdown(self) -> None: + """Enhanced cleanup and shutdown with graceful task termination.""" + logger.info("Initiating enhanced shutdown sequence...") + + try: + # Stop system metrics collection + if system_metrics: + await system_metrics.stop_monitoring() + logger.info("System metrics collection stopped") + + # Stop the scheduler + if self.scheduler and self.scheduler.running: + self.scheduler.shutdown() + logger.info("Job scheduler stopped") + + # Gracefully shutdown enhanced job manager + if self.job_manager: + logger.info("Stopping enhanced job manager...") + await self.job_manager.stop_executor() + 
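# Minimal sketch of the scheduler lifecycle mirrored by start_enhanced_job_system
# and shutdown(): jobs are registered, the AsyncIOScheduler is started inside a
# running event loop, and shut down on exit. The heartbeat job is a placeholder.
import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

async def heartbeat() -> None:
    print("tick")

async def scheduler_demo() -> None:
    scheduler = AsyncIOScheduler()
    scheduler.add_job(heartbeat, "interval", seconds=30)
    scheduler.start()                     # requires a running event loop
    try:
        await asyncio.sleep(65)           # let a couple of runs happen
    finally:
        scheduler.shutdown()

# asyncio.run(scheduler_demo())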
logger.info("Enhanced job manager stopped successfully") + + # Stop websocket cleanup + if self.cleanup_task: + self.cleanup_task.cancel() + try: + await self.cleanup_task + except asyncio.CancelledError: + pass + logger.info("WebSocket cleanup task stopped") + + # Stop bot + if self.bot_application: + logger.info("Stopping Telegram bot...") + # Add any necessary bot shutdown code here + logger.info("Telegram bot stopped") + + except Exception as e: + logger.error(f"Error during enhanced shutdown: {e}", exc_info=True) + + logger.info("Enhanced shutdown complete") + + def get_health_status(self) -> Dict: + """Get comprehensive health status of the enhanced startup service.""" + if not self.job_manager: + return { + "status": "unhealthy", + "message": "Enhanced job manager not initialized", + "jobs": {"running": 0, "registered": 0, "failed": 0}, + "system": {}, + "uptime": 0, + } + + # Get comprehensive health data + health_data = self.job_manager.get_system_health() + system_health = system_metrics.get_current_metrics() + + return { + "status": health_data["status"], + "message": "Enhanced job system running", + "jobs": { + "running": health_data["executor"]["running"], + "registered": health_data["tasks"]["total_registered"], + "enabled": health_data["tasks"]["enabled"], + "disabled": health_data["tasks"]["disabled"], + "total_executions": health_data["metrics"]["total_executions"], + }, + "system": { + "cpu_usage": system_health.get("cpu_usage", 0), + "memory_usage": system_health.get("memory_usage", 0), + "disk_usage": system_health.get("disk_usage", 0), + }, + "uptime": health_data.get("uptime_seconds", 0), + "last_updated": system_health.get("timestamp"), + "version": "2.0-enhanced", + "services": { + "websocket_cleanup": self.cleanup_task is not None + and not self.cleanup_task.done(), + "telegram_bot": self.bot_application is not None, + "job_manager": self.job_manager is not None + and self.job_manager.is_running, + }, + } + + def get_job_metrics(self) -> Dict: + """Get detailed job execution metrics.""" + if not self.job_manager: + return {"error": "Enhanced job manager not available"} + + return self.job_manager.get_comprehensive_metrics() + + def get_system_metrics(self) -> Dict: + """Get current system performance metrics.""" + return system_metrics.get_current_metrics() + + def trigger_job(self, job_type: str) -> Dict: + """Manually trigger a specific job type.""" + if not self.job_manager: + return {"error": "Enhanced job manager not available"} + + return self.job_manager.trigger_job(job_type) + + +# Global enhanced instance for convenience +startup_service = EnhancedStartupService() + + +# Enhanced convenience functions that use the global instance +async def run() -> asyncio.Task: + """Initialize all enhanced background tasks using the global startup service.""" + global job_manager + + # Setup signal handlers for standalone mode + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + try: + cleanup_task = await startup_service.init_background_tasks() + job_manager = startup_service.job_manager + + logger.info("Enhanced AIBTC services running. 
Press Ctrl+C to stop.") + return cleanup_task + + except Exception as e: + logger.error(f"Failed to start enhanced services: {e}", exc_info=True) + raise + + +async def shutdown() -> None: + """Shutdown all enhanced services using the global startup service.""" + await startup_service.shutdown() + + +# Enhanced health check functions +def get_health_status() -> Dict: + """Get comprehensive health status.""" + return startup_service.get_health_status() + + +def get_job_metrics() -> Dict: + """Get detailed job execution metrics.""" + return startup_service.get_job_metrics() + + +def get_system_metrics() -> Dict: + """Get current system performance metrics.""" + return startup_service.get_system_metrics() + + +def trigger_job(job_type: str) -> Dict: + """Manually trigger a specific job type.""" + return startup_service.trigger_job(job_type) + + +# Enhanced standalone mode for direct execution +async def run_standalone(): + """Run the enhanced startup service in standalone mode.""" + try: + await run() + + # Wait for shutdown signal + await shutdown_event.wait() + + except KeyboardInterrupt: + logger.info("Received keyboard interrupt") + except Exception as e: + logger.error(f"Critical error in standalone mode: {e}", exc_info=True) + sys.exit(1) + finally: + await shutdown() + + +if __name__ == "__main__": + asyncio.run(run_standalone()) diff --git a/services/integrations/__init__.py b/services/integrations/__init__.py new file mode 100644 index 00000000..1e7c2320 --- /dev/null +++ b/services/integrations/__init__.py @@ -0,0 +1,25 @@ +"""Integration services for external APIs and webhooks.""" + +from .hiro.hiro_api import HiroApi +from .hiro.platform_api import PlatformApi +from .hiro.utils import ( + ChainHookBuilder, + ChainType, + EventScope, + HiroApiError, + HiroApiRateLimitError, + HiroApiTimeoutError, + WebhookConfig, +) + +__all__ = [ + "HiroApi", + "PlatformApi", + "ChainHookBuilder", + "ChainType", + "EventScope", + "HiroApiError", + "HiroApiRateLimitError", + "HiroApiTimeoutError", + "WebhookConfig", +] diff --git a/services/integrations/hiro/__init__.py b/services/integrations/hiro/__init__.py new file mode 100644 index 00000000..024b21f0 --- /dev/null +++ b/services/integrations/hiro/__init__.py @@ -0,0 +1,32 @@ +"""Hiro API integration module. + +This module provides clients for interacting with the Hiro API and Platform API, +including blockchain operations, chainhook management, and token queries. 
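# Sketch of the standalone entrypoint pattern above: OS signals flip an asyncio
# Event and the main coroutine simply waits on it before cleaning up.
import asyncio
import signal

stop_event = asyncio.Event()

def _handle_signal(signum, frame) -> None:
    stop_event.set()

async def standalone_demo() -> None:
    signal.signal(signal.SIGINT, _handle_signal)
    signal.signal(signal.SIGTERM, _handle_signal)
    print("running; press Ctrl+C to stop")
    await stop_event.wait()
    print("shutting down")

# asyncio.run(standalone_demo())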
+""" + +from .hiro_api import HiroApi +from .models import BlockTransactionsResponse, HiroApiInfo +from .platform_api import PlatformApi +from .utils import ( + ChainHookBuilder, + ChainType, + EventScope, + HiroApiError, + HiroApiRateLimitError, + HiroApiTimeoutError, + WebhookConfig, +) + +__all__ = [ + "HiroApi", + "PlatformApi", + "ChainHookBuilder", + "ChainType", + "EventScope", + "HiroApiError", + "HiroApiRateLimitError", + "HiroApiTimeoutError", + "WebhookConfig", + "BlockTransactionsResponse", + "HiroApiInfo", +] diff --git a/services/integrations/hiro/base.py b/services/integrations/hiro/base.py new file mode 100644 index 00000000..e5ac5d13 --- /dev/null +++ b/services/integrations/hiro/base.py @@ -0,0 +1,310 @@ +"""Base API client for Hiro services with rate limiting and error handling.""" + +import time +from functools import wraps +from typing import Any, ClassVar, Dict, List, Optional + +import aiohttp +import httpx +from cachetools import TTLCache + +from config import config +from lib.logger import configure_logger + +from .utils import HiroApiError, HiroApiRateLimitError, HiroApiTimeoutError + +logger = configure_logger(__name__) + + +class BaseHiroApi: + """Base class for Hiro API clients with shared functionality.""" + + # Default rate limiting settings (will be updated from API headers) + DEFAULT_SECOND_LIMIT: ClassVar[int] = 20 + DEFAULT_MINUTE_LIMIT: ClassVar[int] = 50 + + # Rate limit tracking (shared across all instances) + _second_limit: ClassVar[int] = DEFAULT_SECOND_LIMIT + _minute_limit: ClassVar[int] = DEFAULT_MINUTE_LIMIT + _second_requests: ClassVar[List[float]] = [] + _minute_requests: ClassVar[List[float]] = [] + + # Retry settings + MAX_RETRIES = 3 + RETRY_DELAY = 1 # seconds + + def __init__(self, base_url: str): + """Initialize the base API client. + + Args: + base_url: The base URL for the API + """ + self.base_url = base_url + self.api_key = config.api.hiro_api_key + if not self.api_key: + raise ValueError("HIRO_API_KEY environment variable is required") + + self._cache = TTLCache(maxsize=100, ttl=300) # Cache with 5-minute TTL + self._session: Optional[aiohttp.ClientSession] = None + logger.debug("Initialized API client with base URL: %s", self.base_url) + + def _update_rate_limits(self, headers: Dict[str, str]) -> None: + """Update rate limit settings from response headers. 
+ + Args: + headers: Response headers containing rate limit information + """ + # Update limits if headers are present + if "x-ratelimit-limit-second" in headers: + old_limit = self.__class__._second_limit + self.__class__._second_limit = int(headers["x-ratelimit-limit-second"]) + logger.debug( + "Second rate limit updated: %d → %d", + old_limit, + self.__class__._second_limit, + ) + + if "x-ratelimit-limit-minute" in headers: + old_limit = self.__class__._minute_limit + self.__class__._minute_limit = int(headers["x-ratelimit-limit-minute"]) + logger.debug( + "Minute rate limit updated: %d → %d", + old_limit, + self.__class__._minute_limit, + ) + + # Log remaining rate limit information if available + if "x-ratelimit-remaining-second" in headers: + logger.debug( + "Second rate limit remaining: %s", + headers["x-ratelimit-remaining-second"], + ) + + if "x-ratelimit-remaining-minute" in headers: + logger.debug( + "Minute rate limit remaining: %s", + headers["x-ratelimit-remaining-minute"], + ) + + logger.debug( + "Current rate limit state - second: %d/%d, minute: %d/%d", + len(self.__class__._second_requests), + self.__class__._second_limit, + len(self.__class__._minute_requests), + self.__class__._minute_limit, + ) + + def _rate_limit(self) -> None: + """Implement rate limiting for both second and minute windows.""" + current_time = time.time() + + # Update second window requests + old_second_count = len(self.__class__._second_requests) + self.__class__._second_requests = [ + t for t in self.__class__._second_requests if current_time - t < 1.0 + ] + new_second_count = len(self.__class__._second_requests) + + if old_second_count != new_second_count: + logger.debug( + "Pruned expired second window requests: %d → %d", + old_second_count, + new_second_count, + ) + + # Update minute window requests + old_minute_count = len(self.__class__._minute_requests) + self.__class__._minute_requests = [ + t for t in self.__class__._minute_requests if current_time - t < 60.0 + ] + new_minute_count = len(self.__class__._minute_requests) + + if old_minute_count != new_minute_count: + logger.debug( + "Pruned expired minute window requests: %d → %d", + old_minute_count, + new_minute_count, + ) + + # Check second limit + if len(self.__class__._second_requests) >= self.__class__._second_limit: + sleep_time = self.__class__._second_requests[0] + 1.0 - current_time + if sleep_time > 0: + logger.warning( + "Second rate limit reached (%d/%d), sleeping for %.2f seconds", + len(self.__class__._second_requests), + self.__class__._second_limit, + sleep_time, + ) + time.sleep(sleep_time) + # Recalculate current time after sleep + current_time = time.time() + else: + logger.debug( + "Second rate limit check: %d/%d (%.1f%% of limit)", + len(self.__class__._second_requests), + self.__class__._second_limit, + (len(self.__class__._second_requests) / self.__class__._second_limit) + * 100, + ) + + # Check minute limit + if len(self.__class__._minute_requests) >= self.__class__._minute_limit: + sleep_time = self.__class__._minute_requests[0] + 60.0 - current_time + if sleep_time > 0: + logger.warning( + "Minute rate limit reached (%d/%d), sleeping for %.2f seconds", + len(self.__class__._minute_requests), + self.__class__._minute_limit, + sleep_time, + ) + time.sleep(sleep_time) + else: + logger.debug( + "Minute rate limit check: %d/%d (%.1f%% of limit)", + len(self.__class__._minute_requests), + self.__class__._minute_limit, + (len(self.__class__._minute_requests) / self.__class__._minute_limit) + * 100, + ) + + # Record the new 
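# Standalone sketch of the dual sliding-window throttle implemented by
# _rate_limit: timestamps older than the window are pruned, the caller sleeps
# until the oldest request leaves the window when the limit is reached, and the
# new request is recorded afterwards.
import time
from typing import List

def throttle(timestamps: List[float], limit: int, window: float) -> None:
    now = time.time()
    timestamps[:] = [t for t in timestamps if now - t < window]
    if len(timestamps) >= limit:
        sleep_for = timestamps[0] + window - now
        if sleep_for > 0:
            time.sleep(sleep_for)
    timestamps.append(time.time())

# second_window: List[float] = []
# for _ in range(25):
#     throttle(second_window, limit=20, window=1.0)   # at most 20 calls per second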
request + self.__class__._second_requests.append(time.time()) + self.__class__._minute_requests.append(time.time()) + + logger.debug( + "New request recorded: second window now %d/%d, minute window now %d/%d", + len(self.__class__._second_requests), + self.__class__._second_limit, + len(self.__class__._minute_requests), + self.__class__._minute_limit, + ) + + def _retry_on_error(func): + """Decorator to retry API calls on transient errors.""" + + @wraps(func) + def wrapper(self, *args, **kwargs): + for attempt in range(self.MAX_RETRIES): + try: + return func(self, *args, **kwargs) + except ( + httpx.TimeoutException, + httpx.ConnectError, + ) as e: + if attempt == self.MAX_RETRIES - 1: + logger.error( + "Max retries reached for %s: %s", func.__name__, str(e) + ) + raise HiroApiTimeoutError(f"Max retries reached: {str(e)}") + logger.warning( + "Retry attempt %d for %s: %s", + attempt + 1, + func.__name__, + str(e), + ) + time.sleep(self.RETRY_DELAY * (attempt + 1)) + return None + + return wrapper + + @_retry_on_error + def _make_request( + self, + method: str, + endpoint: str, + headers: Optional[Dict[str, str]] = None, + params: Optional[Dict[str, Any]] = None, + json: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + """Make an HTTP request with retries and rate limiting. + + Args: + method: HTTP method (GET, POST, etc.) + endpoint: API endpoint + headers: Optional request headers + params: Optional query parameters + json: Optional JSON body + + Returns: + Dict containing the response data + """ + try: + self._rate_limit() + url = f"{self.base_url}{endpoint}" + headers = headers or {} + + # Set default Accept header if not provided + if "Accept" not in headers: + headers["Accept"] = "application/json" + + # Add X-API-Key header if api_key is set + if self.api_key: + headers["X-API-Key"] = self.api_key + + logger.debug("Making %s request to %s", method, url) + response = httpx.request( + method, url, headers=headers, params=params, json=json + ) + + # Update rate limits from headers + self._update_rate_limits(response.headers) + + response.raise_for_status() + return response.json() + except httpx.HTTPStatusError as e: + if e.response.status_code == 429: + logger.error("Rate limit exceeded: %s", str(e)) + raise HiroApiRateLimitError(f"Rate limit exceeded: {str(e)}") + logger.error("HTTP error occurred: %s", str(e)) + raise HiroApiError(f"HTTP error occurred: {str(e)}") + except Exception as e: + logger.error("Unexpected error in request: %s", str(e)) + raise HiroApiError(f"Unexpected error: {str(e)}") + + async def _amake_request( + self, + method: str, + endpoint: str, + headers: Optional[Dict[str, str]] = None, + params: Optional[Dict[str, Any]] = None, + json: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + """Async version of _make_request.""" + if self._session is None: + self._session = aiohttp.ClientSession() + + try: + self._rate_limit() + url = f"{self.base_url}{endpoint}" + headers = headers or {} + + # Set default Accept header if not provided + if "Accept" not in headers: + headers["Accept"] = "application/json" + + # Add X-API-Key header if api_key is set + if self.api_key: + headers["X-API-Key"] = self.api_key + + logger.debug("Making async %s request to %s", method, url) + async with self._session.request( + method, url, headers=headers, params=params, json=json + ) as response: + # Update rate limits from headers + self._update_rate_limits(response.headers) + + response.raise_for_status() + return await response.json() + except aiohttp.ClientError as e: 
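# Sketch of the retry wrapper pattern above as a free-standing decorator:
# transient httpx errors are retried with a linearly growing delay and the last
# error is surfaced once MAX_RETRIES attempts are exhausted.
import time
from functools import wraps

import httpx

MAX_RETRIES = 3
RETRY_DELAY = 1  # seconds

def retry_on_transient(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        for attempt in range(MAX_RETRIES):
            try:
                return func(*args, **kwargs)
            except (httpx.TimeoutException, httpx.ConnectError):
                if attempt == MAX_RETRIES - 1:
                    raise
                time.sleep(RETRY_DELAY * (attempt + 1))
    return wrapper

@retry_on_transient
def fetch_json(url: str) -> dict:
    response = httpx.get(url, timeout=5.0)
    response.raise_for_status()
    return response.json()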
+ if isinstance(e, aiohttp.ClientResponseError) and e.status == 429: + logger.error("Rate limit exceeded in async request: %s", str(e)) + raise HiroApiRateLimitError(f"Rate limit exceeded: {str(e)}") + logger.error("Async request error: %s", str(e)) + raise HiroApiError(f"Async request error: {str(e)}") + + async def close(self) -> None: + """Close the async session.""" + if self._session: + await self._session.close() + self._session = None diff --git a/services/integrations/hiro/hiro_api.py b/services/integrations/hiro/hiro_api.py new file mode 100644 index 00000000..1db2e697 --- /dev/null +++ b/services/integrations/hiro/hiro_api.py @@ -0,0 +1,394 @@ +"""Hiro API client for blockchain data queries and operations.""" + +import httpx +from cachetools import cached +from typing import Any, Dict + +from config import config +from lib.logger import configure_logger + +from .base import BaseHiroApi +from .models import BlockTransactionsResponse, HiroApiInfo + +logger = configure_logger(__name__) + + +class HiroApi(BaseHiroApi): + """Client for interacting with the Hiro API. + + This client provides methods to interact with various Hiro API endpoints, + organized by category (transactions, blocks, addresses, etc.). + It includes features like rate limiting, retries, caching, and async support. + """ + + # API endpoint categories + ENDPOINTS = { + "transactions": "/extended/v1/tx", + "blocks": "/extended/v1/block", + "addresses": "/extended/v1/address", + "tokens": "/extended/v1/tokens", + "contracts": "/extended/v1/contract", + "burnchain": "/extended/v1/burnchain", + "search": "/extended/v1/search", + "fee_rate": "/extended/v1/fee_rate", + "stx_supply": "/extended/v1/stx_supply", + } + + def __init__(self): + """Initialize the Hiro API client.""" + super().__init__(config.api.hiro_api_url) + + @cached(lambda self: self._cache) + def get_token_holders( + self, token: str, limit: int = 20, offset: int = 0 + ) -> Dict[str, Any]: + """Retrieve a list of token holders with caching and pagination support. + + Args: + token: Token identifier (contract principal or symbol) + limit: Maximum number of holders to return (default: 20) + offset: Pagination offset (default: 0) + + Returns: + Dict containing the response with holders data + """ + logger.debug( + "Retrieving token holders for %s with limit %d offset %d", + token, + limit, + offset, + ) + return self._make_request( + "GET", + f"{self.ENDPOINTS['tokens']}/ft/{token}/holders", + params={"limit": limit, "offset": offset}, + ) + + def get_all_token_holders(self, token: str, page_size: int = 20) -> Dict[str, Any]: + """Get all token holders by paginating through results. 
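# Hypothetical async usage of the clients defined here: _amake_request creates
# the aiohttp session lazily, so close() must be awaited when the caller is
# done. Assumes HIRO_API_KEY and the Hiro API URL are configured; the address
# below is a placeholder.
import asyncio

async def balance_demo() -> None:
    api = HiroApi()
    try:
        balance = await api.aget_address_balance("SP000000000000000000002Q6VF78")
        print(balance.get("stx", {}).get("balance"))
    finally:
        await api.close()

# asyncio.run(balance_demo())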
+ + Args: + token: Token identifier (contract principal or symbol) + page_size: Number of holders per page request (default: 20) + + Returns: + Combined response with all holders + """ + logger.debug("Getting all token holders for %s", token) + + # Get first page to determine total + first_page = self.get_token_holders(token, limit=page_size) + + # If we got all holders in the first request, return it + total_holders = first_page.get("total", 0) + if total_holders <= page_size: + return first_page + + # Initialize with first page results + all_holders = first_page.get("results", []).copy() + + # Paginate through the rest + remaining = total_holders - page_size + offset = page_size + + while remaining > 0: + current_limit = min(page_size, remaining) + logger.debug( + "Fetching %d more token holders with offset %d", current_limit, offset + ) + + page = self.get_token_holders(token, limit=current_limit, offset=offset) + page_results = page.get("results", []) + all_holders.extend(page_results) + + offset += current_limit + remaining -= current_limit + + # Create combined response + return { + "total_supply": first_page.get("total_supply"), + "limit": total_holders, + "offset": 0, + "total": total_holders, + "results": all_holders, + } + + async def aget_token_holders( + self, token: str, limit: int = 20, offset: int = 0 + ) -> Dict[str, Any]: + """Async version of get_token_holders with pagination support. + + Args: + token: Token identifier (contract principal or symbol) + limit: Maximum number of holders to return (default: 20) + offset: Pagination offset (default: 0) + + Returns: + Dict containing the response with holders data + """ + logger.debug( + "Async retrieving token holders for %s with limit %d offset %d", + token, + limit, + offset, + ) + return await self._amake_request( + "GET", + f"{self.ENDPOINTS['tokens']}/ft/{token}/holders", + params={"limit": limit, "offset": offset}, + ) + + async def aget_all_token_holders( + self, token: str, page_size: int = 20 + ) -> Dict[str, Any]: + """Async version to get all token holders by paginating through results. 
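# Generic sketch of the limit/offset pagination loop used by
# get_all_token_holders: fetch one page to learn the total, then keep requesting
# until every result has been collected. fetch_page is a hypothetical callable
# taking (limit, offset) and returning a page dict.
from typing import Any, Callable, Dict, List

def collect_all(
    fetch_page: Callable[[int, int], Dict[str, Any]], page_size: int = 20
) -> List[Any]:
    first = fetch_page(page_size, 0)
    total = first.get("total", 0)
    results: List[Any] = list(first.get("results", []))
    remaining = total - page_size
    offset = page_size
    while remaining > 0:
        current = min(page_size, remaining)
        page = fetch_page(current, offset)
        results.extend(page.get("results", []))
        offset += current
        remaining -= current
    return results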
+ + Args: + token: Token identifier (contract principal or symbol) + page_size: Number of holders per page request (default: 20) + + Returns: + Combined response with all holders + """ + logger.debug("Async getting all token holders for %s", token) + + # Get first page to determine total + first_page = await self.aget_token_holders(token, limit=page_size) + + # If we got all holders in the first request, return it + total_holders = first_page.get("total", 0) + if total_holders <= page_size: + return first_page + + # Initialize with first page results + all_holders = first_page.get("results", []).copy() + + # Paginate through the rest + remaining = total_holders - page_size + offset = page_size + + while remaining > 0: + current_limit = min(page_size, remaining) + logger.debug( + "Async fetching %d more token holders with offset %d", + current_limit, + offset, + ) + + page = await self.aget_token_holders( + token, limit=current_limit, offset=offset + ) + page_results = page.get("results", []) + all_holders.extend(page_results) + + offset += current_limit + remaining -= current_limit + + # Create combined response + return { + "total_supply": first_page.get("total_supply"), + "limit": total_holders, + "offset": 0, + "total": total_holders, + "results": all_holders, + } + + def get_address_balance(self, addr: str) -> Dict[str, Any]: + """Retrieve wallet balance for an address.""" + logger.debug("Retrieving balance for address %s", addr) + return self._make_request( + "GET", f"{self.ENDPOINTS['addresses']}/{addr}/balances" + ) + + async def aget_address_balance(self, addr: str) -> Dict[str, Any]: + """Async version of get_address_balance.""" + logger.debug("Async retrieving balance for address %s", addr) + return await self._amake_request( + "GET", f"{self.ENDPOINTS['addresses']}/{addr}/balances" + ) + + # Transaction related endpoints + def get_transaction(self, tx_id: str) -> Dict[str, Any]: + """Get transaction details.""" + return self._make_request("GET", f"/extended/v1/tx/{tx_id}") + + def get_raw_transaction(self, tx_id: str) -> Dict[str, Any]: + """Get raw transaction details.""" + return self._make_request("GET", f"/extended/v1/tx/{tx_id}/raw") + + def get_transactions_by_block( + self, block_height: int, limit: int = 50, offset: int = 0 + ) -> BlockTransactionsResponse: + """Get transactions in a block. 
+ + Args: + block_height: The height of the block to get transactions for + limit: The maximum number of transactions to return (default: 50) + offset: Pagination offset (default: 0) + + Returns: + Typed response containing transaction data + """ + logger.debug( + "Getting transactions for block height %d with limit %d offset %d", + block_height, + limit, + offset, + ) + response = self._make_request( + "GET", + f"/extended/v2/blocks/{block_height}/transactions", + params={"limit": limit, "offset": offset}, + ) + + logger.debug(f"API response type: {type(response)}") + logger.debug( + f"API response keys: {response.keys() if isinstance(response, dict) else 'Not a dict'}" + ) + + # For debugging purposes + if ( + "results" in response + and response["results"] + and isinstance(response["results"], list) + ): + logger.debug(f"First result type: {type(response['results'][0])}") + logger.debug( + f"First result sample keys: {list(response['results'][0].keys())[:5]}" + ) + + # We're getting dictionaries back, so create BlockTransactionsResponse manually + # This ensures we don't lose the raw data structure if dataclass conversion fails + try: + return BlockTransactionsResponse(**response) + except Exception as e: + logger.warning(f"Error creating BlockTransactionsResponse: {str(e)}") + # Fall back to returning a raw dictionary-based response + return BlockTransactionsResponse( + limit=response.get("limit", 0), + offset=response.get("offset", 0), + total=response.get("total", 0), + results=response.get("results", []), + ) + + @cached(lambda self: self._cache) + def get_stx_price(self) -> float: + """Get the current STX price with caching.""" + logger.debug("Retrieving current STX price") + response = httpx.get( + "https://explorer.hiro.so/stxPrice", params={"blockBurnTime": "current"} + ) + response.raise_for_status() + return response.json()["price"] + + def get_current_block_height(self) -> int: + """Get the current block height""" + logger.debug("Retrieving current block height") + logger.debug(f"Endpoint: {self.ENDPOINTS['blocks']}") + response = self._make_request( + method="GET", + endpoint=self.ENDPOINTS["blocks"], + params={"limit": 1, "offset": 0}, + ) + logger.debug(f"Response: {response}") + return response["results"][0]["height"] + + def get_info(self) -> HiroApiInfo: + """Get Hiro API server information and chain tip. + + Returns: + Server information including version, status, and current chain tip + """ + logger.debug("Retrieving Hiro API server info") + response = self._make_request("GET", "/extended") + return HiroApiInfo(**response) + + async def aget_info(self) -> HiroApiInfo: + """Async version of get_info. 
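# For reference (not taken from this patch): the usual cachetools idiom for
# caching a method against a per-instance TTLCache is cachedmethod with an
# attribute getter, so each instance reads and writes its own cache.
import operator

from cachetools import TTLCache, cachedmethod

class _CachedPriceClient:
    def __init__(self) -> None:
        self._cache = TTLCache(maxsize=100, ttl=300)

    @cachedmethod(operator.attrgetter("_cache"))
    def get_price(self, symbol: str) -> float:
        # Placeholder for an HTTP call; results are cached for five minutes.
        return 1.23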
+ + Returns: + Server information including version, status, and current chain tip + """ + logger.debug("Async retrieving Hiro API server info") + response = await self._amake_request("GET", "/extended") + return HiroApiInfo(**response) + + def search(self, query_id: str) -> Dict[str, Any]: + """Search for blocks, transactions, contracts, or addresses.""" + logger.debug("Performing search for query: %s", query_id) + return self._make_request("GET", f"{self.ENDPOINTS['search']}/{query_id}") + + # Additional methods from the original file + def get_all_transactions_by_block( + self, block_height: int, page_size: int = 50 + ) -> BlockTransactionsResponse: + """Get all transactions in a block by paginating through results.""" + logger.debug(f"Getting all transactions for block height {block_height}") + + # Get first page to determine total + first_page = self.get_transactions_by_block(block_height, limit=page_size) + + # If we got all transactions in the first request, return it + if first_page.total <= page_size: + return first_page + + # Initialize with first page results + all_transactions = first_page.results.copy() + + # Paginate through the rest + remaining = first_page.total - page_size + offset = page_size + + while remaining > 0: + current_limit = min(page_size, remaining) + logger.debug( + f"Fetching {current_limit} more transactions with offset {offset}" + ) + + page = self.get_transactions_by_block( + block_height, limit=current_limit, offset=offset + ) + + all_transactions.extend(page.results) + offset += current_limit + remaining -= current_limit + + # Create combined response + return BlockTransactionsResponse( + limit=first_page.total, + offset=0, + total=first_page.total, + results=all_transactions, + ) + + def get_transactions_by_block_hash(self, block_hash: str) -> Dict[str, Any]: + """Get transactions in a block by hash.""" + return self._make_request("GET", f"/extended/v1/tx/block/{block_hash}") + + def get_mempool_transactions(self) -> Dict[str, Any]: + """Get pending transactions.""" + return self._make_request("GET", "/extended/v1/tx/mempool") + + def get_blocks(self) -> Dict[str, Any]: + """Get recent blocks.""" + return self._make_request("GET", "/extended/v1/block") + + def get_block_by_height(self, height: int) -> Dict[str, Any]: + """Get block by height.""" + return self._make_request("GET", f"/extended/v1/block/by_height/{height}") + + def get_contract_by_id(self, contract_id: str) -> Dict[str, Any]: + """Get contract details.""" + return self._make_request("GET", f"/extended/v1/contract/{contract_id}") + + @cached(lambda self: self._cache) + def get_fee_rate(self) -> Dict[str, Any]: + """Get current fee rate with caching.""" + logger.debug("Retrieving current fee rate") + return self._make_request("GET", "/extended/v1/fee_rate") + + @cached(lambda self: self._cache) + def get_stx_supply(self) -> Dict[str, Any]: + """Get STX supply with caching.""" + logger.debug("Retrieving STX supply") + return self._make_request("GET", "/extended/v1/stx_supply") diff --git a/services/integrations/hiro/models.py b/services/integrations/hiro/models.py new file mode 100644 index 00000000..55be5b20 --- /dev/null +++ b/services/integrations/hiro/models.py @@ -0,0 +1,150 @@ +"""Hiro API specific data models.""" + +from dataclasses import dataclass +from typing import Any, Dict, List, Union, Optional + + +@dataclass +class ChainTip: + """Current chain tip information.""" + + block_height: int + block_hash: str + index_block_hash: str + microblock_hash: str + microblock_sequence: int + 
burn_block_height: int + + +@dataclass +class HiroApiInfo: + """Hiro API server information.""" + + server_version: str + status: str + pox_v1_unlock_height: int + pox_v2_unlock_height: int + pox_v3_unlock_height: int + chain_tip: Union[ChainTip, Dict[str, Any]] + + def __post_init__(self): + """Convert chain_tip from dict to ChainTip object if needed.""" + # If chain_tip is a dictionary, convert it to a ChainTip object + if isinstance(self.chain_tip, dict) and not isinstance( + self.chain_tip, ChainTip + ): + # Some implementations might only include a subset of fields + self.chain_tip = ChainTip( + block_height=self.chain_tip.get("block_height", 0), + block_hash=self.chain_tip.get("block_hash", ""), + index_block_hash=self.chain_tip.get("index_block_hash", ""), + microblock_hash=self.chain_tip.get("microblock_hash", ""), + microblock_sequence=self.chain_tip.get("microblock_sequence", 0), + burn_block_height=self.chain_tip.get("burn_block_height", 0), + ) + + +@dataclass +class Principal: + """Principal for post condition.""" + + type_id: str + + +@dataclass +class PostCondition: + """Post condition in a transaction.""" + + principal: Principal + condition_code: str + amount: str + type: str + + +@dataclass +class ClarityValue: + """Clarity value representation.""" + + hex: str + repr: str + + +@dataclass +class ContractLog: + """Contract log in an event.""" + + contract_id: str + topic: str + value: ClarityValue + + +@dataclass +class TransactionEvent: + """Event in a transaction.""" + + event_index: int + event_type: str + tx_id: str + contract_log: Optional[ContractLog] = None + + +@dataclass +class TokenTransfer: + """Token transfer details.""" + + recipient_address: str + amount: str + memo: Optional[str] = None + + +@dataclass +class BlockTransaction: + """Transaction in a block.""" + + tx_id: str + nonce: int + fee_rate: str + sender_address: str + post_condition_mode: str + post_conditions: List[PostCondition] + anchor_mode: str + block_hash: str + block_height: int + block_time: int + block_time_iso: str + burn_block_height: int + burn_block_time: int + burn_block_time_iso: str + parent_burn_block_time: int + parent_burn_block_time_iso: str + canonical: bool + tx_index: int + tx_status: str + tx_result: ClarityValue + event_count: int + parent_block_hash: str + is_unanchored: bool + execution_cost_read_count: int + execution_cost_read_length: int + execution_cost_runtime: int + execution_cost_write_count: int + execution_cost_write_length: int + events: List[TransactionEvent] + tx_type: str + sponsor_nonce: Optional[int] = None + sponsored: Optional[bool] = None + sponsor_address: Optional[str] = None + microblock_hash: Optional[str] = None + microblock_sequence: Optional[int] = None + microblock_canonical: Optional[bool] = None + token_transfer: Optional[TokenTransfer] = None + + +@dataclass +class BlockTransactionsResponse: + """Response from the block transactions API.""" + + limit: int + offset: int + total: int + results: List[BlockTransaction] diff --git a/services/integrations/hiro/platform_api.py b/services/integrations/hiro/platform_api.py new file mode 100644 index 00000000..81163bf0 --- /dev/null +++ b/services/integrations/hiro/platform_api.py @@ -0,0 +1,273 @@ +"""Platform API client for Hiro chainhook management.""" + +from typing import Any, Dict, List, Optional + +from config import config + +from .base import BaseHiroApi +from .utils import ChainHookBuilder, ChainHookPredicate, WebhookConfig + + +class PlatformApi(BaseHiroApi): + """Client for interacting with 
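# Sketch of how the HiroApiInfo dataclass above normalizes its chain_tip field:
# __post_init__ converts a raw dict from the /extended endpoint into a ChainTip,
# filling any missing keys with empty strings or zeros. Values are hypothetical.
raw_info = {
    "server_version": "stacks-blockchain-api 7.x",
    "status": "ready",
    "pox_v1_unlock_height": 0,
    "pox_v2_unlock_height": 0,
    "pox_v3_unlock_height": 0,
    "chain_tip": {"block_height": 150000, "block_hash": "0xabc"},
}
info = HiroApiInfo(**raw_info)
# info.chain_tip is now a ChainTip with block_height=150000 and default values
# ("" / 0) for the keys the payload omitted.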
the Hiro Platform API.""" + + def __init__(self): + """Initialize the Platform API client.""" + super().__init__(config.api.platform_base_url) + self.default_webhook = WebhookConfig( + url=config.api.webhook_url, auth_header=config.api.webhook_auth + ) + + def create_chainhook(self, predicate: ChainHookPredicate) -> Dict[str, Any]: + """Create a new chainhook. + + Args: + predicate: The chainhook predicate configuration + + Returns: + Dict containing the response from the API + """ + return self._make_request( + "POST", + f"/v1/ext/{self.api_key}/chainhooks", + headers={"Content-Type": "application/json"}, + json=predicate, + ) + + async def acreate_chainhook(self, predicate: ChainHookPredicate) -> Dict[str, Any]: + """Async version of create_chainhook.""" + return await self._amake_request( + "POST", + f"/v1/ext/{self.api_key}/chainhooks", + headers={"Content-Type": "application/json"}, + json=predicate, + ) + + def create_transaction_hook( + self, + txid: str, + name: str = "tx-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + expire_after_occurrence: int = 1, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring specific transactions.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_transaction_filter(txid) + .with_blocks(start_block) + .with_webhook(webhook or self.default_webhook) + .with_expiration(expire_after_occurrence) + .build() + ) + return self.create_chainhook(predicate) + + def create_contract_call_hook( + self, + contract_identifier: str, + method: str, + name: str = "contract-call-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + end_block: Optional[int] = None, + expire_after_occurrence: Optional[int] = None, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring contract calls.""" + builder = ( + ChainHookBuilder(name, network=network) + .with_contract_call_filter(contract_identifier, method) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + ) + + if expire_after_occurrence is not None: + builder.with_expiration(expire_after_occurrence) + + return self.create_chainhook(builder.build()) + + def create_ft_event_hook( + self, + asset_identifier: str, + actions: List[str], + name: str = "ft-event-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + end_block: Optional[int] = None, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring fungible token events.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_ft_event_filter(asset_identifier, actions) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + .build() + ) + return self.create_chainhook(predicate) + + def create_nft_event_hook( + self, + asset_identifier: str, + actions: List[str], + name: str = "nft-event-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + end_block: Optional[int] = None, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring non-fungible token events.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_nft_event_filter(asset_identifier, actions) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + .build() + ) + return self.create_chainhook(predicate) + + def create_stx_event_hook( + self, + actions: List[str], + name: str = 
"stx-event-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + end_block: Optional[int] = None, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring STX events.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_stx_event_filter(actions) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + .build() + ) + return self.create_chainhook(predicate) + + def create_print_event_hook( + self, + contract_identifier: str, + topic: str, + name: str = "print-event-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + end_block: Optional[int] = None, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring print events.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_print_event_filter(contract_identifier, topic) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + .build() + ) + return self.create_chainhook(predicate) + + def create_contract_deployment_hook( + self, + txid: str, + name: str = "contract-deployment-monitor", + start_block: Optional[int] = 75996, + network: str = "testnet", + end_block: Optional[int] = None, + expire_after_occurrence: int = 1, + webhook: Optional[WebhookConfig] = None, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring contract deployments.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_transaction_filter(txid) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + .with_expiration(expire_after_occurrence) + .build() + ) + return self.create_chainhook(predicate) + + def create_dao_x_linkage_hook( + self, + contract_identifier: str, + method: str = "send", + name: str = "dao-x-linkage", + start_block: int = 601924, + network: str = "mainnet", + end_block: Optional[int] = None, + webhook: Optional[WebhookConfig] = None, + ) -> Dict[str, Any]: + """Create a chainhook for monitoring DAO X linkage.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_contract_call_filter(contract_identifier, method) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + .build() + ) + return self.create_chainhook(predicate) + + # Async versions of the hook creation methods + async def acreate_transaction_hook( + self, + txid: str, + name: str = "tx-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + expire_after_occurrence: int = 1, + ) -> Dict[str, Any]: + """Async version of create_transaction_hook.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_transaction_filter(txid) + .with_blocks(start_block) + .with_webhook(webhook or self.default_webhook) + .with_expiration(expire_after_occurrence) + .build() + ) + return await self.acreate_chainhook(predicate) + + async def acreate_contract_call_hook( + self, + contract_identifier: str, + method: str, + name: str = "contract-call-monitor", + start_block: Optional[int] = None, + network: str = "testnet", + webhook: Optional[WebhookConfig] = None, + end_block: Optional[int] = None, + expire_after_occurrence: Optional[int] = None, + ) -> Dict[str, Any]: + """Async version of create_contract_call_hook.""" + builder = ( + ChainHookBuilder(name, network=network) + .with_contract_call_filter(contract_identifier, method) + .with_blocks(start_block, end_block) + .with_webhook(webhook or 
self.default_webhook) + ) + + if expire_after_occurrence is not None: + builder.with_expiration(expire_after_occurrence) + + return await self.acreate_chainhook(builder.build()) + + async def acreate_dao_x_linkage_hook( + self, + contract_identifier: str, + method: str = "send", + name: str = "dao-x-linkage", + start_block: int = 601924, + network: str = "mainnet", + end_block: Optional[int] = None, + webhook: Optional[WebhookConfig] = None, + ) -> Dict[str, Any]: + """Async version of create_dao_x_linkage_hook.""" + predicate = ( + ChainHookBuilder(name, network=network) + .with_contract_call_filter(contract_identifier, method) + .with_blocks(start_block, end_block) + .with_webhook(webhook or self.default_webhook) + .build() + ) + return await self.acreate_chainhook(predicate) diff --git a/services/integrations/hiro/utils.py b/services/integrations/hiro/utils.py new file mode 100644 index 00000000..65ad70a7 --- /dev/null +++ b/services/integrations/hiro/utils.py @@ -0,0 +1,216 @@ +"""Utility classes and types for Hiro API integration.""" + +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional, TypedDict + + +class HiroApiError(Exception): + """Base exception for Hiro API errors.""" + + pass + + +class HiroApiRateLimitError(HiroApiError): + """Exception for rate limit errors.""" + + pass + + +class HiroApiTimeoutError(HiroApiError): + """Exception for timeout errors.""" + + pass + + +class ChainType(str, Enum): + """Supported blockchain types for chainhooks.""" + + STACKS = "stacks" + BITCOIN = "bitcoin" + + +class EventScope(str, Enum): + """Supported event scopes for chainhooks.""" + + TXID = "txid" + CONTRACT_CALL = "contract_call" + PRINT_EVENT = "print_event" + FT_EVENT = "ft_event" + NFT_EVENT = "nft_event" + STX_EVENT = "stx_event" + + +@dataclass +class WebhookConfig: + """Configuration for webhook endpoints.""" + + url: str + auth_header: str + retry_count: int = 3 + timeout: int = 10 + events: List[str] = None + + def to_dict(self) -> Dict[str, Any]: + """Convert webhook config to dictionary format.""" + return { + "url": self.url, + "authorization_header": self.auth_header, + "retry_count": self.retry_count, + "timeout": self.timeout, + "events": self.events, + } + + +class ChainHookPredicate(TypedDict): + """Type definition for chainhook predicates.""" + + name: str + chain: str + version: int + networks: Dict[str, Any] + + +class ChainHookBuilder: + """Builder class for creating chainhook predicates.""" + + def __init__( + self, + name: str, + chain_type: ChainType = ChainType.STACKS, + network: str = "testnet", + version: int = 1, + ): + """Initialize the chainhook builder. 
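# Hypothetical end-to-end use of ChainHookBuilder (the filter, block, webhook
# and expiration methods follow below): build() returns the nested predicate
# dict that create_chainhook posts to the Platform API. Webhook values are
# placeholders.
predicate = (
    ChainHookBuilder("tx-monitor", network="testnet")
    .with_transaction_filter("0xdeadbeef")
    .with_blocks(start_block=150000)
    .with_webhook(WebhookConfig(url="https://example.com/hook", auth_header="Bearer secret"))
    .with_expiration(1)
    .build()
)
# predicate["networks"]["testnet"] contains if_this / then_that plus
# decode_clarity_values, start_block and expire_after_occurrence.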
+ + Args: + name: Name of the chainhook + chain_type: Type of blockchain to monitor + network: Network to monitor (testnet/mainnet) + version: API version + """ + self.name = name + self.chain_type = chain_type + self.network = network + self.version = version + self.conditions: Dict[str, Any] = {} + self.start_block: Optional[int] = None + self.end_block: Optional[int] = None + self.decode_clarity_values: bool = True + self.expire_after_occurrence: Optional[int] = None + self.webhook: Optional[WebhookConfig] = None + + def with_transaction_filter(self, txid: str) -> "ChainHookBuilder": + """Add transaction ID filter.""" + self.conditions = {"scope": EventScope.TXID, "equals": txid} + return self + + def with_contract_call_filter( + self, + contract_identifier: str, + method: str, + ) -> "ChainHookBuilder": + """Add contract call filter.""" + self.conditions = { + "scope": EventScope.CONTRACT_CALL, + "method": method, + "contract_identifier": contract_identifier, + } + return self + + def with_print_event_filter( + self, + contract_identifier: str, + topic: str, + ) -> "ChainHookBuilder": + """Add print event filter.""" + self.conditions = { + "scope": EventScope.PRINT_EVENT, + "contract_identifier": contract_identifier, + "topic": topic, + } + return self + + def with_ft_event_filter( + self, + asset_identifier: str, + actions: List[str], + ) -> "ChainHookBuilder": + """Add fungible token event filter.""" + self.conditions = { + "scope": EventScope.FT_EVENT, + "asset_identifier": asset_identifier, + "actions": actions, + } + return self + + def with_nft_event_filter( + self, + asset_identifier: str, + actions: List[str], + ) -> "ChainHookBuilder": + """Add non-fungible token event filter.""" + self.conditions = { + "scope": EventScope.NFT_EVENT, + "asset_identifier": asset_identifier, + "actions": actions, + } + return self + + def with_stx_event_filter( + self, + actions: List[str], + ) -> "ChainHookBuilder": + """Add STX event filter.""" + self.conditions = { + "scope": EventScope.STX_EVENT, + "actions": actions, + } + return self + + def with_blocks( + self, + start_block: Optional[int] = None, + end_block: Optional[int] = None, + ) -> "ChainHookBuilder": + """Set block range.""" + self.start_block = start_block + self.end_block = end_block + return self + + def with_webhook(self, webhook: WebhookConfig) -> "ChainHookBuilder": + """Set webhook configuration.""" + self.webhook = webhook + return self + + def with_expiration(self, occurrences: int) -> "ChainHookBuilder": + """Set expiration after number of occurrences.""" + self.expire_after_occurrence = occurrences + return self + + def build(self) -> ChainHookPredicate: + """Build the chainhook predicate.""" + if not self.conditions: + raise ValueError("No conditions set for chainhook") + if not self.webhook: + raise ValueError("No webhook configured for chainhook") + + network_config = { + "if_this": self.conditions, + "then_that": {"http_post": self.webhook.to_dict()}, + "decode_clarity_values": self.decode_clarity_values, + } + + if self.start_block is not None: + network_config["start_block"] = self.start_block + if self.end_block is not None: + network_config["end_block"] = self.end_block + if self.expire_after_occurrence is not None: + network_config["expire_after_occurrence"] = self.expire_after_occurrence + + return { + "name": self.name, + "chain": self.chain_type, + "version": self.version, + "networks": {self.network: network_config}, + } diff --git a/services/integrations/webhooks/__init__.py 
b/services/integrations/webhooks/__init__.py new file mode 100644 index 00000000..d5fcedc6 --- /dev/null +++ b/services/integrations/webhooks/__init__.py @@ -0,0 +1,5 @@ +"""Webhook services package.""" + +from services.integrations.webhooks.chainhook import ChainhookService + +__all__ = ["ChainhookService"] diff --git a/services/webhooks/base.py b/services/integrations/webhooks/base.py similarity index 100% rename from services/webhooks/base.py rename to services/integrations/webhooks/base.py diff --git a/services/webhooks/chainhook/__init__.py b/services/integrations/webhooks/chainhook/__init__.py similarity index 54% rename from services/webhooks/chainhook/__init__.py rename to services/integrations/webhooks/chainhook/__init__.py index f546a5bf..8c984f08 100644 --- a/services/webhooks/chainhook/__init__.py +++ b/services/integrations/webhooks/chainhook/__init__.py @@ -3,18 +3,18 @@ This module provides components for parsing and handling Chainhook webhook payloads. """ -from services.webhooks.chainhook.handler import ChainhookHandler -from services.webhooks.chainhook.handlers import ( +from services.integrations.webhooks.chainhook.handler import ChainhookHandler +from services.integrations.webhooks.chainhook.handlers import ( + ActionConcluderHandler, BlockStateHandler, ChainhookEventHandler, - ContractMessageHandler, DAOProposalBurnHeightHandler, DAOProposalConclusionHandler, DAOVoteHandler, ) -from services.webhooks.chainhook.models import ChainHookData -from services.webhooks.chainhook.parser import ChainhookParser -from services.webhooks.chainhook.service import ChainhookService +from services.integrations.webhooks.chainhook.models import ChainHookData +from services.integrations.webhooks.chainhook.parser import ChainhookParser +from services.integrations.webhooks.chainhook.service import ChainhookService __all__ = [ "ChainhookService", @@ -22,7 +22,7 @@ "ChainhookHandler", "ChainHookData", "ChainhookEventHandler", - "ContractMessageHandler", + "ActionConcluderHandler", "DAOProposalBurnHeightHandler", "DAOVoteHandler", "DAOProposalConclusionHandler", diff --git a/services/webhooks/chainhook/handler.py b/services/integrations/webhooks/chainhook/handler.py similarity index 82% rename from services/webhooks/chainhook/handler.py rename to services/integrations/webhooks/chainhook/handler.py index 94d08620..6e07b9dd 100644 --- a/services/webhooks/chainhook/handler.py +++ b/services/integrations/webhooks/chainhook/handler.py @@ -3,22 +3,32 @@ from typing import Any, Dict from lib.logger import configure_logger -from services.webhooks.base import WebhookHandler -from services.webhooks.chainhook.handlers.block_state_handler import BlockStateHandler -from services.webhooks.chainhook.handlers.buy_event_handler import BuyEventHandler -from services.webhooks.chainhook.handlers.contract_message_handler import ( - ContractMessageHandler, +from services.integrations.webhooks.base import WebhookHandler +from services.integrations.webhooks.chainhook.handlers.action_concluder_handler import ( + ActionConcluderHandler, ) -from services.webhooks.chainhook.handlers.dao_proposal_burn_height_handler import ( +from services.integrations.webhooks.chainhook.handlers.block_state_handler import ( + BlockStateHandler, +) +from services.integrations.webhooks.chainhook.handlers.buy_event_handler import ( + BuyEventHandler, +) +from services.integrations.webhooks.chainhook.handlers.dao_proposal_burn_height_handler import ( DAOProposalBurnHeightHandler, ) -from 
services.webhooks.chainhook.handlers.dao_proposal_conclusion_handler import ( +from services.integrations.webhooks.chainhook.handlers.dao_proposal_conclusion_handler import ( DAOProposalConclusionHandler, ) -from services.webhooks.chainhook.handlers.dao_proposal_handler import DAOProposalHandler -from services.webhooks.chainhook.handlers.dao_vote_handler import DAOVoteHandler -from services.webhooks.chainhook.handlers.sell_event_handler import SellEventHandler -from services.webhooks.chainhook.models import ChainHookData +from services.integrations.webhooks.chainhook.handlers.dao_proposal_handler import ( + DAOProposalHandler, +) +from services.integrations.webhooks.chainhook.handlers.dao_vote_handler import ( + DAOVoteHandler, +) +from services.integrations.webhooks.chainhook.handlers.sell_event_handler import ( + SellEventHandler, +) +from services.integrations.webhooks.chainhook.models import ChainHookData class ChainhookHandler(WebhookHandler): @@ -40,7 +50,7 @@ def __init__(self): # Initialize BlockStateHandler first as it needs to validate block heights self.block_state_handler = BlockStateHandler() self.handlers = [ - ContractMessageHandler(), + ActionConcluderHandler(), BuyEventHandler(), SellEventHandler(), DAOProposalHandler(), @@ -103,9 +113,8 @@ async def handle(self, parsed_data: ChainHookData) -> Dict[str, Any]: # Process other block-level handlers for handler in self.handlers: - if ( - handler != self.block_state_handler - and handler.can_handle_block(apply) + if handler != self.block_state_handler and handler.can_handle_block( + apply ): self.logger.debug( f"Using handler {handler.__class__.__name__} for block-level processing" @@ -130,7 +139,7 @@ async def handle(self, parsed_data: ChainHookData) -> Dict[str, Any]: for handler in self.handlers: await handler.post_block_processing() - self.logger.info( + self.logger.debug( "Finished processing all blocks and transactions in webhook" ) return { diff --git a/services/integrations/webhooks/chainhook/handlers/__init__.py b/services/integrations/webhooks/chainhook/handlers/__init__.py new file mode 100644 index 00000000..b29607ee --- /dev/null +++ b/services/integrations/webhooks/chainhook/handlers/__init__.py @@ -0,0 +1,42 @@ +"""Chainhook webhook handlers module. + +This module contains specialized handlers for different types of chainhook events. 
+""" + +from services.integrations.webhooks.chainhook.handlers.action_concluder_handler import ( + ActionConcluderHandler, +) +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.handlers.block_state_handler import ( + BlockStateHandler, +) +from services.integrations.webhooks.chainhook.handlers.buy_event_handler import ( + BuyEventHandler, +) +from services.integrations.webhooks.chainhook.handlers.dao_proposal_burn_height_handler import ( + DAOProposalBurnHeightHandler, +) +from services.integrations.webhooks.chainhook.handlers.dao_proposal_conclusion_handler import ( + DAOProposalConclusionHandler, +) +from services.integrations.webhooks.chainhook.handlers.dao_proposal_handler import ( + DAOProposalHandler, +) +from services.integrations.webhooks.chainhook.handlers.dao_vote_handler import ( + DAOVoteHandler, +) +from services.integrations.webhooks.chainhook.handlers.sell_event_handler import ( + SellEventHandler, +) + +__all__ = [ + "ChainhookEventHandler", + "ActionConcluderHandler", + "BuyEventHandler", + "SellEventHandler", + "DAOProposalHandler", + "DAOProposalBurnHeightHandler", + "DAOVoteHandler", + "DAOProposalConclusionHandler", + "BlockStateHandler", +] diff --git a/services/integrations/webhooks/chainhook/handlers/action_concluder_handler.py b/services/integrations/webhooks/chainhook/handlers/action_concluder_handler.py new file mode 100644 index 00000000..9c433b86 --- /dev/null +++ b/services/integrations/webhooks/chainhook/handlers/action_concluder_handler.py @@ -0,0 +1,402 @@ +"""Handler for capturing action proposal conclusions and updating proposal records.""" + +from typing import Dict, List, Optional + + +from backend.factory import backend +from backend.models import ( + ExtensionFilter, + ProposalBase, + ProposalFilter, + QueueMessageCreate, + QueueMessageType, +) +from config import config +from lib.utils import strip_metadata_section, create_message_chunks +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.models import ( + Event, + TransactionWithReceipt, +) + + +class ActionConcluderHandler(ChainhookEventHandler): + """Handler for capturing and processing action proposal conclusions. + + This handler identifies contract calls with conclude-action-proposal method and: + 1. Updates proposal records with conclusion data from the blockchain + 2. Creates appropriate queue messages for further processing (tweets, etc.) + """ + + def can_handle_transaction(self, transaction: TransactionWithReceipt) -> bool: + """Check if this handler can handle the given transaction. + + This handler can handle contract call transactions with conclude-action-proposal method. 
+ + Args: + transaction: The transaction to check + + Returns: + bool: True if this handler can handle the transaction, False otherwise + """ + tx_data = self.extract_transaction_data(transaction) + tx_kind = tx_data["tx_kind"] + tx_data_content = tx_data["tx_data"] + tx_metadata = tx_data["tx_metadata"] + + # Only handle ContractCall type transactions + if not isinstance(tx_kind, dict): + self.logger.debug(f"Skipping: tx_kind is not a dict: {type(tx_kind)}") + return False + + tx_kind_type = tx_kind.get("type") + + if not isinstance(tx_data_content, dict): + self.logger.debug( + f"Skipping: tx_data_content is not a dict: {type(tx_data_content)}" + ) + return False + + # Check if the method name is exactly "conclude-action-proposal" + tx_method = tx_data_content.get("method", "") + is_conclude_proposal = tx_method == "conclude-action-proposal" + + # Access success from TransactionMetadata + tx_success = tx_metadata.success + + if is_conclude_proposal and tx_success: + self.logger.debug(f"Found conclude-action-proposal method: {tx_method}") + + return ( + tx_kind_type == "ContractCall" + and is_conclude_proposal + and tx_success is True + ) + + def _find_dao_for_contract(self, contract_identifier: str) -> Optional[Dict]: + """Find the DAO associated with the given contract. + + Args: + contract_identifier: The contract identifier to search for + + Returns: + Optional[Dict]: The DAO data if found, None otherwise + """ + # Find extensions with this contract principal + extensions = backend.list_extensions( + filters=ExtensionFilter( + contract_principal=contract_identifier, + ) + ) + + if not extensions: + self.logger.warning( + f"No extensions found for contract {contract_identifier}" + ) + return None + + # Get the DAO for the first matching extension + dao_id = extensions[0].dao_id + if not dao_id: + self.logger.warning("Extension found but no DAO ID associated with it") + return None + + dao = backend.get_dao(dao_id) + if not dao: + self.logger.warning(f"No DAO found with ID {dao_id}") + return None + + self.logger.info(f"Found DAO for contract {contract_identifier}: {dao.name}") + return dao.model_dump() + + def _get_proposal_conclusion_data(self, events: List[Event]) -> Optional[Dict]: + """Extract proposal conclusion data from action-proposal-voting contract events. + + Args: + events: List of events from the transaction + + Returns: + Optional[Dict]: The proposal conclusion data if found, None otherwise + """ + for event in events: + # Find print events from action-proposal-voting contract + if ( + event.type == "SmartContractEvent" + and hasattr(event, "data") + and event.data.get("topic") == "print" + and "action-proposal-voting" + in event.data.get("contract_identifier", "") + ): + value = event.data.get("value") + + # Handle structured format with payload + if isinstance(value, dict): + notification = value.get("notification", "") + if "conclude-action-proposal" in notification: + payload = value.get("payload", {}) + if isinstance(payload, dict): + return payload + + self.logger.warning( + "Could not find proposal conclusion data in transaction events" + ) + return None + + def _update_proposal_record( + self, dao_data: Dict, conclusion_data: Dict + ) -> Optional[Dict]: + """Update proposal record with conclusion data. 
+ + Args: + dao_data: The DAO data + conclusion_data: The conclusion data from the blockchain + + Returns: + Optional[Dict]: The updated proposal if found and updated, None otherwise + """ + proposal_id = conclusion_data.get("proposalId") + if proposal_id is None: + self.logger.warning("No proposal ID found in conclusion data") + return None + + # Find the proposal by DAO and proposal ID + proposals = backend.list_proposals( + filters=ProposalFilter( + dao_id=dao_data["id"], + proposal_id=proposal_id, + ) + ) + + if not proposals: + self.logger.warning( + f"No proposal found with ID {proposal_id} for DAO {dao_data['name']}" + ) + return None + + proposal = proposals[0] + + # Update proposal with conclusion data + update_data = ProposalBase( + executed=conclusion_data.get("executed"), + passed=conclusion_data.get("passed"), + met_quorum=conclusion_data.get("metQuorum"), + met_threshold=conclusion_data.get("metThreshold"), + votes_for=str(conclusion_data.get("votesFor", 0)), + votes_against=str(conclusion_data.get("votesAgainst", 0)), + liquid_tokens=str(conclusion_data.get("liquidTokens", 0)), + bond=str(conclusion_data.get("bond", 0)), + concluded_by=conclusion_data.get("txSender"), + creator=conclusion_data.get("creator"), + ) + + self.logger.info( + f"Updating proposal {proposal_id} for DAO {dao_data['name']} with conclusion data" + ) + + updated_proposal = backend.update_proposal(proposal.id, update_data) + return updated_proposal.model_dump() if updated_proposal else None + + async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: + """Handle action proposal conclusion transactions. + + Processes contract call transactions that conclude action proposals: + 1. Updates proposal records with conclusion data from the blockchain + 2. Creates queue messages for tweets with the onchain message content + 3. 
Associates them with the appropriate DAO + + Args: + transaction: The transaction to handle + """ + tx_data = self.extract_transaction_data(transaction) + tx_data_content = tx_data["tx_data"] + tx_metadata = tx_data["tx_metadata"] + + # Get contract identifier + contract_identifier = tx_data_content.get("contract_identifier") + if not contract_identifier: + self.logger.warning("No contract identifier found in transaction data") + return + + # Find the DAO for this contract + dao_data = self._find_dao_for_contract(contract_identifier) + if not dao_data: + self.logger.warning(f"No DAO found for contract {contract_identifier}") + return + + # Get the events from the transaction + events = tx_metadata.receipt.events if hasattr(tx_metadata, "receipt") else [] + + # Extract proposal conclusion data and update the proposal record + conclusion_data = self._get_proposal_conclusion_data(events) + if not conclusion_data: + self.logger.warning( + "No proposal conclusion data found in transaction events" + ) + return + + updated_proposal = self._update_proposal_record(dao_data, conclusion_data) + if not updated_proposal: + self.logger.warning( + f"Failed to update proposal {conclusion_data.get('proposalId')} " + f"for DAO {dao_data['name']}" + ) + return + + self.logger.info( + f"Successfully updated proposal {conclusion_data.get('proposalId')} " + f"for DAO {dao_data['name']}" + ) + + # Look up the full proposal record to get the content field + proposal_id = conclusion_data.get("proposalId") + proposals = backend.list_proposals( + filters=ProposalFilter( + dao_id=dao_data["id"], + proposal_id=proposal_id, + ) + ) + + if not proposals: + self.logger.warning( + f"Could not find proposal {proposal_id} for content lookup" + ) + return + + proposal = proposals[0] + message = proposal.content + if not message: + self.logger.warning("No content found in the proposal") + return + + # Clean the message content by removing metadata section + clean_message = strip_metadata_section(message) + + self.logger.info( + f"Processing concluded proposal message from DAO {dao_data['name']}: {clean_message[:100]}..." 
+ ) + + # Check if proposal passed and create appropriate queue messages + proposal_passed = proposal.passed or False + + if proposal_passed: + # Create follow-up message content for threading + proposal_number = proposal.proposal_id + dao_name = dao_data["name"] + reward_amount = 1000 + proposal_url = f"{config.api.base_url}/proposals/{proposal.id}" + + follow_up_message = ( + f"This message was approved by proposal #{proposal_number} of {dao_name}.\n\n" + f"{reward_amount:,} DAO tokens has been rewarded to the submitter.\n\n" + f"View proposal details: {proposal_url}" + ) + + # Create chunked message array from main message only + main_chunks = create_message_chunks(clean_message, add_indices=True) + + # Add the follow-up message as a separate final chunk in the thread + follow_up_chunk = ( + f"({len(main_chunks) + 1}/{len(main_chunks) + 1}) {follow_up_message}" + ) + + # Combine main chunks with follow-up chunk + message_chunks = main_chunks + [follow_up_chunk] + + # Create queue message for Twitter with chunked message array + tweet_message = backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.get_or_create("tweet"), + message={ + "chunks": message_chunks, + "total_chunks": len(message_chunks), + }, + dao_id=dao_data["id"], + ) + ) + self.logger.info( + f"Created tweet queue message with {len(message_chunks)} chunks: {tweet_message.id}" + ) + + # Calculate participation and approval percentages for passed proposal + votes_for = int(proposal.votes_for or 0) + votes_against = int(proposal.votes_against or 0) + total_votes = votes_for + votes_against + + participation_pct = 0.0 + approval_pct = 0.0 + + if total_votes > 0: + # For participation, we'd need total eligible voters - using liquid_tokens as proxy + liquid_tokens = int(proposal.liquid_tokens or 0) + if liquid_tokens > 0: + participation_pct = (total_votes / liquid_tokens) * 100 + + # Approval percentage is votes_for / total_votes + approval_pct = (votes_for / total_votes) * 100 + + # Format the Discord message with header and footer for passed proposal + formatted_message = ( + f"🟩 {dao_data['name']} PROPOSAL #{proposal.proposal_id}: PASSED 🟩\n\n" + ) + formatted_message += "---\n\n" + formatted_message += f"{clean_message}\n\n" + formatted_message += "---\n\n" + formatted_message += f"Start: Block {proposal.vote_start or 'N/A'}\n" + formatted_message += f"End: Block {proposal.vote_end or 'N/A'}\n" + formatted_message += f"Participation: {participation_pct:.1f}%\n" + formatted_message += f"Approval: {approval_pct:.1f}%" + + discord_message = backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.get_or_create("discord"), + message={"content": formatted_message, "proposal_status": "passed"}, + dao_id=dao_data["id"], + ) + ) + self.logger.info( + f"Created Discord queue message (proposal passed): {discord_message.id}" + ) + else: + # For failed proposals, create only Discord message (no Twitter) + # But still chunk the message for consistency (if needed for future use) + message_chunks = create_message_chunks(clean_message, add_indices=True) + + # Calculate participation and approval percentages + votes_for = int(proposal.votes_for or 0) + votes_against = int(proposal.votes_against or 0) + total_votes = votes_for + votes_against + + participation_pct = 0.0 + approval_pct = 0.0 + + if total_votes > 0: + # For participation, we'd need total eligible voters - using liquid_tokens as proxy + liquid_tokens = int(proposal.liquid_tokens or 0) + if liquid_tokens > 0: + participation_pct = 
(total_votes / liquid_tokens) * 100 + + # Approval percentage is votes_for / total_votes + approval_pct = (votes_for / total_votes) * 100 + + # Format the Discord message with header and footer + formatted_message = ( + f"🟥 {dao_data['name']} PROPOSAL #{proposal.proposal_id}: FAILED 🟥\n\n" + ) + formatted_message += "---\n\n" + formatted_message += f"{clean_message}\n\n" + formatted_message += "---\n\n" + formatted_message += f"Start: Block {proposal.vote_start or 'N/A'}\n" + formatted_message += f"End: Block {proposal.vote_end or 'N/A'}\n" + formatted_message += f"Participation: {participation_pct:.1f}%\n" + formatted_message += f"Approval: {approval_pct:.1f}%" + + discord_message = backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.get_or_create("discord"), + message={"content": formatted_message, "proposal_status": "failed"}, + dao_id=dao_data["id"], + ) + ) + self.logger.info( + f"Created Discord queue message (proposal failed): {discord_message.id}" + ) diff --git a/services/integrations/webhooks/chainhook/handlers/action_proposal_handler.py b/services/integrations/webhooks/chainhook/handlers/action_proposal_handler.py new file mode 100644 index 00000000..253fb59a --- /dev/null +++ b/services/integrations/webhooks/chainhook/handlers/action_proposal_handler.py @@ -0,0 +1,556 @@ +"""Handler for capturing new DAO action proposals.""" + +from typing import Dict, List, Optional +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + ContractStatus, + ProposalBase, + ProposalCreate, + ProposalFilter, + ProposalType, + QueueMessageCreate, + QueueMessageType, +) +from lib.utils import decode_hex_parameters +from services.integrations.webhooks.chainhook.handlers.base_proposal_handler import ( + BaseProposalHandler, +) +from services.integrations.webhooks.chainhook.models import ( + Event, + TransactionWithReceipt, +) +from services.ai.workflows.agents import ProposalMetadataAgent + + +class ActionProposalHandler(BaseProposalHandler): + """Handler for capturing and processing new DAO action proposals. + + This handler identifies contract calls related to proposing actions in DAO contracts, + creates proposal records in the database, and tracks their lifecycle. + """ + + def can_handle_transaction(self, transaction: TransactionWithReceipt) -> bool: + """Check if this handler can handle the given transaction. + + This handler can handle contract call transactions related to proposing actions. 
+ + Args: + transaction: The transaction to check + + Returns: + bool: True if this handler can handle the transaction, False otherwise + """ + tx_data = self.extract_transaction_data(transaction) + tx_kind = tx_data["tx_kind"] + tx_data_content = tx_data["tx_data"] + tx_metadata = tx_data["tx_metadata"] + + # Only handle ContractCall type transactions + if not isinstance(tx_kind, dict): + self.logger.debug(f"Skipping: tx_kind is not a dict: {type(tx_kind)}") + return False + + tx_kind_type = tx_kind.get("type") + + if not isinstance(tx_data_content, dict): + self.logger.debug( + f"Skipping: tx_data_content is not a dict: {type(tx_data_content)}" + ) + return False + + # Check if the method name is exactly "create-action-proposal" + tx_method = tx_data_content.get("method", "") + is_proposal_method = tx_method == "create-action-proposal" + + # Access success from TransactionMetadata + tx_success = tx_metadata.success + + if is_proposal_method and tx_success: + self.logger.debug(f"Found action proposal method: {tx_method}") + + return ( + tx_kind_type == "ContractCall" and is_proposal_method and tx_success is True + ) + + def _get_proposal_info_from_events(self, events: list[Event]) -> Optional[Dict]: + """Extract the action proposal information from transaction events. + + Args: + events: List of events from the transaction + + Returns: + Optional[Dict]: Dictionary containing proposal information if found, None otherwise + """ + for event in events: + # Find SmartContractEvent events + if event.type != "SmartContractEvent" or not hasattr(event, "data"): + continue + + event_data = event.data + + # Check if this is a print event + if event_data.get("topic") != "print": + continue + + # Get the value, which might be None + value = event_data.get("value") + + # Skip events with null values + if value is None: + self.logger.debug("Value is None in SmartContractEvent data") + continue + + # Check if this is a proposal event - updated to handle new notification format + notification = value.get("notification", "") + if notification == "create-action-proposal" or notification.endswith( + "/create-action-proposal" + ): + payload = value.get("payload", {}) + if not payload: + self.logger.warning("Empty payload in proposal event") + return None + + return { + "proposal_id": payload.get("proposalId"), + "action": payload.get("action"), + "caller": payload.get("caller"), + "creator": payload.get("creator"), + "liquid_tokens": str(payload.get("liquidTokens")), + "parameters": payload.get("parameters"), + "bond": str(payload.get("bond")), + # Fields from updated payload + "contract_caller": payload.get("contractCaller"), + "created_btc": payload.get("createdBtc"), + "created_stx": payload.get("createdStx"), + "creator_user_id": payload.get("creatorUserId"), + "exec_end": payload.get("execEnd"), + "exec_start": payload.get("execStart"), + "memo": payload.get("memo"), + "tx_sender": payload.get("txSender"), + "vote_end": payload.get("voteEnd"), + "vote_start": payload.get("voteStart"), + "voting_delay": payload.get("votingDelay"), + "voting_period": payload.get("votingPeriod"), + "voting_quorum": payload.get("votingQuorum"), + "voting_reward": ( + str(payload.get("votingReward")) + if payload.get("votingReward") is not None + else None + ), + "voting_threshold": payload.get("votingThreshold"), + } + + self.logger.warning("Could not find proposal information in transaction events") + return None + + def _sanitize_string(self, input_string: Optional[str]) -> Optional[str]: + """Sanitize string by removing 
null bytes and other invalid characters. + + Args: + input_string: The string to sanitize + + Returns: + A sanitized string or None if input was None + """ + if input_string is None: + return None + + # Replace null bytes and other control characters + sanitized = "" + for char in input_string: + if ( + ord(char) >= 32 or char in "\n\r\t" + ): # Keep printable chars and some whitespace + sanitized += char + + return sanitized + + def _get_agent_token_holders(self, dao_id: UUID) -> List[Dict]: + """Get agents that hold tokens for the given DAO. + + Args: + dao_id: The ID of the DAO + + Returns: + List[Dict]: List of agents with their wallet IDs + """ + # Use the specialized backend method for getting agents with DAO tokens + agents_with_tokens_dto = backend.get_agents_with_dao_tokens(dao_id) + + if not agents_with_tokens_dto: + self.logger.error(f"No agents found with tokens for DAO {dao_id}") + return [] + + # Convert DTOs to the expected format + agents_with_tokens = [ + {"agent_id": dto.agent_id, "wallet_id": dto.wallet_id} + for dto in agents_with_tokens_dto + ] + + self.logger.info( + f"Found {len(agents_with_tokens)} agents holding tokens for DAO {dao_id}" + ) + + return agents_with_tokens + + async def _parse_and_generate_proposal_metadata( + self, parameters: str, dao_name: str, proposal_id: str + ) -> Dict[str, str]: + """Parse proposal content for title/tags and generate summary using AI agent. + + First parses the proposal content looking for the structured format: + - Original message + - "\n\n--- Metadata ---" (metadata section marker) + - "\nTitle: {title}" if there's a title + - "\nTags: {tags_string}" where tags_string is tags joined by "|" + + Then uses ProposalMetadataAgent to generate a summary and fill in missing components. + + Args: + parameters: The decoded proposal parameters/content + dao_name: Name of the DAO + proposal_id: The proposal ID + + Returns: + Dict containing 'title', 'summary', and 'tags' keys + """ + if not parameters: + return { + "title": f"Action Proposal #{proposal_id}", + "summary": "", + "tags": [], + } + + # Parse content for structured metadata section + parsed_title = "" + parsed_tags = [] + base_content = parameters + + # Look for metadata section: "--- Metadata ---" + metadata_marker = "--- Metadata ---" + if metadata_marker in parameters: + parts = parameters.split(metadata_marker, 1) + if len(parts) == 2: + base_content = parts[0].strip() + metadata_section = parts[1].strip() + + # Parse metadata section line by line + for line in metadata_section.split("\n"): + line = line.strip() + if line.startswith("Title: "): + parsed_title = line[7:].strip() # Remove "Title: " prefix + elif line.startswith("Tags: "): + tags_string = line[6:].strip() # Remove "Tags: " prefix + if "|" in tags_string: + parsed_tags = [ + tag.strip() + for tag in tags_string.split("|") + if tag.strip() + ] + + # Clean base content for AI processing + clean_content = base_content.strip() + + # Use ProposalMetadataAgent to generate summary and fill missing components + try: + metadata_agent = ProposalMetadataAgent() + + # Use clean content for AI processing + proposal_content = clean_content or f"Action proposal {proposal_id}" + + state = { + "proposal_content": proposal_content, + "dao_name": dao_name, + "proposal_type": "action", + } + + # Generate metadata using AI + ai_result = await metadata_agent.process(state) + + # Combine parsed and AI-generated results + final_title = ( + parsed_title + if parsed_title + else ( + ai_result.get("title", f"Action Proposal 
#{proposal_id}") + if "error" not in ai_result + else f"Action Proposal #{proposal_id}" + ) + ) + + final_tags = ( + parsed_tags + if parsed_tags + else (ai_result.get("tags", []) if "error" not in ai_result else []) + ) + + # Always use AI-generated summary as it's specifically designed for summarization + final_summary = ( + ai_result.get("summary", clean_content) + if "error" not in ai_result + else clean_content + ) + + self.logger.info( + f"Combined metadata for proposal {proposal_id} - " + f"Title: '{final_title}' (parsed: {bool(parsed_title)}), " + f"Tags: {final_tags} (parsed: {bool(parsed_tags)}), " + f"Summary: AI-generated" + ) + + return { + "title": final_title, + "summary": final_summary, + "tags": final_tags, + } + + except Exception as e: + self.logger.error( + f"Error in AI metadata generation for proposal {proposal_id}: {str(e)}" + ) + + # Fallback to parsed results with defaults + return { + "title": ( + parsed_title if parsed_title else f"Action Proposal #{proposal_id}" + ), + "summary": clean_content, + "tags": parsed_tags, + } + + async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: + """Handle action proposal transactions. + + Processes new action proposal transactions and creates proposal records in the database. + + Args: + transaction: The transaction to handle + """ + tx_data = self.extract_transaction_data(transaction) + tx_id = tx_data["tx_id"] + tx_data_content = tx_data["tx_data"] + tx_metadata = tx_data["tx_metadata"] + + # Get contract identifier + contract_identifier = tx_data_content.get("contract_identifier") + if not contract_identifier: + self.logger.warning("No contract identifier found in transaction data") + return + + # Find the DAO for this contract + dao_data = self._find_dao_for_contract(contract_identifier) + if not dao_data: + self.logger.warning(f"No DAO found for contract {contract_identifier}") + return + + # Get the proposal info from the transaction events + events = tx_metadata.receipt.events if hasattr(tx_metadata, "receipt") else [] + proposal_info = self._get_proposal_info_from_events(events) + if proposal_info is None: + self.logger.warning( + "Could not determine proposal information from transaction" + ) + return + + self.logger.info( + f"Processing new action proposal {proposal_info['proposal_id']} for DAO {dao_data['name']} " + f"(contract: {contract_identifier})" + ) + + # Check if the proposal already exists in the database + existing_proposals = backend.list_proposals( + filters=ProposalFilter( + tx_id=tx_id, + ) + ) + + if not existing_proposals: + try: + # First try to decode parameters as hex + decoded_parameters = decode_hex_parameters(proposal_info["parameters"]) + + # Sanitize the decoded parameters to remove null bytes and invalid characters + if decoded_parameters is not None: + parameters = self._sanitize_string(decoded_parameters) + self.logger.debug( + f"Decoded and sanitized parameters: {parameters[:100]}..." 
+ ) + else: + parameters = proposal_info["parameters"] + self.logger.debug("Using original parameters (hex decoding failed)") + + # Parse title/tags from content and generate summary using AI + metadata = await self._parse_and_generate_proposal_metadata( + parameters, dao_data["name"], str(proposal_info["proposal_id"]) + ) + # Create a new proposal record in the database + proposal = backend.create_proposal( + ProposalCreate( + dao_id=dao_data["id"], + title=metadata["title"], + content=parameters, + summary=metadata["summary"], + contract_principal=contract_identifier, + tx_id=tx_id, + proposal_id=proposal_info["proposal_id"], + status=ContractStatus.DEPLOYED, # Since it's already on-chain + type=ProposalType.ACTION, + # Add fields from payload + action=proposal_info["action"], + caller=proposal_info["caller"], + creator=proposal_info["creator"], + liquid_tokens=proposal_info["liquid_tokens"], + bond=proposal_info["bond"], + # Fields from updated payload + contract_caller=proposal_info["contract_caller"], + created_btc=proposal_info["created_btc"], + created_stx=proposal_info["created_stx"], + creator_user_id=proposal_info["creator_user_id"], + exec_end=proposal_info["exec_end"], + exec_start=proposal_info["exec_start"], + memo=proposal_info["memo"], + tx_sender=proposal_info["tx_sender"], + vote_end=proposal_info["vote_end"], + vote_start=proposal_info["vote_start"], + voting_delay=proposal_info["voting_delay"], + voting_period=proposal_info["voting_period"], + voting_quorum=proposal_info["voting_quorum"], + voting_reward=proposal_info["voting_reward"], + voting_threshold=proposal_info["voting_threshold"], + ) + ) + self.logger.info( + f"Created new action proposal record in database: {proposal.id}" + ) + + # Queue evaluation messages for agents holding governance tokens + agents = self._get_agent_token_holders(dao_data["id"]) + if agents: + for agent in agents: + # Create message with only the proposal ID + message_data = { + "proposal_id": proposal.id, # Only pass the proposal UUID + } + + backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.get_or_create( + "dao_proposal_evaluation" + ), + message=message_data, + dao_id=dao_data["id"], + wallet_id=agent["wallet_id"], + ) + ) + + self.logger.info( + f"Created evaluation queue message for agent {agent['agent_id']} " + f"to evaluate proposal {proposal.id}" + ) + else: + self.logger.warning( + f"No agents found holding tokens for DAO {dao_data['id']}" + ) + except Exception as e: + self.logger.error(f"Error creating proposal in database: {str(e)}") + raise + else: + # Update existing proposal with new data from chainhook + existing_proposal = existing_proposals[0] + self.logger.info( + f"Updating existing action proposal in database: {existing_proposal.id}" + ) + + try: + # First try to decode parameters as hex + decoded_parameters = decode_hex_parameters(proposal_info["parameters"]) + + # Sanitize the decoded parameters to remove null bytes and invalid characters + if decoded_parameters is not None: + parameters = self._sanitize_string(decoded_parameters) + self.logger.debug( + f"Decoded and sanitized parameters: {parameters[:100]}..." 
+ ) + else: + parameters = proposal_info["parameters"] + self.logger.debug("Using original parameters (hex decoding failed)") + + # Parse title/tags from content and generate summary using AI + metadata = await self._parse_and_generate_proposal_metadata( + parameters, dao_data["name"], str(proposal_info["proposal_id"]) + ) + + # Prepare update data with new information from chainhook + update_data = ProposalBase( + title=metadata["title"], + content=parameters, + summary=metadata["summary"], + status=ContractStatus.DEPLOYED, # Ensure status reflects on-chain state + # Update fields from payload + action=proposal_info["action"], + caller=proposal_info["caller"], + creator=proposal_info["creator"], + proposal_id=proposal_info["proposal_id"], + liquid_tokens=proposal_info["liquid_tokens"], + bond=proposal_info["bond"], + # Fields from updated payload + contract_caller=proposal_info["contract_caller"], + created_btc=proposal_info["created_btc"], + created_stx=proposal_info["created_stx"], + creator_user_id=proposal_info["creator_user_id"], + exec_end=proposal_info["exec_end"], + exec_start=proposal_info["exec_start"], + memo=proposal_info["memo"], + tx_sender=proposal_info["tx_sender"], + vote_end=proposal_info["vote_end"], + vote_start=proposal_info["vote_start"], + voting_delay=proposal_info["voting_delay"], + voting_period=proposal_info["voting_period"], + voting_quorum=proposal_info["voting_quorum"], + voting_reward=proposal_info["voting_reward"], + voting_threshold=proposal_info["voting_threshold"], + ) + + # Update the existing proposal + updated_proposal = backend.update_proposal( + existing_proposal.id, update_data + ) + + self.logger.info( + f"Successfully updated action proposal {updated_proposal.id} with chainhook data" + ) + + # Check if we need to queue evaluation messages for agents + agents = self._get_agent_token_holders(dao_data["id"]) + if agents: + for agent in agents: + # Create message with only the proposal ID + message_data = { + "proposal_id": updated_proposal.id, # Only pass the proposal UUID + } + + backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.get_or_create( + "dao_proposal_evaluation" + ), + message=message_data, + dao_id=dao_data["id"], + wallet_id=agent["wallet_id"], + ) + ) + + self.logger.info( + f"Created evaluation queue message for agent {agent['agent_id']} " + f"to evaluate updated proposal {updated_proposal.id}" + ) + else: + self.logger.warning( + f"No agents found holding tokens for DAO {dao_data['id']}" + ) + + except Exception as e: + self.logger.error(f"Error updating proposal in database: {str(e)}") + raise diff --git a/services/webhooks/chainhook/handlers/action_vote_handler.py b/services/integrations/webhooks/chainhook/handlers/action_vote_handler.py similarity index 78% rename from services/webhooks/chainhook/handlers/action_vote_handler.py rename to services/integrations/webhooks/chainhook/handlers/action_vote_handler.py index c9c97493..745e46c2 100644 --- a/services/webhooks/chainhook/handlers/action_vote_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/action_vote_handler.py @@ -4,8 +4,10 @@ from backend.factory import backend from backend.models import ProposalFilter, ProposalType -from services.webhooks.chainhook.handlers.base_vote_handler import BaseVoteHandler -from services.webhooks.chainhook.models import Event +from services.integrations.webhooks.chainhook.handlers.base_vote_handler import ( + BaseVoteHandler, +) +from services.integrations.webhooks.chainhook.models import Event class 
ActionVoteHandler(BaseVoteHandler): @@ -62,7 +64,9 @@ def _get_vote_info_from_events(self, events: list[Event]) -> Optional[Dict]: event_data = event.data value = event_data.get("value", {}) - if value.get("notification") == "vote-on-proposal": + # Check for the new notification format + notification = value.get("notification", "") + if "vote-on-action-proposal" in notification: payload = value.get("payload", {}) if not payload: self.logger.warning("Empty payload in vote event") @@ -73,9 +77,13 @@ def _get_vote_info_from_events(self, events: list[Event]) -> Optional[Dict]: "proposalId" ), # Numeric ID for action proposals "voter": payload.get("voter"), - "caller": payload.get("caller"), + "caller": payload.get("contractCaller"), # Updated field name + "tx_sender": payload.get("txSender"), # New field "amount": str(payload.get("amount")), - "vote_value": None, # Will be extracted from transaction args + "vote_value": payload.get( + "vote" + ), # Vote value is now directly in payload + "voter_user_id": payload.get("voterUserId"), # New field } self.logger.warning("Could not find vote information in transaction events") diff --git a/services/webhooks/chainhook/handlers/base.py b/services/integrations/webhooks/chainhook/handlers/base.py similarity index 98% rename from services/webhooks/chainhook/handlers/base.py rename to services/integrations/webhooks/chainhook/handlers/base.py index 25594a20..580dd6e4 100644 --- a/services/webhooks/chainhook/handlers/base.py +++ b/services/integrations/webhooks/chainhook/handlers/base.py @@ -4,7 +4,7 @@ from typing import Any, Dict, Optional from lib.logger import configure_logger -from services.webhooks.chainhook.models import ( +from services.integrations.webhooks.chainhook.models import ( Apply, ChainHookData, TransactionWithReceipt, diff --git a/services/webhooks/chainhook/handlers/base_proposal_handler.py b/services/integrations/webhooks/chainhook/handlers/base_proposal_handler.py similarity index 94% rename from services/webhooks/chainhook/handlers/base_proposal_handler.py rename to services/integrations/webhooks/chainhook/handlers/base_proposal_handler.py index 512c2d1a..35f3ca88 100644 --- a/services/webhooks/chainhook/handlers/base_proposal_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/base_proposal_handler.py @@ -5,8 +5,11 @@ from backend.factory import backend from backend.models import ExtensionFilter from lib.logger import configure_logger -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.models import Event, TransactionWithReceipt +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.models import ( + Event, + TransactionWithReceipt, +) class BaseProposalHandler(ChainhookEventHandler): diff --git a/services/webhooks/chainhook/handlers/base_vote_handler.py b/services/integrations/webhooks/chainhook/handlers/base_vote_handler.py similarity index 84% rename from services/webhooks/chainhook/handlers/base_vote_handler.py rename to services/integrations/webhooks/chainhook/handlers/base_vote_handler.py index fce39dc9..8be20785 100644 --- a/services/webhooks/chainhook/handlers/base_vote_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/base_vote_handler.py @@ -3,10 +3,13 @@ from typing import Dict, List, Optional from backend.factory import backend -from backend.models import ProposalFilter, VoteBase, VoteCreate, VoteFilter +from backend.models import VoteBase, VoteCreate, 
VoteFilter from lib.logger import configure_logger -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.models import Event, TransactionWithReceipt +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.models import ( + Event, + TransactionWithReceipt, +) class BaseVoteHandler(ChainhookEventHandler): @@ -76,9 +79,13 @@ def can_handle_transaction(self, transaction: TransactionWithReceipt) -> bool: ) return False - # Check if the method name contains "vote-on-proposal" + # Check if the method name contains "vote" and "proposal" tx_method = tx_data_content.get("method", "") - is_vote_method = tx_method == "vote-on-proposal" + is_vote_method = ( + tx_method == "vote-on-proposal" + or "vote-on-action-proposal" in tx_method + or "vote-on-core-proposal" in tx_method + ) # Access success from TransactionMetadata tx_success = tx_metadata.success @@ -130,6 +137,10 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: self.logger.info( f"Extracted vote value from transaction args: {vote_value}" ) + else: + self.logger.info( + f"Vote value found directly in event payload: {vote_value}" + ) if not proposal_identifier or not voter_address: self.logger.warning( @@ -186,7 +197,13 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: update_data = VoteBase(tx_id=tx_id) if amount and not vote.amount: update_data.amount = amount + self.logger.info(f"[DEBUG] Setting amount in update_data: {amount}") + else: + self.logger.info( + f"[DEBUG] Not setting amount - amount: {amount}, existing vote.amount: {vote.amount}" + ) + self.logger.info(f"[DEBUG] Update data: {update_data.model_dump()}") backend.update_vote(vote.id, update_data) self.logger.info(f"Updated vote {vote.id}") else: @@ -209,8 +226,19 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: amount=amount, ) + self.logger.info( + f"[DEBUG] Creating vote with data: {new_vote.model_dump()}" + ) + try: vote = backend.create_vote(new_vote) self.logger.info(f"Created new vote record with ID: {vote.id}") + self.logger.info(f"[DEBUG] Created vote details: {vote.model_dump()}") except Exception as e: self.logger.error(f"Failed to create vote record: {str(e)}") + self.logger.error( + f"[DEBUG] Vote data that failed: {new_vote.model_dump()}" + ) + import traceback + + self.logger.error(f"[DEBUG] Full traceback: {traceback.format_exc()}") diff --git a/services/webhooks/chainhook/handlers/block_state_handler.py b/services/integrations/webhooks/chainhook/handlers/block_state_handler.py similarity index 72% rename from services/webhooks/chainhook/handlers/block_state_handler.py rename to services/integrations/webhooks/chainhook/handlers/block_state_handler.py index 3836ca31..d8a7a2de 100644 --- a/services/webhooks/chainhook/handlers/block_state_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/block_state_handler.py @@ -5,10 +5,8 @@ from backend.factory import backend from backend.models import ChainState, ChainStateBase, ChainStateCreate from config import config -from lib.logger import configure_logger -from services.webhooks.chainhook.models import ( +from services.integrations.webhooks.chainhook.models import ( Apply, - ChainHookData, TransactionWithReceipt, ) @@ -70,6 +68,7 @@ async def handle_block(self, block: Apply) -> None: block: The block to handle """ try: + self.logger.debug(f"Received block metadata: 
{block.metadata}") # Get current chain state current_state = backend.get_latest_chain_state( network=config.network.network @@ -79,8 +78,20 @@ async def handle_block(self, block: Apply) -> None: # Extract block info block_height = block.block_identifier.index block_hash = block.block_identifier.hash + + # Safely extract bitcoin block height + bitcoin_block_height = None + if block.metadata and block.metadata.bitcoin_anchor_block_identifier: + bitcoin_block_height = ( + block.metadata.bitcoin_anchor_block_identifier.index + ) + self.logger.debug( + f"Extracted bitcoin_block_height: {bitcoin_block_height} for block_hash {block.block_identifier.hash}" + ) + self.logger.info( - f"Processing block: height={block_height}, hash={block_hash}" + f"Processing block: height={block_height}, hash={block_hash}, " + f"bitcoin_height={bitcoin_block_height}" ) if current_state: @@ -97,9 +108,21 @@ async def handle_block(self, block: Apply) -> None: f"Updating chain state from height {current_state.block_height} " f"to {block_height}" ) + # Prepare update data, omitting bitcoin_block_height if None + update_data = { + "block_height": block_height, + "block_hash": block_hash, + "network": current_state.network, + } + if bitcoin_block_height is not None: + update_data["bitcoin_block_height"] = bitcoin_block_height + + self.logger.debug( + f"Updating chain_state {current_state.id} with update_data: {update_data}" + ) updated = backend.update_chain_state( current_state.id, - ChainStateBase(block_height=block_height, block_hash=block_hash), + ChainStateBase(**update_data), ) if not updated: self.logger.error( @@ -118,13 +141,16 @@ async def handle_block(self, block: Apply) -> None: f"No existing chain state found. Creating first record for " f"block {block_height}" ) - created = backend.create_chain_state( - ChainStateCreate( - block_height=block_height, - block_hash=block_hash, - network=config.network.network, - ) + chain_state_create_payload = ChainStateCreate( + block_height=block_height, + block_hash=block_hash, + network=config.network.network, + bitcoin_block_height=bitcoin_block_height, + ) + self.logger.debug( + f"Creating new chain_state with payload: {chain_state_create_payload.model_dump_json()}" ) + created = backend.create_chain_state(chain_state_create_payload) if not created: self.logger.error( f"Failed to create chain state for block {block_height}" diff --git a/services/webhooks/chainhook/handlers/buy_event_handler.py b/services/integrations/webhooks/chainhook/handlers/buy_event_handler.py similarity index 97% rename from services/webhooks/chainhook/handlers/buy_event_handler.py rename to services/integrations/webhooks/chainhook/handlers/buy_event_handler.py index 23806757..0aa329d4 100644 --- a/services/webhooks/chainhook/handlers/buy_event_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/buy_event_handler.py @@ -9,8 +9,8 @@ WalletFilter, ) from lib.logger import configure_logger -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.models import TransactionWithReceipt +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.models import TransactionWithReceipt class BuyEventHandler(ChainhookEventHandler): diff --git a/services/webhooks/chainhook/handlers/core_proposal_handler.py b/services/integrations/webhooks/chainhook/handlers/core_proposal_handler.py similarity index 72% rename from 
services/webhooks/chainhook/handlers/core_proposal_handler.py rename to services/integrations/webhooks/chainhook/handlers/core_proposal_handler.py index de2a3a9e..947ca31c 100644 --- a/services/webhooks/chainhook/handlers/core_proposal_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/core_proposal_handler.py @@ -9,10 +9,13 @@ ProposalFilter, ProposalType, ) -from services.webhooks.chainhook.handlers.base_proposal_handler import ( +from services.integrations.webhooks.chainhook.handlers.base_proposal_handler import ( BaseProposalHandler, ) -from services.webhooks.chainhook.models import Event, TransactionWithReceipt +from services.integrations.webhooks.chainhook.models import ( + Event, + TransactionWithReceipt, +) class CoreProposalHandler(BaseProposalHandler): @@ -94,7 +97,10 @@ def _get_proposal_info_from_events(self, events: list[Event]) -> Optional[Dict]: continue # Check if this is a proposal event - if value.get("notification") == "create-proposal": + notification = value.get("notification", "") + if notification == "create-proposal" or notification.endswith( + "/create-proposal" + ): payload = value.get("payload", {}) if not payload: self.logger.warning("Empty payload in proposal event") @@ -104,11 +110,28 @@ def _get_proposal_info_from_events(self, events: list[Event]) -> Optional[Dict]: "proposal": payload.get("proposal"), # Contract to be deployed "caller": payload.get("caller"), "creator": payload.get("creator"), - "created_at_block": payload.get("createdAt"), - "end_block": payload.get("endBlock"), - "start_block": payload.get("startBlock"), "liquid_tokens": str(payload.get("liquidTokens")), "bond": str(payload.get("bond")), + # Fields from updated payload (if available) + "contract_caller": payload.get("contractCaller"), + "created_btc": payload.get("createdBtc"), + "created_stx": payload.get("createdStx"), + "creator_user_id": payload.get("creatorUserId"), + "exec_end": payload.get("execEnd"), + "exec_start": payload.get("execStart"), + "memo": payload.get("memo"), + "tx_sender": payload.get("txSender"), + "vote_end": payload.get("voteEnd"), + "vote_start": payload.get("voteStart"), + "voting_delay": payload.get("votingDelay"), + "voting_period": payload.get("votingPeriod"), + "voting_quorum": payload.get("votingQuorum"), + "voting_reward": ( + str(payload.get("votingReward")) + if payload.get("votingReward") is not None + else None + ), + "voting_threshold": payload.get("votingThreshold"), } self.logger.warning("Could not find proposal information in transaction events") @@ -158,7 +181,6 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: filters=ProposalFilter( dao_id=dao_data["id"], contract_principal=contract_identifier, - proposal_contract=proposal_info["proposal"], type=ProposalType.CORE, ) ) @@ -170,20 +192,32 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: ProposalCreate( dao_id=dao_data["id"], title=proposal_title, - description=f"Core contract proposal for {proposal_info['proposal']}", + content=f"Core contract proposal for {proposal_info['proposal']}", contract_principal=contract_identifier, tx_id=tx_id, status=ContractStatus.DEPLOYED, # Since it's already on-chain type=ProposalType.CORE, - proposal_contract=proposal_info["proposal"], # Add fields from payload caller=proposal_info["caller"], creator=proposal_info["creator"], - created_at_block=proposal_info["created_at_block"], - end_block=proposal_info["end_block"], - start_block=proposal_info["start_block"], 
liquid_tokens=proposal_info["liquid_tokens"], bond=proposal_info["bond"], + # Fields from updated payload (if available) + contract_caller=proposal_info["contract_caller"], + created_btc=proposal_info["created_btc"], + created_stx=proposal_info["created_stx"], + creator_user_id=proposal_info["creator_user_id"], + exec_end=proposal_info["exec_end"], + exec_start=proposal_info["exec_start"], + memo=proposal_info["memo"], + tx_sender=proposal_info["tx_sender"], + vote_end=proposal_info["vote_end"], + vote_start=proposal_info["vote_start"], + voting_delay=proposal_info["voting_delay"], + voting_period=proposal_info["voting_period"], + voting_quorum=proposal_info["voting_quorum"], + voting_reward=proposal_info["voting_reward"], + voting_threshold=proposal_info["voting_threshold"], ) ) self.logger.info( diff --git a/services/webhooks/chainhook/handlers/core_vote_handler.py b/services/integrations/webhooks/chainhook/handlers/core_vote_handler.py similarity index 56% rename from services/webhooks/chainhook/handlers/core_vote_handler.py rename to services/integrations/webhooks/chainhook/handlers/core_vote_handler.py index 17b1171d..4d8f1dca 100644 --- a/services/webhooks/chainhook/handlers/core_vote_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/core_vote_handler.py @@ -4,8 +4,10 @@ from backend.factory import backend from backend.models import ProposalFilter, ProposalType -from services.webhooks.chainhook.handlers.base_vote_handler import BaseVoteHandler -from services.webhooks.chainhook.models import Event +from services.integrations.webhooks.chainhook.handlers.base_vote_handler import ( + BaseVoteHandler, +) +from services.integrations.webhooks.chainhook.models import Event class CoreVoteHandler(BaseVoteHandler): @@ -30,7 +32,6 @@ def _find_proposal( proposals = backend.list_proposals( filters=ProposalFilter( contract_principal=contract_identifier, - proposal_contract=proposal_identifier, type=ProposalType.CORE, ) ) @@ -62,21 +63,51 @@ def _get_vote_info_from_events(self, events: list[Event]) -> Optional[Dict]: event_data = event.data value = event_data.get("value", {}) - if value.get("notification") == "vote-on-proposal": + # Check for both old and new notification formats + notification = value.get("notification", "") + if ( + notification == "vote-on-proposal" + or "vote-on-core-proposal" in notification + ): payload = value.get("payload", {}) if not payload: self.logger.warning("Empty payload in vote event") return None + # Handle both old and new payload structures + proposal_id = payload.get("proposal") or payload.get("proposalId") + caller = payload.get("caller") or payload.get("contractCaller") + return { - "proposal_identifier": payload.get( - "proposal" - ), # Contract principal for core proposals + "proposal_identifier": proposal_id, # Contract principal for core proposals "voter": payload.get("voter"), - "caller": payload.get("caller"), - "amount": str(payload.get("amount")), - "vote_value": None, # Will be extracted from transaction args + "caller": caller, + "tx_sender": payload.get("txSender"), # New field + "amount": self._extract_amount(payload.get("amount")), + "vote_value": payload.get( + "vote" + ), # Vote value may be directly in payload now + "voter_user_id": payload.get("voterUserId"), # New field } self.logger.warning("Could not find vote information in transaction events") return None + + def _extract_amount(self, amount) -> str: + """Extract and convert the amount from Clarity format to a string. 
+ + Args: + amount: The amount value which could be a string with 'u' prefix, integer, or None + + Returns: + str: The amount as a string, or "0" if None + """ + if amount is None: + return "0" + + amount_str = str(amount) + if amount_str.startswith("u"): + # Remove the 'u' prefix and return as string + return amount_str[1:] + else: + return amount_str diff --git a/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py b/services/integrations/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py similarity index 52% rename from services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py rename to services/integrations/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py index 8aa23241..6043715a 100644 --- a/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py @@ -8,12 +8,16 @@ ContractStatus, ProposalFilter, QueueMessageCreate, + QueueMessageFilter, QueueMessageType, ) from config import config from lib.logger import configure_logger -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.models import ChainHookData, TransactionWithReceipt +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.models import ( + ChainHookData, + TransactionWithReceipt, +) class DAOProposalBurnHeightHandler(ChainhookEventHandler): @@ -107,11 +111,47 @@ def _get_agent_token_holders(self, dao_id: UUID) -> List[Dict]: return agents_with_tokens + def _queue_message_exists( + self, + message_type: QueueMessageType, + proposal_id: UUID, + dao_id: UUID, + wallet_id: Optional[UUID] = None, + ) -> bool: + """Check if a queue message already exists for the given parameters. + + Args: + message_type: Type of queue message + proposal_id: The proposal ID + dao_id: The DAO ID + wallet_id: Optional wallet ID for vote messages + + Returns: + bool: True if message exists, False otherwise + """ + filters = QueueMessageFilter( + type=message_type, + dao_id=dao_id, + is_processed=False, + ) + + if wallet_id: + filters.wallet_id = wallet_id + + existing_messages = backend.list_queue_messages(filters=filters) + + # Check if any existing message is for this specific proposal + return any( + msg.message and msg.message.get("proposal_id") == str(proposal_id) + for msg in existing_messages + ) + async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: """Handle burn height check transactions. Processes burn height events, finds proposals that should start at or after the current burn height, and creates queue messages for token holders to vote. + Also handles veto window notifications. 
Args: transaction: The transaction to handle @@ -133,80 +173,135 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: ) # Filter proposals that should start or end at this burn height - start_proposals = [ + vote_proposals = [ p for p in proposals - if p.start_block is not None - and p.end_block is not None - and p.start_block == burn_height - and p.parameters is not None # Ensure parameters exist + if p.vote_start is not None + and p.vote_end is not None + and p.vote_start == burn_height + and p.content is not None # Ensure content exists ] end_proposals = [ p for p in proposals - if p.start_block is not None - and p.end_block is not None - and p.end_block == burn_height - and p.parameters is not None # Ensure parameters exist + if p.vote_start is not None + and p.exec_start is not None + and p.exec_start == burn_height + and p.content is not None # Ensure content exists ] - # Filter proposals that should trigger voting after delay - vote_delay = config.scheduler.dao_proposal_vote_delay_blocks - vote_proposals = [ + # Add veto window proposals + veto_start_proposals = [ p for p in proposals - if p.start_block is not None - and p.end_block is not None - and p.start_block - vote_delay == burn_height - and p.parameters is not None # Ensure parameters exist + if p.vote_end is not None + and p.vote_end == burn_height + and p.content is not None ] - if not start_proposals and not end_proposals and not vote_proposals: + veto_end_proposals = [ + p + for p in proposals + if p.exec_start is not None + and p.exec_start == burn_height + and p.content is not None + ] + + if not ( + vote_proposals + or end_proposals + or veto_start_proposals + or veto_end_proposals + ): self.logger.info( f"No eligible proposals found for burn height {burn_height}" ) return self.logger.info( - f"Found {len(start_proposals)} proposals to start, {len(end_proposals)} proposals to conclude, " - f"and {len(vote_proposals)} proposals ready for voting" + f"Found {len(vote_proposals)} proposals to vote, " + f"{len(end_proposals)} proposals to conclude, " + f"{len(veto_start_proposals)} proposals entering veto window, " + f"{len(veto_end_proposals)} proposals ending veto window" ) - # Process proposals that are starting - for proposal in start_proposals: - # Get the DAO for this proposal + # Process veto window start notifications + for proposal in veto_start_proposals: dao = backend.get_dao(proposal.dao_id) if not dao: self.logger.warning(f"No DAO found for proposal {proposal.id}") continue - # Get agents holding governance tokens - agents = self._get_agent_token_holders(dao.id) - if not agents: - self.logger.warning(f"No agents found holding tokens for DAO {dao.id}") + # Check if a veto notification message already exists + if self._queue_message_exists( + QueueMessageType.get_or_create("discord"), proposal.id, dao.id + ): + self.logger.debug( + f"Veto notification Discord message already exists for proposal {proposal.id}, skipping" + ) continue - # Create queue messages for each agent to evaluate and vote - for agent in agents: - # Create message with only the proposal ID - message_data = { - "proposal_id": proposal.id, # Only pass the proposal UUID - } + # Create veto window start Discord message + message = ( + f"⚠️ **VETO WINDOW OPEN: Proposal #{proposal.proposal_id} of {dao.name}**\n\n" + f"**Proposal:**\n{proposal.content[:100]}...\n\n" + f"**Veto Window Details:**\n" + f"• Opens at: Block {proposal.vote_end}\n" + f"• Closes at: Block {proposal.exec_start}\n\n" + f"View proposal details: 
{config.api.base_url}/proposals/{proposal.id}" + ) - backend.create_queue_message( - QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_EVALUATION, - message=message_data, - dao_id=dao.id, - wallet_id=agent["wallet_id"], - ) + backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.get_or_create("discord"), + message={"content": message, "proposal_status": "veto_window_open"}, + dao_id=dao.id, ) + ) + self.logger.info( + f"Created veto window start Discord message for proposal {proposal.id}" + ) - self.logger.info( - f"Created evaluation queue message for agent {agent['agent_id']} " - f"to evaluate proposal {proposal.id}" + # Process veto window end notifications + for proposal in veto_end_proposals: + dao = backend.get_dao(proposal.dao_id) + if not dao: + self.logger.warning(f"No DAO found for proposal {proposal.id}") + continue + + # Check if a veto end notification message already exists + if self._queue_message_exists( + QueueMessageType.get_or_create("discord"), proposal.id, dao.id + ): + self.logger.debug( + f"Veto end notification Discord message already exists for proposal {proposal.id}, skipping" + ) + continue + + # Create veto window end Discord message + message = ( + f"🔒 **VETO WINDOW CLOSED: Proposal #{proposal.proposal_id} of {dao.name}**\n\n" + f"**Proposal:**\n{proposal.content[:100]}...\n\n" + f"**Status:**\n" + f"• Veto window has now closed\n" + f"• Proposal will be executed if it passed voting\n\n" + f"View proposal details: {config.api.base_url}/proposals/{proposal.id}" + ) + + backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.get_or_create("discord"), + message={ + "content": message, + "proposal_status": "veto_window_closed", + }, + dao_id=dao.id, ) + ) + self.logger.info( + f"Created veto window end Discord message for proposal {proposal.id}" + ) # Process proposals that are ending for proposal in end_proposals: @@ -215,6 +310,17 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: self.logger.warning(f"No DAO found for proposal {proposal.id}") continue + # Check if a conclude message already exists for this proposal + if self._queue_message_exists( + QueueMessageType.get_or_create("dao_proposal_conclude"), + proposal.id, + dao.id, + ): + self.logger.debug( + f"Conclude queue message already exists for proposal {proposal.id}, skipping" + ) + continue + # For conclude messages, we only need to create one message per proposal message_data = { "proposal_id": proposal.id, @@ -222,7 +328,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_CONCLUDE, + type=QueueMessageType.get_or_create("dao_proposal_conclude"), message=message_data, dao_id=dao.id, wallet_id=None, # No specific wallet needed for conclusion @@ -249,13 +355,26 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: # Create vote queue messages for each agent for agent in agents: + # Check if a queue message already exists for this proposal+wallet combination + if self._queue_message_exists( + QueueMessageType.get_or_create("dao_proposal_vote"), + proposal.id, + dao.id, + agent["wallet_id"], + ): + self.logger.debug( + f"Queue message already exists for proposal {proposal.id} " + f"and wallet {agent['wallet_id']}, skipping" + ) + continue + message_data = { "proposal_id": proposal.id, } backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_VOTE, + 
type=QueueMessageType.get_or_create("dao_proposal_vote"), message=message_data, dao_id=dao.id, wallet_id=agent["wallet_id"], diff --git a/services/webhooks/chainhook/handlers/dao_proposal_conclusion_handler.py b/services/integrations/webhooks/chainhook/handlers/dao_proposal_conclusion_handler.py similarity index 96% rename from services/webhooks/chainhook/handlers/dao_proposal_conclusion_handler.py rename to services/integrations/webhooks/chainhook/handlers/dao_proposal_conclusion_handler.py index 747d0fbf..4bfec42d 100644 --- a/services/webhooks/chainhook/handlers/dao_proposal_conclusion_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/dao_proposal_conclusion_handler.py @@ -5,8 +5,11 @@ from backend.factory import backend from backend.models import ProposalBase, ProposalFilter, ProposalType from lib.logger import configure_logger -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.models import Event, TransactionWithReceipt +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.models import ( + Event, + TransactionWithReceipt, +) class DAOProposalConclusionHandler(ChainhookEventHandler): @@ -132,7 +135,8 @@ def _get_conclusion_info_from_events(self, events: List[Event]) -> Optional[Dict base_data["type"] = ProposalType.ACTION elif "proposal" in payload: # Core proposal - base_data["proposal_contract"] = payload.get("proposal") + # Note: proposal_contract field was removed from the model + # The proposal contract info is stored in contract_principal base_data["type"] = ProposalType.CORE else: self.logger.warning( diff --git a/services/webhooks/chainhook/handlers/dao_proposal_handler.py b/services/integrations/webhooks/chainhook/handlers/dao_proposal_handler.py similarity index 83% rename from services/webhooks/chainhook/handlers/dao_proposal_handler.py rename to services/integrations/webhooks/chainhook/handlers/dao_proposal_handler.py index 6c3b75eb..093261fd 100644 --- a/services/webhooks/chainhook/handlers/dao_proposal_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/dao_proposal_handler.py @@ -1,15 +1,13 @@ """Handler for capturing new DAO proposals.""" -from typing import Dict, Optional - -from services.webhooks.chainhook.handlers.action_proposal_handler import ( +from services.integrations.webhooks.chainhook.handlers.action_proposal_handler import ( ActionProposalHandler, ) -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.handlers.core_proposal_handler import ( +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.handlers.core_proposal_handler import ( CoreProposalHandler, ) -from services.webhooks.chainhook.models import TransactionWithReceipt +from services.integrations.webhooks.chainhook.models import TransactionWithReceipt class DAOProposalHandler(ChainhookEventHandler): @@ -57,7 +55,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: if method == "create-proposal": await self.core_handler.handle_transaction(transaction) - elif method == "propose-action": + elif method == "create-action-proposal": await self.action_handler.handle_transaction(transaction) else: self.logger.warning(f"Unknown proposal method: {method}") diff --git a/services/webhooks/chainhook/handlers/dao_vote_handler.py 
b/services/integrations/webhooks/chainhook/handlers/dao_vote_handler.py similarity index 76% rename from services/webhooks/chainhook/handlers/dao_vote_handler.py rename to services/integrations/webhooks/chainhook/handlers/dao_vote_handler.py index cd66803e..013f9630 100644 --- a/services/webhooks/chainhook/handlers/dao_vote_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/dao_vote_handler.py @@ -2,13 +2,18 @@ from typing import Dict, List, Optional -from backend.factory import backend -from backend.models import ProposalFilter, VoteBase, VoteCreate, VoteFilter from lib.logger import configure_logger -from services.webhooks.chainhook.handlers.action_vote_handler import ActionVoteHandler -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.handlers.core_vote_handler import CoreVoteHandler -from services.webhooks.chainhook.models import Event, TransactionWithReceipt +from services.integrations.webhooks.chainhook.handlers.action_vote_handler import ( + ActionVoteHandler, +) +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.handlers.core_vote_handler import ( + CoreVoteHandler, +) +from services.integrations.webhooks.chainhook.models import ( + Event, + TransactionWithReceipt, +) class DAOVoteHandler(ChainhookEventHandler): @@ -73,24 +78,28 @@ def _get_vote_info_from_events(self, events: List[Event]) -> Optional[Dict]: proposal_id = payload.get("proposal_id") # Get voter address - voter = None - if "voter" in payload: - voter = payload.get("voter") + voter = payload.get("voter") # Get vote value (true/false) - vote_value = None - if "vote" in payload: - vote_value = payload.get("vote") + vote_value = payload.get("vote") - # Get token amount + # Get token amount - ensure it's converted to string amount = None if "amount" in payload: amount = str(payload.get("amount")) elif "liquidTokens" in payload: amount = str(payload.get("liquidTokens")) - # Try to determine the vote value from the transaction args - # This is needed because some contracts don't include the vote value in the event + # Get contract caller + contract_caller = payload.get("contractCaller") + + # Get tx sender + tx_sender = payload.get("txSender") + + # Get voter user ID + voter_user_id = payload.get("voterUserId") + + # Try to determine the vote value from the transaction args if not found if vote_value is None: # Check if we can extract it from the method args args = event_data.get("args", []) @@ -99,10 +108,16 @@ def _get_vote_info_from_events(self, events: List[Event]) -> Optional[Dict]: if vote_str in ["true", "false"]: vote_value = vote_str == "true" + self.logger.info( + f"Extracted vote info: proposal_id={proposal_id}, voter={voter}, vote_value={vote_value}, amount={amount}, contract_caller={contract_caller}, tx_sender={tx_sender}, voter_user_id={voter_user_id}" + ) + return { "proposal_id": proposal_id, "voter": voter, - "caller": payload.get("caller"), + "contract_caller": contract_caller, + "tx_sender": tx_sender, + "voter_user_id": voter_user_id, "amount": amount, "vote_value": vote_value, } @@ -126,9 +141,9 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: contract_identifier = tx_data_content.get("contract_identifier", "") # Check if this is a core or action proposal vote based on the contract name - if "core-proposals" in contract_identifier: + if "core-proposal" in contract_identifier: await 
self.core_handler.handle_transaction(transaction) - elif "action-proposals" in contract_identifier: + elif "action-proposal" in contract_identifier: await self.action_handler.handle_transaction(transaction) else: self.logger.warning( diff --git a/services/webhooks/chainhook/handlers/sell_event_handler.py b/services/integrations/webhooks/chainhook/handlers/sell_event_handler.py similarity index 96% rename from services/webhooks/chainhook/handlers/sell_event_handler.py rename to services/integrations/webhooks/chainhook/handlers/sell_event_handler.py index 410d187c..177b561e 100644 --- a/services/webhooks/chainhook/handlers/sell_event_handler.py +++ b/services/integrations/webhooks/chainhook/handlers/sell_event_handler.py @@ -8,8 +8,8 @@ WalletFilter, ) from lib.logger import configure_logger -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.models import TransactionWithReceipt +from services.integrations.webhooks.chainhook.handlers.base import ChainhookEventHandler +from services.integrations.webhooks.chainhook.models import TransactionWithReceipt class SellEventHandler(ChainhookEventHandler): @@ -149,6 +149,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: token_id=record.token_id, dao_id=record.dao_id, amount=str(new_amount), + address=sender, ) backend.update_holder(record.id, update_data) self.logger.info( diff --git a/services/integrations/webhooks/chainhook/models.py b/services/integrations/webhooks/chainhook/models.py new file mode 100644 index 00000000..03a3c81c --- /dev/null +++ b/services/integrations/webhooks/chainhook/models.py @@ -0,0 +1,287 @@ +"""Chainhook webhook data models.""" + +import logging +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Union + +# Configure logger +logger = logging.getLogger(__name__) + + +@dataclass +class TransactionIdentifier: + """Transaction identifier with hash.""" + + hash: str + + +@dataclass +class BlockIdentifier: + """Block identifier with hash and index.""" + + hash: str + index: int + + +@dataclass +class Operation: + """Operation within a transaction.""" + + account: Dict[str, str] + amount: Dict[str, Any] + operation_identifier: Dict[str, int] + status: str + type: str + related_operations: Optional[List[Dict[str, int]]] = None + + +@dataclass +class Event: + """Event data from transaction receipt.""" + + data: Dict[str, Any] + position: Dict[str, int] + type: str + + +@dataclass +class Receipt: + """Transaction receipt containing events and other metadata.""" + + contract_calls_stack: List[Any] + events: List[Event] + mutated_assets_radius: List[Any] + mutated_contracts_radius: List[Any] + + +@dataclass +class TransactionMetadata: + """Metadata about a transaction including execution cost and kind.""" + + description: str + execution_cost: Dict[str, int] + fee: int + kind: Dict[str, Any] + nonce: int + position: Dict[str, int] + raw_tx: str + receipt: Receipt + result: str + sender: str + sponsor: Optional[str] + success: bool + + +@dataclass +class TransactionWithReceipt: + """Transaction with receipt including metadata and operations.""" + + transaction_identifier: TransactionIdentifier + metadata: Union[Dict[str, Any], TransactionMetadata] + operations: List[Union[Dict[str, Any], Operation]] + + +@dataclass +class BlockMetadata: + """Metadata about a block.""" + + bitcoin_anchor_block_identifier: Optional[BlockIdentifier] = None + block_time: Optional[int] = None + confirm_microblock_identifier: 
Optional[Any] = None + cycle_number: Optional[int] = None + pox_cycle_index: Optional[int] = None + pox_cycle_length: Optional[int] = None + pox_cycle_position: Optional[int] = None + reward_set: Optional[Any] = None + signer_bitvec: Optional[str] = None + signer_public_keys: Optional[List[str]] = None + signer_signature: Optional[List[str]] = None + stacks_block_hash: Optional[str] = None + tenure_height: Optional[int] = None + + +@dataclass +class Apply: + """Apply block data structure containing transactions.""" + + block_identifier: BlockIdentifier + transactions: List[TransactionWithReceipt] + metadata: Optional[BlockMetadata] = None + parent_block_identifier: Optional[BlockIdentifier] = None + timestamp: Optional[int] = None + + +@dataclass +class Predicate: + """Predicate for chainhook filter.""" + + scope: str + higher_than: int + + +@dataclass +class ChainHookInfo: + """Information about the chainhook itself.""" + + is_streaming_blocks: bool + predicate: Predicate + uuid: str + + +@dataclass +class ChainHookData: + """Top-level data structure for Chainhook webhook payloads.""" + + apply: List[Apply] + chainhook: ChainHookInfo + events: List[Any] + rollback: List[Any] + + +# V2 API models for block transactions + + +@dataclass +class Principal: + """Principal for post condition.""" + + type_id: str + + +@dataclass +class PostCondition: + """Post condition in a transaction.""" + + principal: Principal + condition_code: str + amount: str + type: str + + +@dataclass +class ClarityValue: + """Clarity value representation.""" + + hex: str + repr: str + + +@dataclass +class ContractLog: + """Contract log in an event.""" + + contract_id: str + topic: str + value: ClarityValue + + +@dataclass +class TransactionEvent: + """Event in a transaction.""" + + event_index: int + event_type: str + tx_id: str + contract_log: Optional[ContractLog] = None + + +@dataclass +class TokenTransfer: + """Token transfer details.""" + + recipient_address: str + amount: str + memo: Optional[str] = None + + +@dataclass +class BlockTransaction: + """Transaction in a block.""" + + tx_id: str + nonce: int + fee_rate: str + sender_address: str + post_condition_mode: str + post_conditions: List[PostCondition] + anchor_mode: str + block_hash: str + block_height: int + block_time: int + block_time_iso: str + burn_block_height: int + burn_block_time: int + burn_block_time_iso: str + parent_burn_block_time: int + parent_burn_block_time_iso: str + canonical: bool + tx_index: int + tx_status: str + tx_result: ClarityValue + event_count: int + parent_block_hash: str + is_unanchored: bool + execution_cost_read_count: int + execution_cost_read_length: int + execution_cost_runtime: int + execution_cost_write_count: int + execution_cost_write_length: int + events: List[TransactionEvent] + tx_type: str + sponsor_nonce: Optional[int] = None + sponsored: Optional[bool] = None + sponsor_address: Optional[str] = None + microblock_hash: Optional[str] = None + microblock_sequence: Optional[int] = None + microblock_canonical: Optional[bool] = None + token_transfer: Optional[TokenTransfer] = None + + +@dataclass +class BlockTransactionsResponse: + """Response from the block transactions API.""" + + limit: int + offset: int + total: int + results: List[BlockTransaction] + + +@dataclass +class ChainTip: + """Current chain tip information.""" + + block_height: int + block_hash: str + index_block_hash: str + microblock_hash: str + microblock_sequence: int + burn_block_height: int + + +@dataclass +class HiroApiInfo: + """Hiro API server 
information.""" + + server_version: str + status: str + pox_v1_unlock_height: int + pox_v2_unlock_height: int + pox_v3_unlock_height: int + chain_tip: Union[ChainTip, Dict[str, Any]] + + def __post_init__(self): + """Convert chain_tip from dict to ChainTip object if needed.""" + # If chain_tip is a dictionary, convert it to a ChainTip object + if isinstance(self.chain_tip, dict) and not isinstance( + self.chain_tip, ChainTip + ): + # Some implementations might only include a subset of fields + self.chain_tip = ChainTip( + block_height=self.chain_tip.get("block_height", 0), + block_hash=self.chain_tip.get("block_hash", ""), + index_block_hash=self.chain_tip.get("index_block_hash", ""), + microblock_hash=self.chain_tip.get("microblock_hash", ""), + microblock_sequence=self.chain_tip.get("microblock_sequence", 0), + burn_block_height=self.chain_tip.get("burn_block_height", 0), + ) diff --git a/services/webhooks/chainhook/parser.py b/services/integrations/webhooks/chainhook/parser.py similarity index 98% rename from services/webhooks/chainhook/parser.py rename to services/integrations/webhooks/chainhook/parser.py index 47c6cf3e..2ed33d59 100644 --- a/services/webhooks/chainhook/parser.py +++ b/services/integrations/webhooks/chainhook/parser.py @@ -4,8 +4,8 @@ from typing import Any, Dict from lib.logger import configure_logger -from services.webhooks.base import WebhookParser -from services.webhooks.chainhook.models import ( +from services.integrations.webhooks.base import WebhookParser +from services.integrations.webhooks.chainhook.models import ( Apply, BlockIdentifier, BlockMetadata, diff --git a/services/webhooks/chainhook/service.py b/services/integrations/webhooks/chainhook/service.py similarity index 72% rename from services/webhooks/chainhook/service.py rename to services/integrations/webhooks/chainhook/service.py index 1b2ec7c1..364cdd65 100644 --- a/services/webhooks/chainhook/service.py +++ b/services/integrations/webhooks/chainhook/service.py @@ -1,9 +1,9 @@ """Chainhook webhook service implementation.""" from lib.logger import configure_logger -from services.webhooks.base import WebhookService -from services.webhooks.chainhook.handler import ChainhookHandler -from services.webhooks.chainhook.parser import ChainhookParser +from services.integrations.webhooks.base import WebhookService +from services.integrations.webhooks.chainhook.handler import ChainhookHandler +from services.integrations.webhooks.chainhook.parser import ChainhookParser class ChainhookService(WebhookService): diff --git a/services/webhooks/dao/__init__.py b/services/integrations/webhooks/dao/__init__.py similarity index 79% rename from services/webhooks/dao/__init__.py rename to services/integrations/webhooks/dao/__init__.py index d5c7b9ad..56f6051a 100644 --- a/services/webhooks/dao/__init__.py +++ b/services/integrations/webhooks/dao/__init__.py @@ -5,6 +5,6 @@ creating DAOs, extensions, and tokens. 
""" -from services.webhooks.dao.service import DAOService +from services.integrations.webhooks.dao.service import DAOService __all__ = ["DAOService"] diff --git a/services/integrations/webhooks/dao/handler.py b/services/integrations/webhooks/dao/handler.py new file mode 100644 index 00000000..3e2414d7 --- /dev/null +++ b/services/integrations/webhooks/dao/handler.py @@ -0,0 +1,143 @@ +"""Handler for DAO webhook payloads.""" + +from typing import Any, Dict, List +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + ContractStatus, + DAOCreate, + ExtensionCreate, + TokenCreate, + XCredsCreate, +) +from config import config +from lib.logger import configure_logger +from services.integrations.webhooks.base import WebhookHandler +from services.integrations.webhooks.dao.models import ( + DAOWebhookPayload, + DAOWebhookResponse, +) + + +class DAOHandler(WebhookHandler): + """Handler for DAO webhook payloads. + + This handler processes validated DAO webhook payloads and creates + the corresponding DAO, extensions, and token in the database. + """ + + def __init__(self): + """Initialize the DAO webhook handler.""" + super().__init__() + self.logger = configure_logger(self.__class__.__name__) + self.db = backend + + async def handle(self, parsed_data: DAOWebhookPayload) -> Dict[str, Any]: + """Handle the parsed DAO webhook data. + + Args: + parsed_data: The parsed and validated DAO webhook payload + + Returns: + Dict containing the result of handling the webhook with created entities + + Raises: + Exception: If there is an error creating any of the entities + """ + try: + self.logger.info(f"Handling DAO webhook for '{parsed_data.name}'") + + # Create the DAO + dao_create = DAOCreate( + name=parsed_data.name, + mission=parsed_data.mission, + description=parsed_data.description or parsed_data.mission, + is_deployed=True, + is_broadcasted=True, + ) + + dao = self.db.create_dao(dao_create) + self.logger.info(f"Created DAO with ID: {dao.id}") + + # Create X credentials for the DAO + new_cred = XCredsCreate( + dao_id=dao.id, + consumer_key=config.twitter.default_consumer_key, + consumer_secret=config.twitter.default_consumer_secret, + client_id=config.twitter.default_client_id, + client_secret=config.twitter.default_client_secret, + access_token=config.twitter.default_access_token, + access_secret=config.twitter.default_access_secret, + username=config.twitter.default_username, + bearer_token=config.twitter.default_bearer_token, + ) + + x_creds = self.db.create_x_creds(new_cred) + self.logger.info(f"Created X credentials with ID: {x_creds.id}") + + # Find the main DAO token contract + dao_token_contract = None + for contract in parsed_data.contracts: + if contract.type.value == "TOKEN" and contract.subtype == "DAO": + dao_token_contract = contract + break + + if not dao_token_contract: + raise ValueError("No DAO token contract found in contracts list") + + # Create the main DAO token + token_create = TokenCreate( + dao_id=dao.id, + contract_principal=dao_token_contract.contract_principal, + tx_id=dao_token_contract.tx_id, + name=parsed_data.name, # Use DAO name as token name + description=parsed_data.description or parsed_data.mission, + symbol=parsed_data.token_info.symbol, + decimals=parsed_data.token_info.decimals, + max_supply=parsed_data.token_info.max_supply, + uri=parsed_data.token_info.uri, + image_url=parsed_data.token_info.image_url, + x_url=parsed_data.token_info.x_url, + telegram_url=parsed_data.token_info.telegram_url, + 
website_url=parsed_data.token_info.website_url, + status=ContractStatus.DEPLOYED, + ) + + token = self.db.create_token(token_create) + self.logger.info(f"Created token with ID: {token.id}") + + # Create extensions for DAO extension contracts + extension_ids: List[UUID] = [] + for contract in parsed_data.contracts: + extension_create = ExtensionCreate( + dao_id=dao.id, + type=contract.type.value, + subtype=contract.subtype, + contract_principal=contract.contract_principal, + tx_id=contract.tx_id, + status=ContractStatus.DEPLOYED, + ) + + extension = self.db.create_extension(extension_create) + extension_ids.append(extension.id) + self.logger.info( + f"Created extension with ID: {extension.id} for type: {contract.type.value} and subtype: {contract.subtype}" + ) + + # Prepare response + response = DAOWebhookResponse( + dao_id=dao.id, + extension_ids=extension_ids if extension_ids else None, + token_id=token.id, + ) + + return { + "success": True, + "message": f"Successfully created DAO '{dao.name}' with ID: {dao.id}", + "data": response.model_dump(), + } + + except Exception as e: + self.logger.error(f"Error handling DAO webhook: {str(e)}", exc_info=True) + raise diff --git a/services/integrations/webhooks/dao/models.py b/services/integrations/webhooks/dao/models.py new file mode 100644 index 00000000..56891ad7 --- /dev/null +++ b/services/integrations/webhooks/dao/models.py @@ -0,0 +1,244 @@ +"""Models for DAO webhook service.""" + +from enum import Enum +from typing import List, Optional +from uuid import UUID + +from pydantic import BaseModel, ConfigDict, Field + + +class ContractType(str, Enum): + """Contract types enum.""" + + AGENT = "AGENT" + BASE = "BASE" + ACTIONS = "ACTIONS" + EXTENSIONS = "EXTENSIONS" + PROPOSALS = "PROPOSALS" + TOKEN = "TOKEN" + + +class ClarityVersion(int, Enum): + """Clarity version enum.""" + + CLARITY1 = 1 + CLARITY2 = 2 + CLARITY3 = 3 + + +class ContractCategory(str, Enum): + """Contract categories enum.""" + + BASE = "BASE" + ACTIONS = "ACTIONS" + EXTENSIONS = "EXTENSIONS" + PROPOSALS = "PROPOSALS" + EXTERNAL = "EXTERNAL" + TOKEN = "TOKEN" + + +# Contract subtypes for each type +class AgentSubtype(str, Enum): + """Agent contract subtypes.""" + + AGENT_ACCOUNT = "AGENT_ACCOUNT" + + +class BaseSubtype(str, Enum): + """Base contract subtypes.""" + + DAO = "DAO" + + +class ActionsSubtype(str, Enum): + """Actions contract subtypes.""" + + SEND_MESSAGE = "SEND_MESSAGE" + + +class ExtensionsSubtype(str, Enum): + """Extensions contract subtypes.""" + + ACTION_PROPOSAL_VOTING = "ACTION_PROPOSAL_VOTING" + DAO_CHARTER = "DAO_CHARTER" + DAO_EPOCH = "DAO_EPOCH" + DAO_USERS = "DAO_USERS" + ONCHAIN_MESSAGING = "ONCHAIN_MESSAGING" + REWARDS_ACCOUNT = "REWARDS_ACCOUNT" + TOKEN_OWNER = "TOKEN_OWNER" + TREASURY = "TREASURY" + + +class ProposalsSubtype(str, Enum): + """Proposals contract subtypes.""" + + INITIALIZE_DAO = "INITIALIZE_DAO" + + +class TokenSubtype(str, Enum): + """Token contract subtypes.""" + + DAO = "DAO" + DEX = "DEX" + POOL = "POOL" + PRELAUNCH = "PRELAUNCH" + + +# Contract subcategories for each category +class BaseSubcategory(str, Enum): + """Base contract subcategories.""" + + DAO = "DAO" + + +class ActionsSubcategory(str, Enum): + """Actions contract subcategories.""" + + CONFIGURE_TIMED_VAULT_DAO = "CONFIGURE_TIMED_VAULT_DAO" + CONFIGURE_TIMED_VAULT_SBTC = "CONFIGURE_TIMED_VAULT_SBTC" + CONFIGURE_TIMED_VAULT_STX = "CONFIGURE_TIMED_VAULT_STX" + PMT_DAO_ADD_RESOURCE = "PMT_DAO_ADD_RESOURCE" + PMT_DAO_TOGGLE_RESOURCE = "PMT_DAO_TOGGLE_RESOURCE" + 
PMT_SBTC_ADD_RESOURCE = "PMT_SBTC_ADD_RESOURCE" + PMT_SBTC_TOGGLE_RESOURCE = "PMT_SBTC_TOGGLE_RESOURCE" + PMT_STX_ADD_RESOURCE = "PMT_STX_ADD_RESOURCE" + PMT_STX_TOGGLE_RESOURCE = "PMT_STX_TOGGLE_RESOURCE" + MESSAGING_SEND_MESSAGE = "MESSAGING_SEND_MESSAGE" + TREASURY_ALLOW_ASSET = "TREASURY_ALLOW_ASSET" + + +class ExtensionsSubcategory(str, Enum): + """Extensions contract subcategories.""" + + ACTION_PROPOSALS = "ACTION_PROPOSALS" + CORE_PROPOSALS = "CORE_PROPOSALS" + CHARTER = "CHARTER" + MESSAGING = "MESSAGING" + PAYMENTS_DAO = "PAYMENTS_DAO" + PAYMENTS_SBTC = "PAYMENTS_SBTC" + PAYMENTS_STX = "PAYMENTS_STX" + TIMED_VAULT_DAO = "TIMED_VAULT_DAO" + TIMED_VAULT_SBTC = "TIMED_VAULT_SBTC" + TIMED_VAULT_STX = "TIMED_VAULT_STX" + TOKEN_OWNER = "TOKEN_OWNER" + TREASURY = "TREASURY" + + +class ProposalsSubcategory(str, Enum): + """Proposals contract subcategories.""" + + BOOTSTRAP_INIT = "BOOTSTRAP_INIT" + + +class ExternalSubcategory(str, Enum): + """External contract subcategories.""" + + STANDARD_SIP009 = "STANDARD_SIP009" + STANDARD_SIP010 = "STANDARD_SIP010" + FAKTORY_SIP010 = "FAKTORY_SIP010" + BITFLOW_POOL = "BITFLOW_POOL" + BITFOW_SIP010 = "BITFOW_SIP010" + + +class TokenSubcategory(str, Enum): + """Token contract subcategories.""" + + DAO = "DAO" + DEX = "DEX" + POOL = "POOL" + POOL_STX = "POOL_STX" + PRELAUNCH = "PRELAUNCH" + + +class DeployedContract(BaseModel): + """Deployed contract model for the new webhook structure.""" + + name: str + display_name: Optional[str] = Field(None, alias="displayName") + type: ContractType + subtype: str # Handle union of subtypes as string for flexibility + tx_id: str = Field(alias="txId") + deployer: str + contract_principal: str = Field(alias="contractPrincipal") + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + + +class TokenInfo(BaseModel): + """Token information model for DAO webhook.""" + + symbol: str + decimals: int + max_supply: str = Field(alias="maxSupply") + uri: str + image_url: str = Field(alias="imageUrl") + x_url: Optional[str] = Field(None, alias="xUrl") + telegram_url: Optional[str] = Field(None, alias="telegramUrl") + website_url: Optional[str] = Field(None, alias="websiteUrl") + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + + +class DAOWebhookPayload(BaseModel): + """Webhook payload for DAO creation with deployed contracts structure.""" + + name: str + mission: str + description: Optional[str] = None + contracts: List[DeployedContract] + token_info: TokenInfo = Field(alias="tokenInfo") + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + + +class DAOWebhookResponse(BaseModel): + """Response model for DAO creation webhook.""" + + dao_id: UUID + extension_ids: Optional[List[UUID]] = None + token_id: Optional[UUID] = None + + +# Legacy models for backward compatibility +class ContractResponse(BaseModel): + """Contract response model.""" + + name: str + display_name: Optional[str] = Field(None, alias="displayName") + type: ContractType + subtype: str # Handle union of subtypes as string for flexibility + source: Optional[str] = None + hash: Optional[str] = None + deployment_order: Optional[int] = Field(None, alias="deploymentOrder") + clarity_version: Optional[ClarityVersion] = Field(None, alias="clarityVersion") + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + + +class DeployedContractRegistryEntry(ContractResponse): + """Deployed contract registry entry model.""" + + sender: str + success: bool + tx_id: Optional[str] = 
Field(None, alias="txId") + address: str + error: Optional[str] = None + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + + +class TokenData(BaseModel): + """Token data model for DAO webhook.""" + + name: str + symbol: str + decimals: int + description: str + max_supply: str + uri: str + tx_id: str + contract_principal: str + image_url: str + x_url: Optional[str] = None + telegram_url: Optional[str] = None + website_url: Optional[str] = None diff --git a/services/webhooks/dao/parser.py b/services/integrations/webhooks/dao/parser.py similarity index 91% rename from services/webhooks/dao/parser.py rename to services/integrations/webhooks/dao/parser.py index 26981479..54f38a2e 100644 --- a/services/webhooks/dao/parser.py +++ b/services/integrations/webhooks/dao/parser.py @@ -3,8 +3,8 @@ from typing import Any, Dict from lib.logger import configure_logger -from services.webhooks.base import WebhookParser -from services.webhooks.dao.models import DAOWebhookPayload +from services.integrations.webhooks.base import WebhookParser +from services.integrations.webhooks.dao.models import DAOWebhookPayload class DAOParser(WebhookParser): diff --git a/services/webhooks/dao/service.py b/services/integrations/webhooks/dao/service.py similarity index 75% rename from services/webhooks/dao/service.py rename to services/integrations/webhooks/dao/service.py index 26056f38..ac29f554 100644 --- a/services/webhooks/dao/service.py +++ b/services/integrations/webhooks/dao/service.py @@ -1,9 +1,9 @@ """DAO webhook service implementation.""" from lib.logger import configure_logger -from services.webhooks.base import WebhookService -from services.webhooks.dao.handler import DAOHandler -from services.webhooks.dao.parser import DAOParser +from services.integrations.webhooks.base import WebhookService +from services.integrations.webhooks.dao.handler import DAOHandler +from services.integrations.webhooks.dao.parser import DAOParser class DAOService(WebhookService): diff --git a/services/processing/__init__.py b/services/processing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/services/chat.py b/services/processing/chat_processor.py similarity index 54% rename from services/chat.py rename to services/processing/chat_processor.py index 7f6d6b90..1b0296b4 100644 --- a/services/chat.py +++ b/services/processing/chat_processor.py @@ -1,118 +1,26 @@ import asyncio import datetime -from concurrent.futures import ThreadPoolExecutor -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, TypedDict +from typing import Any, Dict, List, Optional from uuid import UUID from backend.factory import backend -from backend.models import JobBase, JobFilter, Profile, StepCreate, StepFilter +from backend.models import JobBase, Profile, StepCreate from lib.logger import configure_logger from lib.persona import generate_persona, generate_static_persona -from services.websocket import websocket_manager -from services.workflows import execute_workflow_stream +from services.communication.websocket_service import websocket_manager +from services.ai.workflows import execute_workflow_stream +from services.processing.message_handler import ( + Message, + MessageHandler, + StepMessageHandler, + ToolExecutionHandler, +) +from services.processing.streaming_service import running_jobs from tools.tools_factory import initialize_tools logger = configure_logger(__name__) -class JobInfo(TypedDict): - """Information about a running job.""" - - queue: asyncio.Queue - thread_id: UUID - 
agent_id: Optional[UUID] - task: Optional[asyncio.Task] - connection_active: bool - - -# Global job tracking -thread_pool = ThreadPoolExecutor() -running_jobs: Dict[str, JobInfo] = {} - - -@dataclass -class Message: - """Base message structure for chat communication.""" - - content: str - type: str - thread_id: str - tool: Optional[str] = None - tool_input: Optional[str] = None - tool_output: Optional[str] = None - agent_id: Optional[str] = None - role: str = "assistant" - status: Optional[str] = None - created_at: Optional[str] = None - - def to_dict(self) -> Dict[str, Any]: - """Convert message to dictionary, excluding None values.""" - return {k: v for k, v in self.__dict__.items() if v is not None} - - -class MessageHandler: - """Handler for token-type messages.""" - - def process_token_message(self, message: Dict[str, Any]) -> Dict[str, Any]: - """Process a token message and prepare it for streaming.""" - # Default to processing status if not specified - status = message.get("status", "processing") - - return { - "type": "token", - "status": status, # Use the status or default to "processing" - "content": message.get("content", ""), - "created_at": datetime.datetime.now().isoformat(), - "role": "assistant", - "thread_id": message.get("thread_id"), - "agent_id": message.get("agent_id"), - } - - -class StepMessageHandler: - """Handler for step/planning messages.""" - - def process_step_message(self, message: Dict[str, Any]) -> Dict[str, Any]: - """Process a planning step message.""" - # Ensure we have a timestamp for proper ordering - timestamp = datetime.datetime.now().isoformat() - - return { - "type": "step", - "status": "planning", # Always use planning status for steps - "content": message.get("content", ""), - "thought": message.get("thought"), - "created_at": message.get("created_at", timestamp), - "role": "assistant", - "thread_id": message.get("thread_id"), - "agent_id": message.get("agent_id"), - # Add a special flag to identify this as a planning-only message - "planning_only": True, - } - - -class ToolExecutionHandler: - """Handler for tool execution messages.""" - - def process_tool_message(self, message: Dict[str, Any]) -> Dict[str, Any]: - """Process a tool execution message.""" - # Use provided status or default to "processing" - status = message.get("status", "processing") - - return { - "role": "assistant", - "type": "tool", - "status": status, # Use the exact status passed from the tool execution - "tool": message.get("tool"), - "tool_input": message.get("input"), - "tool_output": message.get("output"), - "created_at": datetime.datetime.now().isoformat(), - "thread_id": message.get("thread_id"), - "agent_id": message.get("agent_id"), - } - - class ChatProcessor: """Processes chat messages and manages streaming responses.""" @@ -479,14 +387,14 @@ async def process_stream(self) -> None: if not agent: logger.error(f"Agent with ID {self.agent_id} not found") return - persona = generate_persona(agent) + persona = generate_persona() else: persona = generate_static_persona() tools_map = initialize_tools(self.profile, agent_id=self.agent_id) first_end = True - # Determine if vector collections are configured for this agent + # Determine vector collections vector_collections = None if self.agent_id: agent_config = ( @@ -496,17 +404,15 @@ async def process_stream(self) -> None: agent_config.get("vector_collections") if agent_config else None ) - # Default collections if none specified if not vector_collections: vector_collections = ["dao_collection", "knowledge_collection"] 
logger.info( f"No vector collections configured, defaulting to: {vector_collections}" ) - # Always use vector_preplan workflow since we always have vector collections now workflow_type = "vector_preplan" logger.info( - f"Using {workflow_type} workflow with collections: {vector_collections}" + f"Starting {workflow_type} workflow with collections: {vector_collections}" ) logger.info( @@ -609,6 +515,8 @@ async def _finalize_processing(self) -> None: # As a fallback, retrieve steps from the database to find the final content if not final_result_content: logger.info("No final content found in memory, checking database steps") + from backend.models import StepFilter + steps = backend.list_steps(filters=StepFilter(job_id=self.job_id)) # Filter to complete steps that aren't planning or tool steps @@ -677,381 +585,3 @@ async def _cleanup(self) -> None: # Always clean up the running jobs entry if job_id_str in running_jobs: del running_jobs[job_id_str] - - -class ChatService: - """Main service for chat processing and management.""" - - @staticmethod - async def process_chat_message( - job_id: UUID, - thread_id: UUID, - profile: Profile, - agent_id: Optional[UUID], - input_str: str, - history: List[Dict[str, Any]], - output_queue: asyncio.Queue, - ) -> None: - """Process a chat message. - - Args: - job_id: The ID of the job - thread_id: The ID of the thread - profile: The user's profile - agent_id: Optional agent ID - input_str: The input message - history: Chat history - output_queue: Queue for streaming output - """ - # Initialize job info in running_jobs - job_id_str = str(job_id) - running_jobs[job_id_str] = { - "queue": output_queue, - "thread_id": thread_id, - "agent_id": agent_id, - "task": None, - "connection_active": True, - } - - processor = ChatProcessor( - job_id=job_id, - thread_id=thread_id, - profile=profile, - agent_id=agent_id, - input_str=input_str, - history=history, - output_queue=output_queue, - ) - - try: - await processor.process_stream() - except Exception as e: - logger.error(f"Error processing chat message: {e}") - raise - finally: - # Clean up job info - if job_id_str in running_jobs: - del running_jobs[job_id_str] - - @staticmethod - def get_job_history(thread_id: UUID, profile_id: UUID) -> List[Dict[str, Any]]: - """Get the chat history for a specific job. 
- - Args: - thread_id: The ID of the thread - profile_id: The ID of the profile - - Returns: - List of formatted chat messages - """ - logger.debug( - f"Fetching job history for thread {thread_id} and profile {profile_id}" - ) - jobs = backend.list_jobs(filters=JobFilter(thread_id=thread_id)) - formatted_history = [] - for job in jobs: - if job.profile_id == profile_id: - # Get all steps first to determine proper timing - steps = backend.list_steps(filters=StepFilter(job_id=job.id)) - - # Create a timeline of all messages per job - job_messages = [] - - # Add user message - job_messages.append( - { - "role": "user", - "content": job.input, - "created_at": job.created_at.isoformat(), - "thread_id": str(thread_id), - "type": "user", - } - ) - - # Add planning steps with original timestamps - planning_steps = [ - step - for step in steps - if step.status == "planning" or step.thought == "Planning Phase" - ] - for step in planning_steps: - job_messages.append( - { - "role": "assistant", - "content": step.content, - "created_at": step.created_at.isoformat(), - "thread_id": str(thread_id), - "type": "step", - "status": "planning", - "thought": step.thought, - } - ) - - # Add the final response with correct timestamp - has_final_result = job.result and job.result.strip() - if has_final_result: - # For the final result, look for its step to get the correct timestamp - final_step = None - for step in steps: - if step.status == "complete" and step.content == job.result: - final_step = step - break - - # Use the job's result as the primary response - job_messages.append( - { - "role": "assistant", - "content": job.result, - "created_at": ( - final_step.created_at.isoformat() - if final_step - else job.created_at.isoformat() - ), - "thread_id": str(thread_id), - "type": "token", - "status": "complete", - } - ) - else: - # If no job result, look for complete step content - final_steps = [ - step - for step in steps - if step.status == "complete" and step.content and not step.tool - ] - - if final_steps: - # Use the last complete step's content - final_step = max(final_steps, key=lambda s: s.created_at) - job_messages.append( - { - "role": "assistant", - "content": final_step.content, - "created_at": final_step.created_at.isoformat(), - "thread_id": str(thread_id), - "type": "token", - "status": "complete", - } - ) - elif steps: - # No complete steps with content, use all non-tool steps to reconstruct - content_steps = [ - step - for step in steps - if step.content - and not step.tool - and step.status != "planning" - ] - - if content_steps: - # Sort by creation time - content_steps.sort(key=lambda s: s.created_at) - # Use all content joined together - combined_content = " ".join( - step.content for step in content_steps - ) - - job_messages.append( - { - "role": "assistant", - "content": combined_content, - "created_at": job.created_at.isoformat(), - "thread_id": str(thread_id), - "type": "token", - "status": "complete", - } - ) - - # Add tool steps with their original timestamps - for step in steps: - if step.tool: - tool_msg = { - "role": "assistant", - "type": "tool", - "status": step.status or "complete", - "tool": step.tool, - "tool_input": step.tool_input, - "tool_output": step.tool_output, - "created_at": step.created_at.isoformat(), - "thread_id": str(thread_id), - } - if step.agent_id: - tool_msg["agent_id"] = str(step.agent_id) - job_messages.append(tool_msg) - - # Sort this job's messages by timestamp - job_messages.sort(key=lambda x: x["created_at"]) - - # Add all job messages to the 
history - formatted_history.extend(job_messages) - - # Sort the full history again to ensure proper ordering - formatted_history.sort(key=lambda x: x["created_at"]) - - logger.debug(f"Found {len(formatted_history)} messages in job history") - return formatted_history - - @staticmethod - def get_thread_history(thread_id: UUID, profile_id: UUID) -> List[Dict[str, Any]]: - """Get the complete thread history including all steps. - - Args: - thread_id: The ID of the thread - profile_id: The ID of the profile - - Returns: - List of formatted chat messages and steps - """ - logger.debug( - f"Fetching thread history for thread {thread_id} and profile {profile_id}" - ) - thread = backend.get_thread(thread_id=thread_id) - if thread.profile_id != profile_id: - logger.warning( - f"Profile {profile_id} not authorized for thread {thread_id}" - ) - return [] - - jobs = backend.list_jobs(filters=JobFilter(thread_id=thread.id)) - formatted_history = [] - if jobs: - for job in jobs: - logger.debug(f"Processing job {job}") - # Get all steps for this job first to determine proper timing - steps = backend.list_steps(filters=StepFilter(job_id=job.id)) - - # Create a timeline of all messages per job - job_messages = [] - - # Add user input message - job_messages.append( - { - "role": "user", - "content": job.input, - "created_at": job.created_at.isoformat(), - "thread_id": str(thread.id), - "type": "user", - } - ) - - # Add planning steps with their original timestamps - planning_steps = [ - step - for step in steps - if step.status == "planning" or step.thought == "Planning Phase" - ] - for step in planning_steps: - job_messages.append( - { - "role": step.role, - "content": step.content, - "created_at": step.created_at.isoformat(), - "thought": step.thought, - "thread_id": str(thread.id), - "type": "step", - "status": "planning", - } - ) - - # Add result or final content with correct timestamp - has_final_result = job.result and job.result.strip() - if has_final_result: - # For the final result, look for its step to get the correct timestamp - final_step = None - for step in steps: - if step.status == "complete" and step.content == job.result: - final_step = step - break - - # Use the job's result - job_messages.append( - { - "role": "assistant", - "content": job.result, - "created_at": ( - final_step.created_at.isoformat() - if final_step - else job.created_at.isoformat() - ), - "thread_id": str(thread.id), - "type": "token", - "status": "complete", - } - ) - else: - # No result in job, find the final step's content - final_steps = [ - step - for step in steps - if step.status == "complete" and step.content and not step.tool - ] - - if final_steps: - # Use the last complete step's content - final_step = max(final_steps, key=lambda s: s.created_at) - job_messages.append( - { - "role": "assistant", - "content": final_step.content, - "created_at": final_step.created_at.isoformat(), - "thread_id": str(thread.id), - "type": "token", - "status": "complete", - } - ) - - # Add tool steps with their original timestamps - for step in steps: - if step.tool: - tool_msg = { - "role": "assistant", - "content": step.content if step.content else "", - "created_at": step.created_at.isoformat(), - "thread_id": str(thread.id), - "type": "tool", - "status": step.status or "complete", - "tool": step.tool, - "tool_input": step.tool_input, - "tool_output": step.tool_output, - } - if step.agent_id: - tool_msg["agent_id"] = str(step.agent_id) - job_messages.append(tool_msg) - - # Sort this job's messages by timestamp - 
-                job_messages.sort(key=lambda x: x["created_at"])
-
-                # Add all job messages to the history
-                formatted_history.extend(job_messages)
-
-        logger.debug(f"Found {len(formatted_history)} messages in thread history")
-        return formatted_history
-
-
-async def mark_jobs_disconnected_for_session(session_id: str) -> None:
-    """Mark all running jobs associated with a session as disconnected.
-
-    Args:
-        session_id: The session ID to mark jobs for
-    """
-    disconnected_count = 0
-    for job_id, job_info in running_jobs.items():
-        if job_info.get("task") and job_info.get("connection_active", True):
-            logger.info(
-                f"Marking job {job_id} as disconnected due to WebSocket disconnect for session {session_id}"
-            )
-            job_info["connection_active"] = False
-            disconnected_count += 1
-
-    if disconnected_count > 0:
-        logger.info(
-            f"Marked {disconnected_count} jobs as disconnected for session {session_id}"
-        )
-    else:
-        logger.debug(f"No active jobs found for session {session_id}")
-
-
-# For backward compatibility
-process_chat_message = ChatService.process_chat_message
-get_job_history = ChatService.get_job_history
-get_thread_history = ChatService.get_thread_history
diff --git a/services/processing/message_handler.py b/services/processing/message_handler.py
new file mode 100644
index 00000000..e5fbf6cc
--- /dev/null
+++ b/services/processing/message_handler.py
@@ -0,0 +1,85 @@
+import datetime
+from dataclasses import dataclass
+from typing import Any, Dict, Optional
+
+
+@dataclass
+class Message:
+    """Base message structure for chat communication."""
+
+    content: str
+    type: str
+    thread_id: str
+    tool: Optional[str] = None
+    tool_input: Optional[str] = None
+    tool_output: Optional[str] = None
+    agent_id: Optional[str] = None
+    role: str = "assistant"
+    status: Optional[str] = None
+    created_at: Optional[str] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert message to dictionary, excluding None values."""
+        return {k: v for k, v in self.__dict__.items() if v is not None}
+
+
+class MessageHandler:
+    """Handler for token-type messages."""
+
+    def process_token_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
+        """Process a token message and prepare it for streaming."""
+        # Default to processing status if not specified
+        status = message.get("status", "processing")
+
+        return {
+            "type": "token",
+            "status": status,  # Use the status or default to "processing"
+            "content": message.get("content", ""),
+            "created_at": datetime.datetime.now().isoformat(),
+            "role": "assistant",
+            "thread_id": message.get("thread_id"),
+            "agent_id": message.get("agent_id"),
+        }
+
+
+class StepMessageHandler:
+    """Handler for step/planning messages."""
+
+    def process_step_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
+        """Process a planning step message."""
+        # Ensure we have a timestamp for proper ordering
+        timestamp = datetime.datetime.now().isoformat()
+
+        return {
+            "type": "step",
+            "status": "planning",  # Always use planning status for steps
+            "content": message.get("content", ""),
+            "thought": message.get("thought"),
+            "created_at": message.get("created_at", timestamp),
+            "role": "assistant",
+            "thread_id": message.get("thread_id"),
+            "agent_id": message.get("agent_id"),
+            # Add a special flag to identify this as a planning-only message
+            "planning_only": True,
+        }
+
+
+class ToolExecutionHandler:
+    """Handler for tool execution messages."""
+
+    def process_tool_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
+        """Process a tool execution message."""
+        # Use provided status or default to "processing"
+        status = message.get("status", "processing")
+
+        return {
+            "role": "assistant",
+            "type": "tool",
+            "status": status,  # Use the exact status passed from the tool execution
+            "tool": message.get("tool"),
+            "tool_input": message.get("input"),
+            "tool_output": message.get("output"),
+            "created_at": datetime.datetime.now().isoformat(),
+            "thread_id": message.get("thread_id"),
+            "agent_id": message.get("agent_id"),
+        }
diff --git a/services/processing/streaming_service.py b/services/processing/streaming_service.py
new file mode 100644
index 00000000..f54b5f57
--- /dev/null
+++ b/services/processing/streaming_service.py
@@ -0,0 +1,46 @@
+import asyncio
+from concurrent.futures import ThreadPoolExecutor
+from typing import Dict, Optional, TypedDict
+from uuid import UUID
+
+from lib.logger import configure_logger
+
+logger = configure_logger(__name__)
+
+
+class JobInfo(TypedDict):
+    """Information about a running job."""
+
+    queue: asyncio.Queue
+    thread_id: UUID
+    agent_id: Optional[UUID]
+    task: Optional[asyncio.Task]
+    connection_active: bool
+
+
+# Global job tracking
+thread_pool = ThreadPoolExecutor()
+running_jobs: Dict[str, JobInfo] = {}
+
+
+async def mark_jobs_disconnected_for_session(session_id: str) -> None:
+    """Mark all running jobs associated with a session as disconnected.
+
+    Args:
+        session_id: The session ID to mark jobs for
+    """
+    disconnected_count = 0
+    for job_id, job_info in running_jobs.items():
+        if job_info.get("task") and job_info.get("connection_active", True):
+            logger.info(
+                f"Marking job {job_id} as disconnected due to WebSocket disconnect for session {session_id}"
+            )
+            job_info["connection_active"] = False
+            disconnected_count += 1
+
+    if disconnected_count > 0:
+        logger.info(
+            f"Marked {disconnected_count} jobs as disconnected for session {session_id}"
+        )
+    else:
+        logger.debug(f"No active jobs found for session {session_id}")
diff --git a/services/runner/__init__.py b/services/runner/__init__.py
deleted file mode 100644
index e3506e17..00000000
--- a/services/runner/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""Runner module for executing tasks such as DAO processing and Twitter interactions."""
-
-from services.runner.base import BaseTask, JobContext, JobType
-from services.runner.job_manager import JobConfig, JobManager
-from services.runner.registry import JobRegistry, execute_runner_job
-from services.runner.tasks.agent_account_deployer import (
-    AgentAccountDeployerTask,
-    agent_account_deployer,
-)
-from services.runner.tasks.dao_proposal_concluder import (
-    DAOProposalConcluderTask,
-    dao_proposal_concluder,
-)
-from services.runner.tasks.dao_proposal_evaluation import (
-    DAOProposalEvaluationTask,
-    dao_proposal_evaluation,
-)
-from services.runner.tasks.dao_proposal_voter import (
-    DAOProposalVoterTask,
-    dao_proposal_voter,
-)
-from services.runner.tasks.dao_task import DAOTask, dao_task
-from services.runner.tasks.dao_tweet_task import DAOTweetTask, dao_tweet_task
-from services.runner.tasks.tweet_task import TweetTask, tweet_task
-
-# Register tasks with the registry
-JobRegistry.register(JobType.DAO, DAOTask)
-JobRegistry.register(JobType.DAO_PROPOSAL_VOTE, DAOProposalVoterTask)
-JobRegistry.register(JobType.DAO_PROPOSAL_CONCLUDE, DAOProposalConcluderTask)
-JobRegistry.register(JobType.DAO_PROPOSAL_EVALUATION, DAOProposalEvaluationTask)
-JobRegistry.register(JobType.DAO_TWEET, DAOTweetTask)
-JobRegistry.register(JobType.TWEET, TweetTask)
-JobRegistry.register(JobType.AGENT_ACCOUNT_DEPLOY,
AgentAccountDeployerTask) - -__all__ = [ - "BaseTask", - "JobContext", - "JobRegistry", - "JobType", - "JobConfig", - "JobManager", - "DAOTask", - "dao_task", - "DAOProposalVoterTask", - "dao_proposal_voter", - "DAOTweetTask", - "dao_tweet_task", - "TweetTask", - "tweet_task", - "execute_runner_job", - "DAOProposalConcluderTask", - "dao_proposal_concluder", - "DAOProposalEvaluationTask", - "dao_proposal_evaluation", - "AgentAccountDeployerTask", - "agent_account_deployer", -] diff --git a/services/runner/base.py b/services/runner/base.py deleted file mode 100644 index 6e072586..00000000 --- a/services/runner/base.py +++ /dev/null @@ -1,205 +0,0 @@ -import os -from abc import ABC, abstractmethod -from dataclasses import dataclass -from enum import Enum -from typing import Any, Dict, Generic, List, Optional, Type, TypeVar -from uuid import UUID - -from backend.models import QueueMessageType -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -@dataclass -class RunnerResult: - """Base class for runner operation results.""" - - success: bool - message: str - error: Optional[Exception] = None - - -T = TypeVar("T", bound=RunnerResult) - - -def get_required_env_var(name: str) -> UUID: - """Get a required environment variable and convert it to UUID.""" - value = os.getenv(name) - if not value: - raise ValueError(f"{name} environment variable is not set") - return UUID(value) - - -@dataclass -class RunnerConfig: - """Configuration class for runners.""" - - twitter_profile_id: UUID - twitter_agent_id: UUID - twitter_wallet_id: UUID - - @classmethod - def from_env(cls) -> "RunnerConfig": - """Create configuration from environment variables.""" - from backend.factory import backend - from backend.models import WalletFilter - - twitter_profile_id = get_required_env_var("AIBTC_TWITTER_PROFILE_ID") - twitter_agent_id = get_required_env_var("AIBTC_TWITTER_AGENT_ID") - - twitter_wallet = backend.list_wallets( - filters=WalletFilter(profile_id=twitter_profile_id) - ) - if not twitter_wallet: - logger.critical( - "No Twitter wallet found - critical system component missing" - ) - raise RuntimeError("Twitter wallet not found") - - return cls( - twitter_profile_id=twitter_profile_id, - twitter_agent_id=twitter_agent_id, - twitter_wallet_id=twitter_wallet[0].id, - ) - - -class JobType(str, Enum): - """Types of jobs that can be run.""" - - DAO = "dao" - DAO_PROPOSAL_VOTE = "dao_proposal_vote" - DAO_PROPOSAL_CONCLUDE = "dao_proposal_conclude" - DAO_PROPOSAL_EVALUATION = "dao_proposal_evaluation" - DAO_TWEET = "dao_tweet" - TWEET = "tweet" - AGENT_ACCOUNT_DEPLOY = "agent_account_deploy" - - def __str__(self): - return self.value - - -@dataclass -class JobContext: - """Context information for job execution.""" - - job_type: JobType - config: RunnerConfig - parameters: Optional[Dict[str, Any]] = None - retry_count: int = 0 - max_retries: int = 3 - - -class BaseTask(ABC, Generic[T]): - """Base class for all tasks.""" - - def __init__(self, config: Optional[RunnerConfig] = None): - self.config = config or RunnerConfig.from_env() - self._start_time: Optional[float] = None - - @property - def task_name(self) -> str: - """Get the task name for logging purposes.""" - return self.__class__.__name__ - - def _log_task_start(self) -> None: - """Log task start with standard format.""" - import time - - self._start_time = time.time() - logger.info(f"Starting task: {self.task_name}") - logger.debug(f"{self.task_name}: Configuration - {self.config}") - - def _log_task_completion(self, results: 
List[T]) -> None: - """Log task completion with standard format and metrics.""" - import time - - if not self._start_time: - return - - duration = time.time() - self._start_time - success_count = len([r for r in results if r.success]) - failure_count = len([r for r in results if not r.success]) - - logger.info( - f"Completed task: {self.task_name} in {duration:.2f}s - " - f"Success: {success_count}, Failures: {failure_count}" - ) - - if failure_count > 0: - for result in results: - if not result.success: - logger.error(f"{self.task_name} failure: {result.message}") - - @classmethod - def get_result_class(cls) -> Type[RunnerResult]: - """Get the result class for this task.""" - return cls.__orig_bases__[0].__args__[0] # type: ignore - - async def validate(self, context: JobContext) -> bool: - """Validate that the task can be executed. - - This method provides a validation pipeline: - 1. Configuration validation - 2. Prerequisites validation - 3. Task-specific validation - """ - try: - logger.debug(f"Starting validation for {self.task_name}") - - # Step 1: Configuration validation - if not await self._validate_config(context): - logger.warning(f"{self.task_name}: Configuration validation failed") - return False - - # Step 2: Prerequisites validation - if not await self._validate_prerequisites(context): - logger.debug(f"{self.task_name}: Prerequisites validation failed") - return False - - # Step 3: Task-specific validation - if not await self._validate_task_specific(context): - logger.debug(f"{self.task_name}: Task-specific validation failed") - return False - - logger.debug(f"{self.task_name}: All validation checks passed") - return True - except Exception as e: - logger.error( - f"Error in validation for {self.task_name}: {str(e)}", exc_info=True - ) - return False - - async def _validate_config(self, context: JobContext) -> bool: - """Validate task configuration.""" - return True - - async def _validate_prerequisites(self, context: JobContext) -> bool: - """Validate task prerequisites.""" - return True - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - return True - - async def execute(self, context: JobContext) -> List[T]: - """Execute the task with given context.""" - self._log_task_start() - try: - results = await self._execute_impl(context) - self._log_task_completion(results) - return results - except Exception as e: - logger.error(f"Error executing {self.task_name}: {str(e)}", exc_info=True) - result_class = self.get_result_class() - return [ - result_class( - success=False, message=f"Error executing task: {str(e)}", error=e - ) - ] - - @abstractmethod - async def _execute_impl(self, context: JobContext) -> List[T]: - """Implementation of task execution logic. 
- This method should be implemented by subclasses.""" - pass diff --git a/services/runner/job_manager.py b/services/runner/job_manager.py deleted file mode 100644 index e8ded37a..00000000 --- a/services/runner/job_manager.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Job management utilities for the runner service.""" - -from dataclasses import dataclass -from typing import Any, Callable, List, Optional, cast - -from apscheduler.schedulers.asyncio import AsyncIOScheduler - -from config import config -from lib.logger import configure_logger - -from .base import JobType -from .registry import execute_runner_job - -logger = configure_logger(__name__) - - -@dataclass -class JobConfig: - """Configuration for a scheduled job.""" - - name: str - enabled: bool - func: Callable - seconds: int - args: Optional[List[Any]] = None - job_id: Optional[str] = None - - -class JobManager: - """Manager for scheduled jobs.""" - - @staticmethod - def get_all_jobs() -> List[JobConfig]: - """Get configurations for all available jobs. - - Returns: - List of job configurations - """ - # Static configurations for built-in jobs - jobs = [ - JobConfig( - name="Twitter Service", - enabled=config.twitter.enabled, - func=cast( - Callable, "execute_twitter_job" - ), # Import at runtime to avoid circular imports - seconds=config.twitter.interval_seconds, - job_id="twitter_service", - ), - JobConfig( - name="Schedule Sync Service", - enabled=config.scheduler.sync_enabled, - func=cast( - Callable, "sync_schedules" - ), # Import at runtime to avoid circular imports - seconds=config.scheduler.sync_interval_seconds, - args=[ - "scheduler" - ], # Special case - will be replaced with actual scheduler - job_id="schedule_sync_service", - ), - ] - - # Add runner jobs (could be extended with more job types) - runner_jobs = [ - ( - "DAO Runner Service", - config.scheduler.dao_runner_enabled, - config.scheduler.dao_runner_interval_seconds, - JobType.DAO.value, - ), - ( - "DAO Tweet Runner Service", - config.scheduler.dao_tweet_runner_enabled, - config.scheduler.dao_tweet_runner_interval_seconds, - JobType.DAO_TWEET.value, - ), - ( - "Tweet Runner Service", - config.scheduler.tweet_runner_enabled, - config.scheduler.tweet_runner_interval_seconds, - JobType.TWEET.value, - ), - ( - "DAO Proposal Vote Runner Service", - config.scheduler.dao_proposal_vote_runner_enabled, - config.scheduler.dao_proposal_vote_runner_interval_seconds, - JobType.DAO_PROPOSAL_VOTE.value, - ), - ( - "DAO Proposal Conclude Runner Service", - config.scheduler.dao_proposal_conclude_runner_enabled, - config.scheduler.dao_proposal_conclude_runner_interval_seconds, - JobType.DAO_PROPOSAL_CONCLUDE.value, - ), - ( - "DAO Proposal Evaluation Runner Service", - config.scheduler.dao_proposal_evaluation_runner_enabled, - config.scheduler.dao_proposal_evaluation_runner_interval_seconds, - JobType.DAO_PROPOSAL_EVALUATION.value, - ), - ( - "Agent Account Deploy Runner Service", - config.scheduler.agent_account_deploy_runner_enabled, - config.scheduler.agent_account_deploy_runner_interval_seconds, - JobType.AGENT_ACCOUNT_DEPLOY.value, - ), - ] - - # Add all runner jobs with common structure - for name, enabled, seconds, job_type in runner_jobs: - jobs.append( - JobConfig( - name=name, - enabled=enabled, - func=execute_runner_job, - seconds=seconds, - args=[job_type], - job_id=f"{job_type}_runner", - ) - ) - - return jobs - - @staticmethod - def schedule_jobs(scheduler: AsyncIOScheduler) -> bool: - """Schedule all enabled jobs. 
- - Args: - scheduler: The scheduler to add jobs to - - Returns: - True if any jobs were scheduled, False otherwise - """ - # Import at runtime to avoid circular imports - from services.schedule import sync_schedules - from services.twitter import execute_twitter_job - - # Get all job configurations - jobs = JobManager.get_all_jobs() - - # Map function names to actual functions - func_map = { - "execute_twitter_job": execute_twitter_job, - "sync_schedules": sync_schedules, - } - - # Add enabled jobs to the scheduler - any_enabled = False - for job in jobs: - if job.enabled: - any_enabled = True - - # Handle special cases - job_func = job.func - if isinstance(job_func, str): - job_func = func_map.get(job_func, job_func) - - job_args = {} - if job.args: - # Special case for scheduler argument - if "scheduler" in job.args: - job_args["args"] = [scheduler] - else: - job_args["args"] = job.args - - # Add the job with a specific ID for easier management - job_id = job.job_id or f"{job.name.lower().replace(' ', '_')}" - scheduler.add_job( - job_func, "interval", seconds=job.seconds, id=job_id, **job_args - ) - logger.info( - f"{job.name} started with interval of {job.seconds} seconds" - ) - else: - logger.info(f"{job.name} is disabled") - - return any_enabled diff --git a/services/runner/tasks/__init__.py b/services/runner/tasks/__init__.py deleted file mode 100644 index e1992934..00000000 --- a/services/runner/tasks/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Task runners for scheduled and on-demand jobs.""" - -from .dao_proposal_concluder import DAOProposalConcluderTask, dao_proposal_concluder -from .dao_proposal_evaluation import DAOProposalEvaluationTask, dao_proposal_evaluation -from .dao_proposal_voter import DAOProposalVoterTask, dao_proposal_voter -from .dao_task import DAOTask, dao_task -from .dao_tweet_task import DAOTweetTask, dao_tweet_task -from .tweet_task import TweetTask, tweet_task - -__all__ = [ - "DAOTask", - "dao_task", - "DAOProposalVoterTask", - "dao_proposal_voter", - "DAOTweetTask", - "dao_tweet_task", - "TweetTask", - "tweet_task", - "DAOProposalConcluderTask", - "dao_proposal_concluder", - "DAOProposalEvaluationTask", - "dao_proposal_evaluation", -] diff --git a/services/runner/tasks/agent_account_deployer.py b/services/runner/tasks/agent_account_deployer.py deleted file mode 100644 index 051e4437..00000000 --- a/services/runner/tasks/agent_account_deployer.py +++ /dev/null @@ -1,173 +0,0 @@ -"""Agent account deployment task implementation.""" - -from dataclasses import dataclass -from typing import Any, Dict, List - -from backend.factory import backend -from backend.models import ( - QueueMessage, - QueueMessageBase, - QueueMessageFilter, - QueueMessageType, -) -from config import config -from lib.logger import configure_logger -from services.runner.base import BaseTask, JobContext, RunnerResult -from tools.smartwallet import SmartWalletDeploySmartWalletTool - -logger = configure_logger(__name__) - - -@dataclass -class AgentAccountDeployResult(RunnerResult): - """Result of agent account deployment operation.""" - - accounts_processed: int = 0 - accounts_deployed: int = 0 - errors: List[str] = None - - def __post_init__(self): - self.errors = self.errors or [] - - -class AgentAccountDeployerTask(BaseTask[AgentAccountDeployResult]): - """Task runner for deploying agent accounts.""" - - QUEUE_TYPE = QueueMessageType.AGENT_ACCOUNT_DEPLOY - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - # Get 
pending messages from the queue - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug( - f"Found {message_count} pending agent account deployment messages" - ) - - if message_count == 0: - logger.info("No pending agent account deployment messages found") - return False - - # Validate that at least one message has valid deployment data - for message in pending_messages: - message_data = message.message or {} - if self._validate_message_data(message_data): - logger.info("Found valid agent account deployment message") - return True - - logger.warning("No valid deployment data found in pending messages") - return False - - except Exception as e: - logger.error( - f"Error validating agent account deployment task: {str(e)}", - exc_info=True, - ) - return False - - def _validate_message_data(self, message_data: Dict[str, Any]) -> bool: - """Validate the message data contains required fields.""" - required_fields = [ - "owner_address", - "dao_token_contract", - "dao_token_dex_contract", - ] - return all(field in message_data for field in required_fields) - - async def process_message(self, message: QueueMessage) -> Dict[str, Any]: - """Process a single agent account deployment message.""" - message_id = message.id - message_data = message.message or {} - - logger.debug(f"Processing agent account deployment message {message_id}") - - try: - # Validate message data - if not self._validate_message_data(message_data): - error_msg = f"Invalid message data in message {message_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Initialize the SmartWalletDeploySmartWalletTool - logger.debug("Preparing to deploy agent account") - deploy_tool = SmartWalletDeploySmartWalletTool( - wallet_id=config.scheduler.agent_account_deploy_runner_wallet_id - ) - - # Execute the deployment - logger.debug("Executing deployment...") - deployment_result = await deploy_tool._arun( - owner_address=message_data["owner_address"], - dao_token_contract=message_data["dao_token_contract"], - dao_token_dex_contract=message_data["dao_token_dex_contract"], - ) - logger.debug(f"Deployment result: {deployment_result}") - - # Mark the message as processed - update_data = QueueMessageBase(is_processed=True) - backend.update_queue_message(message_id, update_data) - - return {"success": True, "deployed": True, "result": deployment_result} - - except Exception as e: - error_msg = f"Error processing message {message_id}: {str(e)}" - logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} - - async def get_pending_messages(self) -> List[QueueMessage]: - """Get all unprocessed messages from the queue.""" - filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) - return backend.list_queue_messages(filters=filters) - - async def _execute_impl( - self, context: JobContext - ) -> List[AgentAccountDeployResult]: - """Run the agent account deployment task.""" - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending agent account deployment messages") - - if not pending_messages: - return [ - AgentAccountDeployResult( - success=True, - message="No pending messages found", - accounts_processed=0, - accounts_deployed=0, - ) - ] - - # Process each message - processed_count = 0 - deployed_count = 0 - errors = [] - - for message in pending_messages: - result = await self.process_message(message) - processed_count += 1 - - if 
result.get("success"): - if result.get("deployed", False): - deployed_count += 1 - else: - errors.append(result.get("error", "Unknown error")) - - logger.debug( - f"Task metrics - Processed: {processed_count}, " - f"Deployed: {deployed_count}, Errors: {len(errors)}" - ) - - return [ - AgentAccountDeployResult( - success=True, - message=f"Processed {processed_count} account(s), deployed {deployed_count} account(s)", - accounts_processed=processed_count, - accounts_deployed=deployed_count, - errors=errors, - ) - ] - - -# Instantiate the task for use in the registry -agent_account_deployer = AgentAccountDeployerTask() diff --git a/services/runner/tasks/dao_proposal_concluder.py b/services/runner/tasks/dao_proposal_concluder.py deleted file mode 100644 index b212c8e2..00000000 --- a/services/runner/tasks/dao_proposal_concluder.py +++ /dev/null @@ -1,199 +0,0 @@ -"""DAO proposal conclusion task implementation.""" - -from dataclasses import dataclass -from typing import Any, Dict, List - -from backend.factory import backend -from backend.models import ( - QueueMessage, - QueueMessageBase, - QueueMessageFilter, - QueueMessageType, - TokenFilter, -) -from config import config -from lib.logger import configure_logger -from services.runner.base import BaseTask, JobContext, RunnerResult -from tools.dao_ext_action_proposals import ConcludeActionProposalTool - -logger = configure_logger(__name__) - - -@dataclass -class DAOProposalConcludeResult(RunnerResult): - """Result of DAO proposal conclusion operation.""" - - proposals_processed: int = 0 - proposals_concluded: int = 0 - errors: List[str] = None - - def __post_init__(self): - self.errors = self.errors or [] - - -class DAOProposalConcluderTask(BaseTask[DAOProposalConcludeResult]): - """Task runner for processing and concluding DAO proposals.""" - - QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_CONCLUDE - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - # Get pending messages from the queue - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal conclusion messages") - - if message_count == 0: - logger.info("No pending proposal conclusion messages found") - return False - - # Validate that at least one message has a valid proposal - for message in pending_messages: - message_data = message.message or {} - proposal_id = message_data.get("proposal_id") - - if not proposal_id: - logger.warning(f"Message {message.id} missing proposal_id") - continue - - # Check if the proposal exists in the database - proposal = backend.get_proposal(proposal_id) - if proposal: - logger.info(f"Found valid proposal {proposal_id} to conclude") - return True - else: - logger.warning(f"Proposal {proposal_id} not found in database") - - logger.warning("No valid proposals found in pending messages") - return False - - except Exception as e: - logger.error( - f"Error validating proposal conclusion task: {str(e)}", exc_info=True - ) - return False - - async def process_message(self, message: QueueMessage) -> Dict[str, Any]: - """Process a single DAO proposal conclusion message.""" - message_id = message.id - message_data = message.message or {} - dao_id = message.dao_id - - logger.debug(f"Processing proposal conclusion message {message_id}") - - # Get the proposal ID from the message - proposal_id = message_data.get("proposal_id") - if not proposal_id: - error_msg = f"Missing proposal_id in message {message_id}" - 
logger.error(error_msg) - return {"success": False, "error": error_msg} - - try: - # Get the proposal details from the database - proposal = backend.get_proposal(proposal_id) - if not proposal: - error_msg = f"Proposal {proposal_id} not found in database" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Get the DAO information - dao = backend.get_dao(dao_id) if dao_id else None - if not dao: - error_msg = f"DAO not found for proposal {proposal_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Get the DAO token information - tokens = backend.list_tokens(filters=TokenFilter(dao_id=dao_id)) - if not tokens: - error_msg = f"No token found for DAO: {dao_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Use the first token as the DAO token - dao_token = tokens[0] - - # Initialize the ConcludeActionProposalTool - logger.debug(f"Preparing to conclude proposal {proposal.proposal_id}") - conclude_tool = ConcludeActionProposalTool( - wallet_id=config.scheduler.dao_proposal_conclude_runner_wallet_id - ) - - # Execute the conclusion - logger.debug("Executing conclusion...") - conclusion_result = await conclude_tool._arun( - action_proposals_voting_extension=proposal.contract_principal, # This is the voting extension contract - proposal_id=proposal.proposal_id, # This is the on-chain proposal ID - action_proposal_contract_to_execute=proposal.action, # This is the contract that will be executed - dao_token_contract_address=dao_token.contract_principal, # This is the DAO token contract - ) - logger.debug(f"Conclusion result: {conclusion_result}") - - # Mark the message as processed - update_data = QueueMessageBase(is_processed=True) - backend.update_queue_message(message_id, update_data) - - return {"success": True, "concluded": True, "result": conclusion_result} - - except Exception as e: - error_msg = f"Error processing message {message_id}: {str(e)}" - logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} - - async def get_pending_messages(self) -> List[QueueMessage]: - """Get all unprocessed messages from the queue.""" - filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) - return backend.list_queue_messages(filters=filters) - - async def _execute_impl( - self, context: JobContext - ) -> List[DAOProposalConcludeResult]: - """Run the DAO proposal conclusion task.""" - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal conclusion messages") - - if not pending_messages: - return [ - DAOProposalConcludeResult( - success=True, - message="No pending messages found", - proposals_processed=0, - proposals_concluded=0, - ) - ] - - # Process each message - processed_count = 0 - concluded_count = 0 - errors = [] - - for message in pending_messages: - result = await self.process_message(message) - processed_count += 1 - - if result.get("success"): - if result.get("concluded", False): - concluded_count += 1 - else: - errors.append(result.get("error", "Unknown error")) - - logger.debug( - f"Task metrics - Processed: {processed_count}, " - f"Concluded: {concluded_count}, Errors: {len(errors)}" - ) - - return [ - DAOProposalConcludeResult( - success=True, - message=f"Processed {processed_count} proposal(s), concluded {concluded_count} proposal(s)", - proposals_processed=processed_count, - proposals_concluded=concluded_count, - errors=errors, - ) - ] - - -# Instantiate the 
task for use in the registry -dao_proposal_concluder = DAOProposalConcluderTask() diff --git a/services/runner/tasks/dao_proposal_evaluation.py b/services/runner/tasks/dao_proposal_evaluation.py deleted file mode 100644 index d1f1dd6b..00000000 --- a/services/runner/tasks/dao_proposal_evaluation.py +++ /dev/null @@ -1,255 +0,0 @@ -"""DAO proposal evaluation task implementation.""" - -from dataclasses import dataclass -from typing import Any, Dict, List - -from backend.factory import backend -from backend.models import ( - QueueMessage, - QueueMessageBase, - QueueMessageCreate, - QueueMessageFilter, - QueueMessageType, - VoteBase, - VoteCreate, -) -from lib.logger import configure_logger -from services.runner.base import BaseTask, JobContext, RunnerResult -from services.workflows.proposal_evaluation import evaluate_and_vote_on_proposal - -logger = configure_logger(__name__) - - -@dataclass -class DAOProposalEvaluationResult(RunnerResult): - """Result of DAO proposal evaluation operation.""" - - proposals_processed: int = 0 - proposals_evaluated: int = 0 - errors: List[str] = None - - def __post_init__(self): - self.errors = self.errors or [] - - -class DAOProposalEvaluationTask(BaseTask[DAOProposalEvaluationResult]): - """Task runner for evaluating DAO proposals.""" - - QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_EVALUATION - DEFAULT_CONFIDENCE_THRESHOLD = 0.7 - DEFAULT_AUTO_VOTE = False - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - # Get pending messages from the queue - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal evaluation messages") - - if message_count == 0: - logger.info("No pending proposal evaluation messages found") - return False - - # Validate that at least one message has a valid proposal - for message in pending_messages: - message_data = message.message or {} - proposal_id = message_data.get("proposal_id") - - if not proposal_id: - logger.warning(f"Message {message.id} missing proposal_id") - continue - - # Check if the proposal exists in the database - proposal = backend.get_proposal(proposal_id) - if proposal: - logger.info(f"Found valid proposal {proposal_id} to process") - return True - else: - logger.warning(f"Proposal {proposal_id} not found in database") - - logger.warning("No valid proposals found in pending messages") - return False - - except Exception as e: - logger.error( - f"Error validating proposal evaluation task: {str(e)}", exc_info=True - ) - return False - - async def process_message(self, message: QueueMessage) -> Dict[str, Any]: - """Process a single DAO proposal evaluation message.""" - message_id = message.id - message_data = message.message or {} - wallet_id = message.wallet_id - dao_id = message.dao_id - - logger.debug( - f"Processing proposal evaluation message {message_id} for wallet {wallet_id}" - ) - - # Get the proposal ID from the message - proposal_id = message_data.get("proposal_id") - if not proposal_id: - error_msg = f"Missing proposal_id in message {message_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - try: - # Get the proposal details from the database - proposal = backend.get_proposal(proposal_id) - if not proposal: - error_msg = f"Proposal {proposal_id} not found in database" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Get the DAO information - dao = backend.get_dao(dao_id) if dao_id 
else None - if not dao: - error_msg = f"DAO not found for proposal {proposal_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Execute the proposal evaluation workflow - logger.info(f"Evaluating proposal {proposal.id} for DAO {dao.name}") - - result = await evaluate_and_vote_on_proposal( - proposal_id=proposal.id, - wallet_id=wallet_id, - auto_vote=self.DEFAULT_AUTO_VOTE, # Don't auto-vote, just evaluate - confidence_threshold=self.DEFAULT_CONFIDENCE_THRESHOLD, - dao_id=dao_id, - ) - - # Extract evaluation results - evaluation = result.get("evaluation", {}) - approval = evaluation.get("approve", False) - confidence = evaluation.get("confidence_score", 0.0) - reasoning = evaluation.get("reasoning", "No reasoning provided") - formatted_prompt = result.get("formatted_prompt", "No prompt provided") - total_cost = result.get("token_costs", {}).get("total_cost", 0.0) - model = result.get("model_info", {}).get("name", "Unknown") - - logger.info( - f"Proposal {proposal.id} ({dao.name}): Evaluated with result " - f"{'FOR' if approval else 'AGAINST'} with confidence {confidence:.2f}" - ) - - wallet = backend.get_wallet(wallet_id) - - # Create a vote record with the evaluation results - vote_data = VoteCreate( - wallet_id=wallet_id, - dao_id=dao_id, - agent_id=None, # This will be set from the wallet if it exists - proposal_id=proposal_id, - answer=approval, - reasoning=reasoning, - confidence=confidence, - prompt=formatted_prompt, - cost=total_cost, - model=model, - profile_id=wallet.profile_id, - ) - - # Create the vote record - vote = backend.create_vote(vote_data) - if not vote: - logger.error("Failed to create vote record") - return {"success": False, "error": "Failed to create vote record"} - - logger.info(f"Created vote record {vote.id} for proposal {proposal_id}") - - # Create a DAO_PROPOSAL_VOTE message with the vote record ID - vote_message_data = {"proposal_id": proposal_id, "vote_id": str(vote.id)} - - vote_message = backend.create_queue_message( - QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_VOTE, - message=vote_message_data, - dao_id=dao_id, - wallet_id=wallet_id, - ) - ) - - if not vote_message: - logger.error("Failed to create vote queue message") - return { - "success": False, - "error": "Failed to create vote queue message", - } - - logger.info(f"Created vote queue message {vote_message.id}") - - # Mark the evaluation message as processed - update_data = QueueMessageBase(is_processed=True) - backend.update_queue_message(message_id, update_data) - - return { - "success": True, - "vote_id": str(vote.id), - "vote_message_id": str(vote_message.id), - "approve": approval, - "confidence": confidence, - } - - except Exception as e: - error_msg = f"Error processing message {message_id}: {str(e)}" - logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} - - async def get_pending_messages(self) -> List[QueueMessage]: - """Get all unprocessed messages from the queue.""" - filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) - return backend.list_queue_messages(filters=filters) - - async def _execute_impl( - self, context: JobContext - ) -> List[DAOProposalEvaluationResult]: - """Run the DAO proposal evaluation task.""" - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal evaluation messages") - - if not pending_messages: - return [ - DAOProposalEvaluationResult( - success=True, - message="No pending 
messages found", - proposals_processed=0, - proposals_evaluated=0, - ) - ] - - # Process each message - processed_count = 0 - evaluated_count = 0 - errors = [] - - for message in pending_messages: - result = await self.process_message(message) - processed_count += 1 - - if result.get("success"): - evaluated_count += 1 - else: - errors.append(result.get("error", "Unknown error")) - - logger.debug( - f"Task metrics - Processed: {processed_count}, " - f"Evaluated: {evaluated_count}, Errors: {len(errors)}" - ) - - return [ - DAOProposalEvaluationResult( - success=True, - message=f"Processed {processed_count} proposal(s), evaluated {evaluated_count} proposal(s)", - proposals_processed=processed_count, - proposals_evaluated=evaluated_count, - errors=errors, - ) - ] - - -# Instantiate the task for use in the registry -dao_proposal_evaluation = DAOProposalEvaluationTask() diff --git a/services/runner/tasks/dao_proposal_voter.py b/services/runner/tasks/dao_proposal_voter.py deleted file mode 100644 index 21e4b708..00000000 --- a/services/runner/tasks/dao_proposal_voter.py +++ /dev/null @@ -1,332 +0,0 @@ -"""DAO proposal voter task implementation.""" - -import json -from dataclasses import dataclass -from typing import Any, Dict, List - -from backend.factory import backend -from backend.models import ( - QueueMessage, - QueueMessageBase, - QueueMessageFilter, - QueueMessageType, - VoteBase, - VoteFilter, -) -from config import config -from lib.logger import configure_logger -from services.runner.base import BaseTask, JobContext, RunnerResult -from tools.dao_ext_action_proposals import VoteOnActionProposalTool - -logger = configure_logger(__name__) - - -@dataclass -class DAOProposalVoteResult(RunnerResult): - """Result of DAO proposal voting operation.""" - - proposals_processed: int = 0 - proposals_voted: int = 0 - errors: List[str] = None - - def __post_init__(self): - self.errors = self.errors or [] - - -class DAOProposalVoterTask(BaseTask[DAOProposalVoteResult]): - """Task runner for processing and voting on DAO proposals.""" - - QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_VOTE - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - # Get pending messages from the queue - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal voting messages") - - if message_count == 0: - logger.info("No pending proposal voting messages found") - return False - - # Validate that at least one message has a valid proposal ID - for message in pending_messages: - message_data = message.message or {} - proposal_id = message_data.get("proposal_id") - - if not proposal_id: - logger.warning(f"Message {message.id} missing proposal_id") - continue - - # Check if the proposal exists in the database - proposal = backend.get_proposal(proposal_id) - if proposal: - # Check if there are any unvoted votes for this proposal - unvoted_votes = backend.list_votes( - VoteFilter( - proposal_id=proposal_id, - voted=False, - ) - ) - - if unvoted_votes: - logger.info( - f"Found valid proposal {proposal_id} with {len(unvoted_votes)} unvoted votes to process" - ) - return True - else: - logger.warning( - f"No unvoted votes found for proposal {proposal_id}" - ) - else: - logger.warning(f"Proposal {proposal_id} not found in database") - - logger.warning( - "No valid proposals with unvoted votes found in pending messages" - ) - return False - - except Exception as e: - logger.error( 
- f"Error validating proposal voter task: {str(e)}", exc_info=True - ) - return False - - async def process_message(self, message: QueueMessage) -> Dict[str, Any]: - """Process a single DAO proposal voting message.""" - message_id = message.id - message_data = message.message or {} - wallet_id = message.wallet_id - dao_id = message.dao_id - - logger.debug( - f"Processing proposal voting message {message_id} for wallet {wallet_id}" - ) - - # Get the proposal ID from the message - proposal_id = message_data.get("proposal_id") - - if not proposal_id: - error_msg = f"Missing proposal_id in message {message_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - try: - # Get the proposal details from the database - proposal = backend.get_proposal(proposal_id) - if not proposal: - error_msg = f"Proposal {proposal_id} not found in database" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Get unvoted votes for this proposal and wallet - unvoted_votes = backend.list_votes( - VoteFilter( - proposal_id=proposal_id, - wallet_id=wallet_id, - voted=False, - ) - ) - - if not unvoted_votes: - error_msg = f"No unvoted votes found for proposal {proposal_id} and wallet {wallet_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Initialize the voting tool - voting_tool = VoteOnActionProposalTool(wallet_id=wallet_id) - - # Process each unvoted vote - results = [] - for vote in unvoted_votes: - # Submit the vote - vote_result = await voting_tool._arun( - action_proposals_voting_extension=proposal.contract_principal, - proposal_id=proposal.proposal_id, - vote=vote.answer, - ) - - if not vote_result.get("success", False): - error_msg = f"Failed to submit vote {vote.id}: {vote_result.get('message', 'Unknown error')}" - logger.error(error_msg) - results.append( - {"success": False, "error": error_msg, "vote_id": vote.id} - ) - continue - - try: - # Parse the output JSON string - output_data = ( - json.loads(vote_result["output"]) - if isinstance(vote_result["output"], str) - else vote_result["output"] - ) - # Get the transaction ID from the nested data structure - tx_id = output_data.get("data", {}).get("txid") - - if not tx_id: - logger.warning(f"No txid found in parsed output: {output_data}") - results.append( - { - "success": False, - "error": "No transaction ID found in response", - "vote_id": vote.id, - "vote_result": vote_result, - } - ) - continue - - except (json.JSONDecodeError, KeyError) as e: - logger.error(f"Error parsing vote result output: {str(e)}") - results.append( - { - "success": False, - "error": f"Failed to parse vote result: {str(e)}", - "vote_id": vote.id, - "vote_result": vote_result, - } - ) - continue - - # Log the txid for debugging - ## Get the correct address based on network configuration - wallet = backend.get_wallet(wallet_id) - address = ( - wallet.mainnet_address - if config.network.network == "mainnet" - else wallet.testnet_address - ) - logger.debug(f"Found txid in response: {tx_id}") - vote_data = VoteBase( - tx_id=tx_id, - voted=True, - address=address, - profile_id=wallet.profile_id, - ) - logger.debug( - f"Attempting to update vote {vote.id} with data: {vote_data.model_dump()}" - ) - try: - # Log the current vote state before update - current_vote = backend.get_vote(vote.id) - logger.debug( - f"Current vote state before update: {current_vote.model_dump() if current_vote else None}" - ) - - updated_vote = backend.update_vote(vote.id, vote_data) - if updated_vote: - logger.info( - 
f"Successfully updated vote {vote.id} with transaction ID {tx_id} and marked as voted" - ) - logger.debug(f"Updated vote state: {updated_vote.model_dump()}") - else: - logger.error( - f"Failed to update vote {vote.id} - update_vote returned None" - ) - except Exception as e: - logger.error( - f"Error updating vote {vote.id}: {str(e)}", exc_info=True - ) - results.append( - { - "success": False, - "error": f"Failed to update vote: {str(e)}", - "vote_id": vote.id, - "vote_result": vote_result, - } - ) - continue - results.append( - { - "success": True, - "vote_id": vote.id, - "tx_id": tx_id, - "vote_result": vote_result, - } - ) - - # Mark the message as processed if all votes were handled - if all(result["success"] for result in results): - update_data = QueueMessageBase(is_processed=True) - backend.update_queue_message(message_id, update_data) - logger.info( - f"Successfully processed all votes for message {message_id}" - ) - return { - "success": True, - "results": results, - } - else: - # Some votes failed - return { - "success": False, - "error": "Some votes failed to process", - "results": results, - } - - except Exception as e: - error_msg = f"Error processing message {message_id}: {str(e)}" - logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} - - async def get_pending_messages(self) -> List[QueueMessage]: - """Get all unprocessed messages from the queue.""" - filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) - return backend.list_queue_messages(filters=filters) - - async def _execute_impl(self, context: JobContext) -> List[DAOProposalVoteResult]: - """Run the DAO proposal voter task.""" - pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal voting messages") - - if not pending_messages: - return [ - DAOProposalVoteResult( - success=True, - message="No pending messages found", - proposals_processed=0, - proposals_voted=0, - ) - ] - - # Process each message - processed_count = 0 - voted_count = 0 - errors = [] - - for message in pending_messages: - result = await self.process_message(message) - processed_count += 1 - - if result.get("success"): - # Count successful votes from the results - voted_count += len( - [r for r in result.get("results", []) if r.get("success")] - ) - else: - errors.append(result.get("error", "Unknown error")) - # Also add any individual vote errors - for vote_result in result.get("results", []): - if not vote_result.get("success"): - errors.append(vote_result.get("error", "Unknown vote error")) - - logger.debug( - f"Task metrics - Processed: {processed_count}, " - f"Voted: {voted_count}, Errors: {len(errors)}" - ) - - return [ - DAOProposalVoteResult( - success=True, - message=f"Processed {processed_count} proposal(s), voted on {voted_count} proposal(s)", - proposals_processed=processed_count, - proposals_voted=voted_count, - errors=errors, - ) - ] - - -# Instantiate the task for use in the registry -dao_proposal_voter = DAOProposalVoterTask() diff --git a/services/runner/tasks/dao_task.py b/services/runner/tasks/dao_task.py deleted file mode 100644 index 3f91e4e7..00000000 --- a/services/runner/tasks/dao_task.py +++ /dev/null @@ -1,239 +0,0 @@ -from dataclasses import dataclass -from datetime import datetime -from typing import Any, Dict, List, Optional -from uuid import UUID - -from backend.factory import backend -from backend.models import ( - DAOFilter, - Profile, - QueueMessage, - QueueMessageBase, - 
QueueMessageFilter, - QueueMessageType, -) -from lib.logger import configure_logger -from services.workflows import execute_langgraph_stream -from tools.tools_factory import filter_tools_by_names, initialize_tools - -from ..base import BaseTask, JobContext, RunnerConfig, RunnerResult - -logger = configure_logger(__name__) - - -@dataclass -class DAOProcessingResult(RunnerResult): - """Result of DAO processing operation.""" - - dao_id: Optional[UUID] = None - deployment_data: Optional[Dict[str, Any]] = None - - -class DAOTask(BaseTask[DAOProcessingResult]): - """Task for processing DAO deployments.""" - - def __init__(self, config: Optional[RunnerConfig] = None): - super().__init__(config) - self._pending_messages = None - self.tools_map_all = initialize_tools( - Profile(id=self.config.twitter_profile_id, created_at=datetime.now()), - agent_id=self.config.twitter_agent_id, - ) - self.tools_map = filter_tools_by_names( - ["contract_deploy_dao"], self.tools_map_all - ) - logger.debug(f"Initialized {len(self.tools_map)} DAO deployment tools") - - async def _validate_config(self, context: JobContext) -> bool: - """Validate task configuration.""" - try: - if not self.tools_map: - logger.error("No DAO deployment tools available") - return False - - if not self.tools_map_all: - logger.error("Tools not properly initialized") - return False - - return True - except Exception as e: - logger.error(f"Error validating DAO config: {str(e)}", exc_info=True) - return False - - async def _validate_prerequisites(self, context: JobContext) -> bool: - """Validate task prerequisites.""" - try: - # Check for pending DAOs first - pending_daos = backend.list_daos( - filters=DAOFilter( - is_deployed=False, - is_broadcasted=True, - wallet_id=self.config.twitter_wallet_id, - ) - ) - if pending_daos: - logger.info( - f"Found {len(pending_daos)} pending Twitter DAO(s), skipping queue processing" - ) - return False - - # Cache pending messages for later use - self._pending_messages = backend.list_queue_messages( - filters=QueueMessageFilter( - type=QueueMessageType.DAO, is_processed=False - ) - ) - return True - except Exception as e: - logger.error(f"Error validating DAO prerequisites: {str(e)}", exc_info=True) - self._pending_messages = None - return False - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - if not self._pending_messages: - logger.debug("No pending DAO messages found") - return False - - message_count = len(self._pending_messages) - if message_count > 0: - logger.debug(f"Found {message_count} unprocessed DAO messages") - return True - - logger.debug("No unprocessed DAO messages to process") - return False - - except Exception as e: - logger.error(f"Error in DAO task validation: {str(e)}", exc_info=True) - return False - - async def _validate_message( - self, message: QueueMessage - ) -> Optional[DAOProcessingResult]: - """Validate a single message before processing.""" - try: - params = message.message.get("parameters", {}) - required_params = [ - "token_symbol", - "token_name", - "token_description", - "token_max_supply", - "token_decimals", - "origin_address", - "mission", - ] - - missing_params = [p for p in required_params if p not in params] - if missing_params: - return DAOProcessingResult( - success=False, - message=f"Missing required parameters: {', '.join(missing_params)}", - ) - - return None # Validation passed - - except Exception as e: - logger.error( - f"Error validating message {message.id}: {str(e)}", exc_info=True 
- ) - return DAOProcessingResult( - success=False, - message=f"Error validating message: {str(e)}", - error=e, - ) - - def _get_dao_parameters(self, message: QueueMessage) -> Optional[str]: - """Extract and format DAO parameters from message.""" - try: - params = message.message["parameters"] - return ( - f"Please deploy a DAO with the following parameters:\n" - f"Token Symbol: {params['token_symbol']}\n" - f"Token Name: {params['token_name']}\n" - f"Token Description: {params['token_description']}\n" - f"Token Max Supply: {params['token_max_supply']}\n" - f"Token Decimals: {params['token_decimals']}\n" - f"Origin Address: {params['origin_address']}\n" - f"Tweet Origin: {message.tweet_id}\n" - f"Mission: {params['mission']}" - ) - except KeyError as e: - logger.error(f"Missing required parameter in message: {e}") - return None - - async def _process_dao_message(self, message: QueueMessage) -> DAOProcessingResult: - """Process a single DAO message.""" - try: - # Validate message first - validation_result = await self._validate_message(message) - if validation_result: - return validation_result - - tool_input = self._get_dao_parameters(message) - if not tool_input: - return DAOProcessingResult( - success=False, - message="Failed to extract DAO parameters from message", - ) - - logger.info(f"Processing DAO deployment for message {message.id}") - logger.debug(f"DAO deployment parameters: {tool_input}") - - deployment_data = {} - async for chunk in execute_langgraph_stream( - history=[], input_str=tool_input, tools_map=self.tools_map - ): - if chunk["type"] == "result": - deployment_data = chunk["content"] - logger.info("DAO deployment completed successfully") - logger.debug(f"Deployment data: {deployment_data}") - elif chunk["type"] == "tool": - logger.debug(f"Executing tool: {chunk}") - - return DAOProcessingResult( - success=True, - message="Successfully processed DAO deployment", - deployment_data=deployment_data, - ) - - except Exception as e: - logger.error(f"Error processing DAO message: {str(e)}", exc_info=True) - return DAOProcessingResult( - success=False, message=f"Error processing DAO: {str(e)}", error=e - ) - - async def _execute_impl(self, context: JobContext) -> List[DAOProcessingResult]: - """Execute DAO deployment task.""" - results: List[DAOProcessingResult] = [] - try: - if not self._pending_messages: - return results - - # Process one message at a time for DAOs - message = self._pending_messages[0] - logger.debug(f"Processing DAO deployment message: {message.id}") - - result = await self._process_dao_message(message) - results.append(result) - - if result.success: - backend.update_queue_message( - queue_message_id=message.id, - update_data=QueueMessageBase(is_processed=True), - ) - logger.debug(f"Marked message {message.id} as processed") - - return results - - except Exception as e: - logger.error(f"Error in DAO task: {str(e)}", exc_info=True) - results.append( - DAOProcessingResult( - success=False, message=f"Error in DAO task: {str(e)}", error=e - ) - ) - return results - - -dao_task = DAOTask() diff --git a/services/runner/tasks/dao_tweet_task.py b/services/runner/tasks/dao_tweet_task.py deleted file mode 100644 index e3bf185d..00000000 --- a/services/runner/tasks/dao_tweet_task.py +++ /dev/null @@ -1,230 +0,0 @@ -from dataclasses import dataclass -from typing import Any, List, Optional -from uuid import UUID - -from backend.factory import backend -from backend.models import ( - QueueMessageBase, - QueueMessageCreate, - QueueMessageFilter, - QueueMessageType, - 
TokenFilter, -) -from lib.logger import configure_logger -from services.workflows import generate_dao_tweet - -from ..base import BaseTask, JobContext, RunnerConfig, RunnerResult - -logger = configure_logger(__name__) - - -@dataclass -class DAOTweetProcessingResult(RunnerResult): - """Result of DAO tweet processing operation.""" - - dao_id: Optional[UUID] = None - tweet_id: Optional[str] = None - - -class DAOTweetTask(BaseTask[DAOTweetProcessingResult]): - """Task for generating tweets for completed DAOs.""" - - def __init__(self, config: Optional[RunnerConfig] = None): - super().__init__(config) - self._pending_messages = None - - async def _validate_config(self, context: JobContext) -> bool: - """Validate task configuration.""" - try: - # No specific config requirements for this task - return True - except Exception as e: - logger.error( - f"Error validating DAO tweet task config: {str(e)}", exc_info=True - ) - return False - - async def _validate_prerequisites(self, context: JobContext) -> bool: - """Validate task prerequisites.""" - try: - # Cache pending messages for later use - self._pending_messages = backend.list_queue_messages( - filters=QueueMessageFilter( - type=QueueMessageType.DAO_TWEET, is_processed=False - ) - ) - return True - except Exception as e: - logger.error( - f"Error validating DAO tweet prerequisites: {str(e)}", exc_info=True - ) - self._pending_messages = None - return False - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - if not self._pending_messages: - logger.debug("No pending DAO tweet messages found") - return False - - message_count = len(self._pending_messages) - if message_count > 0: - logger.debug(f"Found {message_count} pending DAO tweet messages") - return True - - logger.debug("No pending DAO tweet messages to process") - return False - - except Exception as e: - logger.error(f"Error in DAO tweet task validation: {str(e)}", exc_info=True) - return False - - async def _validate_message( - self, message: Any - ) -> Optional[DAOTweetProcessingResult]: - """Validate a single message before processing.""" - try: - if not message.dao_id: - return DAOTweetProcessingResult( - success=False, message="DAO message has no dao_id", dao_id=None - ) - - # Validate DAO exists and is deployed - dao = backend.get_dao(message.dao_id) - if not dao: - return DAOTweetProcessingResult( - success=False, - message=f"No DAO found for id: {message.dao_id}", - dao_id=message.dao_id, - ) - - if not dao.is_deployed: - return DAOTweetProcessingResult( - success=False, - message=f"DAO is not deployed: {message.dao_id}", - dao_id=message.dao_id, - ) - - # Validate token exists - token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id)) - if not token: - return DAOTweetProcessingResult( - success=False, - message=f"No token found for DAO: {message.dao_id}", - dao_id=message.dao_id, - ) - - return None # Validation passed - - except Exception as e: - logger.error( - f"Error validating message {message.id}: {str(e)}", exc_info=True - ) - return DAOTweetProcessingResult( - success=False, - message=f"Error validating message: {str(e)}", - error=e, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, - ) - - async def _process_dao_message(self, message: Any) -> DAOTweetProcessingResult: - """Process a single DAO message.""" - try: - # Validate message first - validation_result = await self._validate_message(message) - if validation_result: - return validation_result - - # Get the validated 
DAO and token info - dao = backend.get_dao(message.dao_id) - token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id))[0] - - logger.info(f"Generating tweet for DAO: {dao.name} ({dao.id})") - logger.debug( - f"DAO details - Symbol: {token.symbol}, Mission: {dao.mission}" - ) - - # Generate tweet - generated_tweet = await generate_dao_tweet( - dao_name=dao.name, - dao_symbol=token.symbol, - dao_mission=dao.mission, - dao_id=dao.id, - ) - - # Create a new tweet message in the queue - tweet_message = backend.create_queue_message( - QueueMessageCreate( - type="tweet", - dao_id=dao.id, - message={"body": generated_tweet["tweet_text"]}, - tweet_id=message.tweet_id, - conversation_id=message.conversation_id, - ) - ) - - logger.info(f"Created tweet message for DAO: {dao.name}") - logger.debug(f"Tweet message ID: {tweet_message.id}") - - return DAOTweetProcessingResult( - success=True, - message="Successfully generated tweet", - dao_id=dao.id, - tweet_id=message.tweet_id, - ) - - except Exception as e: - logger.error( - f"Error processing DAO message {message.id}: {str(e)}", exc_info=True - ) - return DAOTweetProcessingResult( - success=False, - message=f"Error processing DAO: {str(e)}", - error=e, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, - ) - - async def _execute_impl( - self, context: JobContext - ) -> List[DAOTweetProcessingResult]: - """Execute DAO tweet processing task.""" - results: List[DAOTweetProcessingResult] = [] - try: - if not self._pending_messages: - return results - - processed_count = 0 - success_count = 0 - - for message in self._pending_messages: - logger.debug(f"Processing DAO tweet message: {message.id}") - result = await self._process_dao_message(message) - results.append(result) - processed_count += 1 - - if result.success: - success_count += 1 - backend.update_queue_message( - queue_message_id=message.id, - update_data=QueueMessageBase(is_processed=True), - ) - logger.debug(f"Marked message {message.id} as processed") - - logger.debug( - f"Task metrics - Processed: {processed_count}, Successful: {success_count}" - ) - - return results - - except Exception as e: - logger.error(f"Error in DAO tweet task: {str(e)}", exc_info=True) - results.append( - DAOTweetProcessingResult( - success=False, message=f"Error in DAO tweet task: {str(e)}", error=e - ) - ) - return results - - -dao_tweet_task = DAOTweetTask() diff --git a/services/runner/tasks/tweet_task.py b/services/runner/tasks/tweet_task.py deleted file mode 100644 index 10ae8fb1..00000000 --- a/services/runner/tasks/tweet_task.py +++ /dev/null @@ -1,284 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional -from uuid import UUID - -from backend.factory import backend -from backend.models import ( - QueueMessage, - QueueMessageBase, - QueueMessageFilter, - QueueMessageType, - XCredsFilter, -) -from lib.logger import configure_logger -from lib.twitter import TwitterService -from services.discord import create_discord_service -from services.runner.base import BaseTask, JobContext, RunnerConfig, RunnerResult - -logger = configure_logger(__name__) - - -@dataclass -class TweetProcessingResult(RunnerResult): - """Result of tweet processing operation.""" - - tweet_id: Optional[str] = None - dao_id: Optional[UUID] = None - - -class TweetTask(BaseTask[TweetProcessingResult]): - """Task for sending tweets.""" - - def __init__(self, config: Optional[RunnerConfig] = None): - super().__init__(config) - self._pending_messages: Optional[List[QueueMessage]] = None - 
self.twitter_service = None - - async def _initialize_twitter_service(self, dao_id: UUID) -> bool: - """Initialize Twitter service with credentials for the given DAO.""" - try: - # Get Twitter credentials for the DAO - creds = backend.list_x_creds(filters=XCredsFilter(dao_id=dao_id)) - if not creds: - logger.error(f"No Twitter credentials found for DAO {dao_id}") - return False - - # Initialize Twitter service with the credentials - self.twitter_service = TwitterService( - consumer_key=creds[0].consumer_key, - consumer_secret=creds[0].consumer_secret, - client_id=creds[0].client_id, - client_secret=creds[0].client_secret, - access_token=creds[0].access_token, - access_secret=creds[0].access_secret, - ) - await self.twitter_service._ainitialize() - logger.debug(f"Initialized Twitter service for DAO {dao_id}") - return True - - except Exception as e: - logger.error(f"Error initializing Twitter service: {str(e)}", exc_info=True) - return False - - async def _validate_config(self, context: JobContext) -> bool: - """Validate task configuration.""" - try: - # No specific config validation needed as credentials are per-DAO - return True - except Exception as e: - logger.error(f"Error validating tweet task config: {str(e)}", exc_info=True) - return False - - async def _validate_prerequisites(self, context: JobContext) -> bool: - """Validate task prerequisites.""" - try: - # Cache pending messages for later use - self._pending_messages = backend.list_queue_messages( - filters=QueueMessageFilter( - type=QueueMessageType.TWEET, is_processed=False - ) - ) - return True - except Exception as e: - logger.error( - f"Error validating tweet prerequisites: {str(e)}", exc_info=True - ) - self._pending_messages = None - return False - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - if not self._pending_messages: - logger.debug("No pending tweet messages found") - return False - - message_count = len(self._pending_messages) - if message_count > 0: - logger.debug(f"Found {message_count} pending tweet messages") - return True - - logger.debug("No pending tweet messages to process") - return False - - except Exception as e: - logger.error(f"Error in tweet task validation: {str(e)}", exc_info=True) - return False - - async def _validate_message( - self, message: QueueMessage - ) -> Optional[TweetProcessingResult]: - """Validate a single message before processing.""" - try: - # Check if message exists - if not message.message: - return TweetProcessingResult( - success=False, - message="Tweet message is empty", - tweet_id=message.tweet_id, - ) - - # Extract tweet text from the message field - tweet_text = None - if isinstance(message.message, dict) and "message" in message.message: - tweet_text = message.message["message"] - else: - return TweetProcessingResult( - success=False, - message=f"Unsupported tweet message format: {message.message}", - tweet_id=message.tweet_id, - ) - - if not tweet_text: - return TweetProcessingResult( - success=False, - message="Tweet message content is empty", - tweet_id=message.tweet_id, - ) - - if not message.dao_id: - return TweetProcessingResult( - success=False, - message="Tweet message has no dao_id", - dao_id=None, - ) - - # Check tweet length - if len(tweet_text) > 280: # Twitter's character limit - return TweetProcessingResult( - success=False, - message=f"Tweet exceeds character limit: {len(tweet_text)} chars", - tweet_id=message.tweet_id, - dao_id=message.dao_id, - ) - - # No need to modify the 
message structure, keep it as is - return None - - except Exception as e: - logger.error( - f"Error validating message {message.id}: {str(e)}", exc_info=True - ) - return TweetProcessingResult( - success=False, - message=f"Error validating message: {str(e)}", - error=e, - tweet_id=message.tweet_id if hasattr(message, "tweet_id") else None, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, - ) - - async def _process_tweet_message( - self, message: QueueMessage - ) -> TweetProcessingResult: - """Process a single tweet message.""" - try: - # Validate message first - validation_result = await self._validate_message(message) - if validation_result: - return validation_result - - # Initialize Twitter service for this DAO - if not await self._initialize_twitter_service(message.dao_id): - return TweetProcessingResult( - success=False, - message=f"Failed to initialize Twitter service for DAO: {message.dao_id}", - dao_id=message.dao_id, - ) - - # Extract tweet text directly from the message format - tweet_text = message.message["message"] - logger.info(f"Sending tweet for DAO {message.dao_id}") - logger.debug(f"Tweet content: {tweet_text}") - - # Prepare tweet parameters - tweet_params = {"text": tweet_text} - if message.tweet_id: - tweet_params["reply_in_reply_to_tweet_id"] = message.tweet_id - - # Send tweet using Twitter service - tweet_response = await self.twitter_service._apost_tweet(**tweet_params) - - if not tweet_response: - return TweetProcessingResult( - success=False, - message="Failed to send tweet", - dao_id=message.dao_id, - tweet_id=message.tweet_id, - ) - - logger.info(f"Successfully posted tweet {tweet_response.id}") - logger.debug(f"Tweet ID: {tweet_response.id}") - - # Discord Service - try: - discord_service = create_discord_service() - - if discord_service: - discord_result = discord_service.send_message(tweet_text) - logger.info(f"Discord message sent: {discord_result['success']}") - - except Exception as e: - logger.warning(f"Failed to send Discord message: {str(e)}") - - return TweetProcessingResult( - success=True, - message="Successfully sent tweet", - tweet_id=tweet_response.id, - dao_id=message.dao_id, - ) - - except Exception as e: - logger.error( - f"Error processing tweet message {message.id}: {str(e)}", exc_info=True - ) - return TweetProcessingResult( - success=False, - message=f"Error sending tweet: {str(e)}", - error=e, - tweet_id=message.tweet_id if hasattr(message, "tweet_id") else None, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, - ) - - async def _execute_impl(self, context: JobContext) -> List[TweetProcessingResult]: - """Execute tweet sending task.""" - results: List[TweetProcessingResult] = [] - try: - if not self._pending_messages: - return results - - processed_count = 0 - success_count = 0 - - for message in self._pending_messages: - logger.debug(f"Processing tweet message: {message.id}") - result = await self._process_tweet_message(message) - results.append(result) - processed_count += 1 - - if result.success: - success_count += 1 - backend.update_queue_message( - queue_message_id=message.id, - update_data=QueueMessageBase(is_processed=True), - ) - logger.debug(f"Marked message {message.id} as processed") - - logger.debug( - f"Task metrics - Processed: {processed_count}, Successful: {success_count}" - ) - - return results - - except Exception as e: - logger.error(f"Error in tweet task: {str(e)}", exc_info=True) - results.append( - TweetProcessingResult( - success=False, - message=f"Error in tweet task: {str(e)}", - 
error=e, - ) - ) - return results - - -tweet_task = TweetTask() \ No newline at end of file diff --git a/services/startup.py b/services/startup.py deleted file mode 100644 index cbb710de..00000000 --- a/services/startup.py +++ /dev/null @@ -1,100 +0,0 @@ -import asyncio -from typing import Any, Optional - -from apscheduler.schedulers.asyncio import AsyncIOScheduler - -from config import config -from lib.logger import configure_logger -from services.bot import start_application -from services.runner.job_manager import JobManager -from services.websocket import websocket_manager - -logger = configure_logger(__name__) - - -class StartupService: - """Service to manage application startup and background tasks.""" - - def __init__(self, scheduler: Optional[AsyncIOScheduler] = None): - self.scheduler = scheduler or AsyncIOScheduler() - self.cleanup_task: Optional[asyncio.Task] = None - - async def start_websocket_cleanup(self) -> None: - """Start the WebSocket cleanup task.""" - try: - await websocket_manager.start_cleanup_task() - except Exception as e: - logger.error(f"Error starting WebSocket cleanup task: {str(e)}") - raise - - async def start_bot(self) -> Any: - """Start the Telegram bot in the background.""" - if not config.telegram.enabled: - logger.info("Telegram bot disabled. Skipping initialization.") - return None - - try: - application = await start_application() - logger.info("Bot started successfully") - return application - except Exception as e: - logger.error(f"Failed to start Telegram bot: {e}") - raise - - def init_scheduler(self) -> None: - """Initialize and start the scheduler with configured jobs.""" - # Use the JobManager to schedule all enabled jobs - any_enabled = JobManager.schedule_jobs(self.scheduler) - - # Start the scheduler if any jobs are enabled - if any_enabled: - logger.info("Starting scheduler") - self.scheduler.start() - logger.info("Scheduler started") - else: - logger.info("Scheduler is disabled") - - async def init_background_tasks(self) -> asyncio.Task: - """Initialize all background tasks.""" - # Initialize scheduler - self.init_scheduler() - - # Start websocket cleanup task - self.cleanup_task = asyncio.create_task(self.start_websocket_cleanup()) - - # Start bot if enabled - await self.start_bot() - - # Return the cleanup task for management - return self.cleanup_task - - async def shutdown(self) -> None: - """Shutdown all services gracefully.""" - logger.info("Shutting down services...") - - if self.scheduler.running: - self.scheduler.shutdown() - logger.info("Scheduler shutdown complete") - - if self.cleanup_task: - self.cleanup_task.cancel() - try: - await self.cleanup_task - except asyncio.CancelledError: - pass - logger.info("Cleanup task shutdown complete") - - -# Global instance for convenience -startup_service = StartupService() - - -# Convenience functions that use the global instance -async def run() -> asyncio.Task: - """Initialize all background tasks using the global startup service.""" - return await startup_service.init_background_tasks() - - -async def shutdown() -> None: - """Shutdown all services using the global startup service.""" - await startup_service.shutdown() diff --git a/services/twitter.py b/services/twitter.py deleted file mode 100644 index 23d86d76..00000000 --- a/services/twitter.py +++ /dev/null @@ -1,389 +0,0 @@ -from typing import Dict, List, Optional, TypedDict - -from pydantic import BaseModel - -from backend.factory import backend -from backend.models import ( - QueueMessageCreate, - XTweetBase, - XTweetCreate, - 
XTweetFilter, - XUserCreate, - XUserFilter, -) -from config import config -from lib.logger import configure_logger -from lib.twitter import TwitterService -from services.workflows import analyze_tweet - -logger = configure_logger(__name__) - - -class UserProfile(TypedDict): - """Type definition for user profile data.""" - - name: str - age: int - email: str - - -class TweetData(BaseModel): - """Pydantic model for tweet data.""" - - tweet_id: Optional[str] = None - author_id: Optional[str] = None - text: Optional[str] = None - conversation_id: Optional[str] = None - - -class TwitterConfig(BaseModel): - """Configuration for Twitter service.""" - - consumer_key: str - consumer_secret: str - client_id: str - client_secret: str - access_token: str - access_secret: str - user_id: str - whitelisted_authors: List[str] - whitelist_enabled: bool = False - - -class TweetRepository: - """Repository for handling tweet storage and retrieval.""" - - async def store_tweet(self, tweet_data: TweetData) -> None: - """Store tweet and author data in the database.""" - try: - authors = backend.list_x_users( - filters=XUserFilter(user_id=tweet_data.author_id) - ) - if authors and len(authors) > 0: - author = authors[0] - logger.debug( - f"Found existing author {tweet_data.author_id} in database" - ) - else: - logger.info(f"Creating new author record for {tweet_data.author_id}") - author = backend.create_x_user( - XUserCreate(user_id=tweet_data.author_id) - ) - - logger.debug(f"Creating tweet record for {tweet_data.tweet_id}") - backend.create_x_tweet( - XTweetCreate( - author_id=author.id, - tweet_id=tweet_data.tweet_id, - message=tweet_data.text, - conversation_id=tweet_data.conversation_id, - ) - ) - except Exception as e: - logger.error(f"Failed to store tweet/author data: {str(e)}", exc_info=True) - raise - - async def update_tweet_analysis( - self, - tweet_id: str, - is_worthy: bool, - tweet_type: str, - confidence_score: float, - reason: str, - ) -> None: - """Update tweet with analysis results.""" - try: - tweets = backend.list_x_tweets(filters=XTweetFilter(tweet_id=tweet_id)) - if tweets and len(tweets) > 0: - logger.debug("Updating existing tweet record with analysis results") - backend.update_x_tweet( - x_tweet_id=tweets[0].id, - update_data=XTweetBase( - is_worthy=is_worthy, - tweet_type=tweet_type, - confidence_score=confidence_score, - reason=reason, - ), - ) - except Exception as e: - logger.error(f"Failed to update tweet analysis: {str(e)}", exc_info=True) - raise - - async def get_conversation_history( - self, conversation_id: str, user_id: str - ) -> List[Dict[str, str]]: - """Retrieve conversation history for a given conversation ID.""" - try: - conversation_tweets = backend.list_x_tweets( - filters=XTweetFilter(conversation_id=conversation_id) - ) - logger.debug( - f"Retrieved {len(conversation_tweets)} tweets from conversation {conversation_id}" - ) - return [ - { - "role": "user" if tweet.author_id != user_id else "assistant", - "content": tweet.message, - } - for tweet in conversation_tweets - if tweet.message - ] - except Exception as e: - logger.error( - f"Failed to retrieve conversation history: {str(e)}", exc_info=True - ) - raise - - -class TweetAnalyzer: - """Handles tweet analysis and processing logic.""" - - def __init__(self, tweet_repository: TweetRepository): - """Initialize with dependencies.""" - self.tweet_repository = tweet_repository - - async def analyze_tweet_content( - self, tweet_data: TweetData, history: List[Dict[str, str]] - ) -> Dict: - """Analyze tweet content and 
determine if it needs processing.""" - logger.info( - f"Analyzing tweet {tweet_data.tweet_id} from user {tweet_data.author_id}" - ) - logger.debug(f"Tweet content: {tweet_data.text}") - logger.debug(f"Conversation history size: {len(history)} messages") - - filtered_content = "\n".join( - f"{msg['role']}: {msg['content']}" for msg in history - ) - - try: - analysis_result = await analyze_tweet( - tweet_text=tweet_data.text, - filtered_content=filtered_content, - ) - - logger.info( - f"Analysis complete for {tweet_data.tweet_id} - " - f"Worthy: {analysis_result['is_worthy']}, " - f"Type: {analysis_result['tweet_type']}, " - f"Confidence: {analysis_result['confidence_score']}" - ) - logger.debug(f"Analysis reason: {analysis_result['reason']}") - - await self.tweet_repository.update_tweet_analysis( - tweet_id=tweet_data.tweet_id, - is_worthy=analysis_result["is_worthy"], - tweet_type=analysis_result["tweet_type"], - confidence_score=analysis_result["confidence_score"], - reason=analysis_result["reason"], - ) - - return analysis_result - except Exception as e: - logger.error( - f"Analysis failed for tweet {tweet_data.tweet_id}: {str(e)}", - exc_info=True, - ) - raise - - -class TwitterMentionHandler: - """Handles Twitter mention processing and responses.""" - - def __init__( - self, - config: TwitterConfig, - tweet_repository: TweetRepository, - tweet_analyzer: TweetAnalyzer, - ): - """Initialize with dependencies.""" - self.config = config - self.tweet_repository = tweet_repository - self.tweet_analyzer = tweet_analyzer - self.twitter_service = TwitterService( - consumer_key=config.consumer_key, - consumer_secret=config.consumer_secret, - client_id=config.client_id, - client_secret=config.client_secret, - access_token=config.access_token, - access_secret=config.access_secret, - ) - - async def _post_response( - self, tweet_data: TweetData, response_content: str - ) -> None: - """Post a response tweet. - - Args: - tweet_data: Data about the tweet to respond to - response_content: Content of the response tweet - """ - logger.debug(f"Posting response to tweet {tweet_data.tweet_id}") - await self.twitter_service._ainitialize() - await self.twitter_service._apost_tweet( - text=response_content, reply_in_reply_to_tweet_id=tweet_data.tweet_id - ) - - def _is_author_whitelisted(self, author_id: str) -> bool: - """Check if the author is in the whitelist.""" - logger.debug(f"Checking whitelist status for author {author_id}") - is_whitelisted = str(author_id) in self.config.whitelisted_authors - logger.debug(f"Author {author_id} whitelist status: {is_whitelisted}") - return is_whitelisted - - async def _handle_mention(self, mention) -> None: - """Process a single mention for analysis.""" - tweet_data = TweetData( - tweet_id=mention.id, - author_id=mention.author_id, - text=mention.text, - conversation_id=mention.conversation_id, - ) - - logger.debug( - f"Processing mention - Tweet ID: {tweet_data.tweet_id}, " - f"Author: {tweet_data.author_id}, Text: {tweet_data.text[:50]}..." 
- ) - - # Check if tweet exists in our database - try: - existing_tweets = backend.list_x_tweets( - filters=XTweetFilter(tweet_id=tweet_data.tweet_id) - ) - if existing_tweets and len(existing_tweets) > 0: - logger.debug( - f"Tweet {tweet_data.tweet_id} already exists in database, skipping processing" - ) - return - except Exception as e: - logger.error( - f"Database error checking tweet {tweet_data.tweet_id}: {str(e)}", - exc_info=True, - ) - raise - - await self.tweet_repository.store_tweet(tweet_data) - - try: - if self.config.whitelist_enabled: - if self._is_author_whitelisted(tweet_data.author_id): - logger.info( - f"Processing whitelisted mention {tweet_data.tweet_id} " - f"from user {tweet_data.author_id}" - ) - await self._process_mention(tweet_data) - else: - logger.warning( - f"Skipping non-whitelisted mention {tweet_data.tweet_id} " - f"from user {tweet_data.author_id}" - ) - else: - logger.debug("Whitelist check disabled, processing all mentions") - await self._process_mention(tweet_data) - except Exception as e: - logger.error( - f"Failed to process mention {tweet_data.tweet_id}: {str(e)}", - exc_info=True, - ) - raise - - async def _process_mention(self, tweet_data: TweetData) -> None: - """Process mention after validation.""" - history = await self.tweet_repository.get_conversation_history( - tweet_data.conversation_id, self.config.user_id - ) - - analysis_result = await self.tweet_analyzer.analyze_tweet_content( - tweet_data, history - ) - - if analysis_result["is_worthy"] and analysis_result["tool_request"]: - logger.info( - f"Queueing tool request for tweet {tweet_data.tweet_id} - " - f"Tool: {analysis_result['tool_request'].tool_name}" - ) - backend.create_queue_message( - new_queue_message=QueueMessageCreate( - type="daos", - tweet_id=tweet_data.tweet_id, - conversation_id=tweet_data.conversation_id, - message=analysis_result["tool_request"].model_dump(), - ) - ) - elif analysis_result["is_worthy"]: - logger.debug( - f"Tweet {tweet_data.tweet_id} worthy but no tool request present" - ) - else: - logger.debug(f"Tweet {tweet_data.tweet_id} not worthy of processing") - - async def process_mentions(self) -> None: - """Process all new mentions for analysis.""" - try: - logger.info("Starting Twitter mention processing") - await self.twitter_service._ainitialize() - mentions = await self.twitter_service.get_mentions_by_user_id( - self.config.user_id - ) - - if not mentions: - logger.info("No new mentions found to process") - return - - logger.info(f"Found {len(mentions)} mentions to process") - for mention in mentions: - try: - logger.debug(f"Processing mention {mention.id}") - await self._handle_mention(mention) - except Exception as e: - logger.error( - f"Failed to process mention {mention.id}: {str(e)}", - exc_info=True, - ) - continue - - except Exception as e: - logger.error(f"Twitter mention processing failed: {str(e)}", exc_info=True) - raise - - -def create_twitter_handler() -> TwitterMentionHandler: - """Factory function to create TwitterMentionHandler with dependencies.""" - twitter_config = TwitterConfig( - consumer_key=config.twitter.consumer_key, - consumer_secret=config.twitter.consumer_secret, - client_id=config.twitter.client_id, - client_secret=config.twitter.client_secret, - access_token=config.twitter.access_token, - access_secret=config.twitter.access_secret, - user_id=config.twitter.automated_user_id, - whitelisted_authors=config.twitter.whitelisted_authors, - whitelist_enabled=False, - ) - - tweet_repository = TweetRepository() - tweet_analyzer = 
TweetAnalyzer(tweet_repository)
-
-    return TwitterMentionHandler(twitter_config, tweet_repository, tweet_analyzer)
-
-
-# Global handler instance
-handler = create_twitter_handler()
-
-
-async def execute_twitter_job() -> None:
-    """Execute the Twitter job to process mentions."""
-    try:
-        if not handler.config.user_id:
-            logger.error(
-                "Cannot execute Twitter job: AIBTC_TWITTER_AUTOMATED_USER_ID not set"
-            )
-            return
-
-        logger.info("Starting Twitter mention check job")
-        await handler.process_mentions()
-        logger.info("Successfully completed Twitter mention check job")
-
-    except Exception as e:
-        logger.error(f"Twitter job execution failed: {str(e)}", exc_info=True)
-        raise
diff --git a/services/webhooks/__init__.py b/services/webhooks/__init__.py
deleted file mode 100644
index 46266e25..00000000
--- a/services/webhooks/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Webhook services package."""
-
-from services.webhooks.chainhook import ChainhookService
-
-__all__ = ["ChainhookService"]
diff --git a/services/webhooks/chainhook/handlers/__init__.py b/services/webhooks/chainhook/handlers/__init__.py
deleted file mode 100644
index 979c63a9..00000000
--- a/services/webhooks/chainhook/handlers/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""Chainhook webhook handlers module.
-
-This module contains specialized handlers for different types of chainhook events.
-"""
-
-from services.webhooks.chainhook.handlers.base import ChainhookEventHandler
-from services.webhooks.chainhook.handlers.block_state_handler import BlockStateHandler
-from services.webhooks.chainhook.handlers.buy_event_handler import BuyEventHandler
-from services.webhooks.chainhook.handlers.contract_message_handler import (
-    ContractMessageHandler,
-)
-from services.webhooks.chainhook.handlers.dao_proposal_burn_height_handler import (
-    DAOProposalBurnHeightHandler,
-)
-from services.webhooks.chainhook.handlers.dao_proposal_conclusion_handler import (
-    DAOProposalConclusionHandler,
-)
-from services.webhooks.chainhook.handlers.dao_proposal_handler import DAOProposalHandler
-from services.webhooks.chainhook.handlers.dao_vote_handler import DAOVoteHandler
-from services.webhooks.chainhook.handlers.sell_event_handler import SellEventHandler
-
-__all__ = [
-    "ChainhookEventHandler",
-    "ContractMessageHandler",
-    "BuyEventHandler",
-    "SellEventHandler",
-    "DAOProposalHandler",
-    "DAOProposalBurnHeightHandler",
-    "DAOVoteHandler",
-    "DAOProposalConclusionHandler",
-    "BlockStateHandler",
-]
diff --git a/services/webhooks/chainhook/handlers/action_proposal_handler.py b/services/webhooks/chainhook/handlers/action_proposal_handler.py
deleted file mode 100644
index 9f39f332..00000000
--- a/services/webhooks/chainhook/handlers/action_proposal_handler.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""Handler for capturing new DAO action proposals."""
-
-from typing import Dict, Optional
-
-from backend.factory import backend
-from backend.models import (
-    ContractStatus,
-    ProposalCreate,
-    ProposalFilter,
-    ProposalType,
-)
-from services.webhooks.chainhook.handlers.base_proposal_handler import (
-    BaseProposalHandler,
-)
-from services.webhooks.chainhook.models import Event, TransactionWithReceipt
-
-
-class ActionProposalHandler(BaseProposalHandler):
-    """Handler for capturing and processing new DAO action proposals.
-
-    This handler identifies contract calls related to proposing actions in DAO contracts,
-    creates proposal records in the database, and tracks their lifecycle.
- """ - - def can_handle_transaction(self, transaction: TransactionWithReceipt) -> bool: - """Check if this handler can handle the given transaction. - - This handler can handle contract call transactions related to proposing actions. - - Args: - transaction: The transaction to check - - Returns: - bool: True if this handler can handle the transaction, False otherwise - """ - tx_data = self.extract_transaction_data(transaction) - tx_kind = tx_data["tx_kind"] - tx_data_content = tx_data["tx_data"] - tx_metadata = tx_data["tx_metadata"] - - # Only handle ContractCall type transactions - if not isinstance(tx_kind, dict): - self.logger.debug(f"Skipping: tx_kind is not a dict: {type(tx_kind)}") - return False - - tx_kind_type = tx_kind.get("type") - - if not isinstance(tx_data_content, dict): - self.logger.debug( - f"Skipping: tx_data_content is not a dict: {type(tx_data_content)}" - ) - return False - - # Check if the method name is exactly "propose-action" - tx_method = tx_data_content.get("method", "") - is_proposal_method = tx_method == "propose-action" - - # Access success from TransactionMetadata - tx_success = tx_metadata.success - - if is_proposal_method and tx_success: - self.logger.debug(f"Found action proposal method: {tx_method}") - - return ( - tx_kind_type == "ContractCall" and is_proposal_method and tx_success is True - ) - - def _get_proposal_info_from_events(self, events: list[Event]) -> Optional[Dict]: - """Extract the action proposal information from transaction events. - - Args: - events: List of events from the transaction - - Returns: - Optional[Dict]: Dictionary containing proposal information if found, None otherwise - """ - for event in events: - # Find SmartContractEvent events - if event.type != "SmartContractEvent" or not hasattr(event, "data"): - continue - - event_data = event.data - - # Check if this is a print event - if event_data.get("topic") != "print": - continue - - # Get the value, which might be None - value = event_data.get("value") - - # Skip events with null values - if value is None: - self.logger.debug("Value is None in SmartContractEvent data") - continue - - # Check if this is a proposal event - if value.get("notification") == "propose-action": - payload = value.get("payload", {}) - if not payload: - self.logger.warning("Empty payload in proposal event") - return None - - return { - "proposal_id": payload.get("proposalId"), - "action": payload.get("action"), - "caller": payload.get("caller"), - "creator": payload.get("creator"), - "created_at_block": payload.get("createdAt"), - "end_block": payload.get("endBlock"), - "start_block": payload.get("startBlock"), - "liquid_tokens": str(payload.get("liquidTokens")), - "parameters": payload.get("parameters"), - "bond": str(payload.get("bond")), - } - - self.logger.warning("Could not find proposal information in transaction events") - return None - - async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: - """Handle action proposal transactions. - - Processes new action proposal transactions and creates proposal records in the database. 
- - Args: - transaction: The transaction to handle - """ - tx_data = self.extract_transaction_data(transaction) - tx_id = tx_data["tx_id"] - tx_data_content = tx_data["tx_data"] - tx_metadata = tx_data["tx_metadata"] - - # Get contract identifier - contract_identifier = tx_data_content.get("contract_identifier") - if not contract_identifier: - self.logger.warning("No contract identifier found in transaction data") - return - - # Find the DAO for this contract - dao_data = self._find_dao_for_contract(contract_identifier) - if not dao_data: - self.logger.warning(f"No DAO found for contract {contract_identifier}") - return - - # Get the proposal info from the transaction events - events = tx_metadata.receipt.events if hasattr(tx_metadata, "receipt") else [] - proposal_info = self._get_proposal_info_from_events(events) - if proposal_info is None: - self.logger.warning( - "Could not determine proposal information from transaction" - ) - return - - self.logger.info( - f"Processing new action proposal {proposal_info['proposal_id']} for DAO {dao_data['name']} " - f"(contract: {contract_identifier})" - ) - - # Check if the proposal already exists in the database - existing_proposals = backend.list_proposals( - filters=ProposalFilter( - dao_id=dao_data["id"], - contract_principal=contract_identifier, - proposal_id=proposal_info["proposal_id"], - type=ProposalType.ACTION, - ) - ) - - if not existing_proposals: - # Create a new proposal record in the database - proposal_title = f"Action Proposal #{proposal_info['proposal_id']}" - proposal = backend.create_proposal( - ProposalCreate( - dao_id=dao_data["id"], - title=proposal_title, - description=f"Action proposal {proposal_info['proposal_id']} for {dao_data['name']}", - contract_principal=contract_identifier, - tx_id=tx_id, - proposal_id=proposal_info["proposal_id"], - status=ContractStatus.DEPLOYED, # Since it's already on-chain - type=ProposalType.ACTION, - # Add fields from payload - action=proposal_info["action"], - caller=proposal_info["caller"], - creator=proposal_info["creator"], - created_at_block=proposal_info["created_at_block"], - end_block=proposal_info["end_block"], - start_block=proposal_info["start_block"], - liquid_tokens=proposal_info["liquid_tokens"], - parameters=proposal_info["parameters"], - bond=proposal_info["bond"], - ) - ) - self.logger.info( - f"Created new action proposal record in database: {proposal.id}" - ) - else: - self.logger.info( - f"Action proposal already exists in database: {existing_proposals[0].id}" - ) diff --git a/services/webhooks/chainhook/handlers/contract_message_handler.py b/services/webhooks/chainhook/handlers/contract_message_handler.py deleted file mode 100644 index 6f0d4d7c..00000000 --- a/services/webhooks/chainhook/handlers/contract_message_handler.py +++ /dev/null @@ -1,166 +0,0 @@ -"""Handler for capturing messages from contracts.""" - -from typing import Dict, List, Optional - -from backend.factory import backend -from backend.models import ExtensionFilter, QueueMessageCreate -from services.webhooks.chainhook.handlers.base import ChainhookEventHandler -from services.webhooks.chainhook.models import Event, TransactionWithReceipt - - -class ContractMessageHandler(ChainhookEventHandler): - """Handler for capturing and processing messages from contracts. - - This handler identifies contract calls with specific patterns and - creates appropriate queue messages for further processing. 
- """ - - def can_handle_transaction(self, transaction: TransactionWithReceipt) -> bool: - """Check if this handler can handle the given transaction. - - This handler can handle contract call transactions with conclude-proposal method. - - Args: - transaction: The transaction to check - - Returns: - bool: True if this handler can handle the transaction, False otherwise - """ - tx_data = self.extract_transaction_data(transaction) - tx_kind = tx_data["tx_kind"] - tx_data_content = tx_data["tx_data"] - tx_metadata = tx_data["tx_metadata"] - - # Only handle ContractCall type transactions - if not isinstance(tx_kind, dict): - self.logger.debug(f"Skipping: tx_kind is not a dict: {type(tx_kind)}") - return False - - tx_kind_type = tx_kind.get("type") - - if not isinstance(tx_data_content, dict): - self.logger.debug( - f"Skipping: tx_data_content is not a dict: {type(tx_data_content)}" - ) - return False - - # Check if the method name is exactly "conclude-proposal" - tx_method = tx_data_content.get("method", "") - is_conclude_proposal = tx_method == "conclude-proposal" - - # Access success from TransactionMetadata - tx_success = tx_metadata.success - - if is_conclude_proposal and tx_success: - self.logger.debug(f"Found conclude-proposal method: {tx_method}") - - return ( - tx_kind_type == "ContractCall" - and is_conclude_proposal - and tx_success is True - ) - - def _find_dao_for_contract(self, contract_identifier: str) -> Optional[Dict]: - """Find the DAO associated with the given contract. - - Args: - contract_identifier: The contract identifier to search for - - Returns: - Optional[Dict]: The DAO data if found, None otherwise - """ - # Find extensions with this contract principal - extensions = backend.list_extensions( - filters=ExtensionFilter( - contract_principal=contract_identifier, - ) - ) - - if not extensions: - self.logger.warning( - f"No extensions found for contract {contract_identifier}" - ) - return None - - # Get the DAO for the first matching extension - dao_id = extensions[0].dao_id - if not dao_id: - self.logger.warning("Extension found but no DAO ID associated with it") - return None - - dao = backend.get_dao(dao_id) - if not dao: - self.logger.warning(f"No DAO found with ID {dao_id}") - return None - - self.logger.info(f"Found DAO for contract {contract_identifier}: {dao.name}") - return dao.model_dump() - - def _get_message_from_events(self, events: List[Event]) -> Optional[str]: - """Extract the message from onchain-messaging contract events. - - Args: - events: List of events from the transaction - - Returns: - Optional[str]: The message if found, None otherwise - """ - for event in events: - # Find print events from onchain-messaging contract - if ( - event.type == "SmartContractEvent" - and hasattr(event, "data") - and event.data.get("topic") == "print" - and "onchain-messaging" in event.data.get("contract_identifier", "") - ): - # Get the value directly if it's a string - value = event.data.get("value") - if isinstance(value, str): - return value - - self.logger.warning("Could not find message in transaction events") - return None - - async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: - """Handle contract message transactions. - - Processes contract call transactions that contain messages from concluded proposals, - creates queue messages for them, and associates them with the appropriate DAO. 
- - Args: - transaction: The transaction to handle - """ - tx_data = self.extract_transaction_data(transaction) - tx_data_content = tx_data["tx_data"] - tx_metadata = tx_data["tx_metadata"] - - # Get contract identifier - contract_identifier = tx_data_content.get("contract_identifier") - if not contract_identifier: - self.logger.warning("No contract identifier found in transaction data") - return - - # Find the DAO for this contract - dao_data = self._find_dao_for_contract(contract_identifier) - if not dao_data: - self.logger.warning(f"No DAO found for contract {contract_identifier}") - return - - # Get the message from the transaction events - events = tx_metadata.receipt.events if hasattr(tx_metadata, "receipt") else [] - message = self._get_message_from_events(events) - if message is None: - self.logger.warning("Could not find message in transaction events") - return - - self.logger.info(f"Processing message from DAO {dao_data['name']}: {message}") - - # Create a new queue message for the DAO - new_message = backend.create_queue_message( - QueueMessageCreate( - type="tweet", - message={"message": message}, - dao_id=dao_data["id"], - ) - ) - self.logger.info(f"Created queue message: {new_message.id}") diff --git a/services/webhooks/chainhook/models.py b/services/webhooks/chainhook/models.py deleted file mode 100644 index fef565c1..00000000 --- a/services/webhooks/chainhook/models.py +++ /dev/null @@ -1,138 +0,0 @@ -"""Chainhook webhook data models.""" - -import logging -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Union - -# Configure logger -logger = logging.getLogger(__name__) - - -@dataclass -class TransactionIdentifier: - """Transaction identifier with hash.""" - - hash: str - - -@dataclass -class BlockIdentifier: - """Block identifier with hash and index.""" - - hash: str - index: int - - -@dataclass -class Operation: - """Operation within a transaction.""" - - account: Dict[str, str] - amount: Dict[str, Any] - operation_identifier: Dict[str, int] - status: str - type: str - related_operations: Optional[List[Dict[str, int]]] = None - - -@dataclass -class Event: - """Event data from transaction receipt.""" - - data: Dict[str, Any] - position: Dict[str, int] - type: str - - -@dataclass -class Receipt: - """Transaction receipt containing events and other metadata.""" - - contract_calls_stack: List[Any] - events: List[Event] - mutated_assets_radius: List[Any] - mutated_contracts_radius: List[Any] - - -@dataclass -class TransactionMetadata: - """Metadata about a transaction including execution cost and kind.""" - - description: str - execution_cost: Dict[str, int] - fee: int - kind: Dict[str, Any] - nonce: int - position: Dict[str, int] - raw_tx: str - receipt: Receipt - result: str - sender: str - sponsor: Optional[str] - success: bool - - -@dataclass -class TransactionWithReceipt: - """Transaction with receipt including metadata and operations.""" - - transaction_identifier: TransactionIdentifier - metadata: Union[Dict[str, Any], TransactionMetadata] - operations: List[Union[Dict[str, Any], Operation]] - - -@dataclass -class BlockMetadata: - """Metadata about a block.""" - - bitcoin_anchor_block_identifier: Optional[BlockIdentifier] = None - block_time: Optional[int] = None - confirm_microblock_identifier: Optional[Any] = None - cycle_number: Optional[int] = None - pox_cycle_index: Optional[int] = None - pox_cycle_length: Optional[int] = None - pox_cycle_position: Optional[int] = None - reward_set: Optional[Any] = None - signer_bitvec: Optional[str] 
= None
-    signer_public_keys: Optional[List[str]] = None
-    signer_signature: Optional[List[str]] = None
-    stacks_block_hash: Optional[str] = None
-    tenure_height: Optional[int] = None
-
-
-@dataclass
-class Apply:
-    """Apply block data structure containing transactions."""
-
-    block_identifier: BlockIdentifier
-    transactions: List[TransactionWithReceipt]
-    metadata: Optional[BlockMetadata] = None
-    parent_block_identifier: Optional[BlockIdentifier] = None
-    timestamp: Optional[int] = None
-
-
-@dataclass
-class Predicate:
-    """Predicate for chainhook filter."""
-
-    scope: str
-    higher_than: int
-
-
-@dataclass
-class ChainHookInfo:
-    """Information about the chainhook itself."""
-
-    is_streaming_blocks: bool
-    predicate: Predicate
-    uuid: str
-
-
-@dataclass
-class ChainHookData:
-    """Top-level data structure for Chainhook webhook payloads."""
-
-    apply: List[Apply]
-    chainhook: ChainHookInfo
-    events: List[Any]
-    rollback: List[Any]
diff --git a/services/webhooks/dao/handler.py b/services/webhooks/dao/handler.py
deleted file mode 100644
index 73933281..00000000
--- a/services/webhooks/dao/handler.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""Handler for DAO webhook payloads."""
-
-from typing import Any, Dict, List
-from uuid import UUID
-
-from backend.factory import backend
-from backend.models import ContractStatus, DAOCreate, ExtensionCreate, TokenCreate
-from lib.logger import configure_logger
-from services.webhooks.base import WebhookHandler
-from services.webhooks.dao.models import DAOWebhookPayload, DAOWebhookResponse
-
-
-class DAOHandler(WebhookHandler):
-    """Handler for DAO webhook payloads.
-
-    This handler processes validated DAO webhook payloads and creates
-    the corresponding DAO, extensions, and token in the database.
-    """
-
-    def __init__(self):
-        """Initialize the DAO webhook handler."""
-        super().__init__()
-        self.logger = configure_logger(self.__class__.__name__)
-        self.db = backend
-
-    async def handle(self, parsed_data: DAOWebhookPayload) -> Dict[str, Any]:
-        """Handle the parsed DAO webhook data.
- - Args: - parsed_data: The parsed and validated DAO webhook payload - - Returns: - Dict containing the result of handling the webhook with created entities - - Raises: - Exception: If there is an error creating any of the entities - """ - try: - self.logger.info(f"Handling DAO webhook for '{parsed_data.name}'") - - # Create the DAO - dao_create = DAOCreate( - name=parsed_data.name, - mission=parsed_data.mission, - description=parsed_data.description, - is_deployed=True, - is_broadcasted=True, - ) - - dao = self.db.create_dao(dao_create) - self.logger.info(f"Created DAO with ID: {dao.id}") - - # Create extensions if provided - extension_ids: List[UUID] = [] - if parsed_data.extensions: - for ext_data in parsed_data.extensions: - extension_create = ExtensionCreate( - dao_id=dao.id, - type=( - f"{ext_data.type}_{ext_data.subtype}" - if ext_data.subtype - else ext_data.type - ), - contract_principal=ext_data.contract_principal, - tx_id=ext_data.tx_id, - status=( - ContractStatus.DEPLOYED - if ext_data.success - else ContractStatus.FAILED - ), - ) - - extension = self.db.create_extension(extension_create) - extension_ids.append(extension.id) - self.logger.info(f"Created extension with ID: {extension.id}") - - # Create token if provided - token_id = None - if parsed_data.token: - token_create = TokenCreate( - dao_id=dao.id, - contract_principal=parsed_data.token.contract_principal, - tx_id=parsed_data.token.tx_id, - name=parsed_data.token.name, - description=parsed_data.token.description, - symbol=parsed_data.token.symbol, - decimals=parsed_data.token.decimals, - max_supply=parsed_data.token.max_supply, - uri=parsed_data.token.uri, - image_url=parsed_data.token.image_url, - x_url=parsed_data.token.x_url, - telegram_url=parsed_data.token.telegram_url, - website_url=parsed_data.token.website_url, - status=ContractStatus.DEPLOYED, - ) - - token = self.db.create_token(token_create) - token_id = token.id - self.logger.info(f"Created token with ID: {token.id}") - - # Prepare response - response = DAOWebhookResponse( - dao_id=dao.id, - extension_ids=extension_ids if extension_ids else None, - token_id=token_id, - ) - - return { - "success": True, - "message": f"Successfully created DAO '{dao.name}' with ID: {dao.id}", - "data": response.model_dump(), - } - - except Exception as e: - self.logger.error(f"Error handling DAO webhook: {str(e)}", exc_info=True) - raise diff --git a/services/webhooks/dao/models.py b/services/webhooks/dao/models.py deleted file mode 100644 index e1b1461c..00000000 --- a/services/webhooks/dao/models.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Models for DAO webhook service.""" - -from typing import List, Optional -from uuid import UUID - -from pydantic import BaseModel, Field, model_validator - - -class ExtensionData(BaseModel): - """Data model for extension creation via webhook.""" - - name: str - type: str - subtype: Optional[str] = None - source: Optional[str] = None - hash: Optional[str] = None - sender: Optional[str] = None - success: Optional[bool] = True - txId: Optional[str] = None - address: Optional[str] = None - contract_principal: Optional[str] = None - tx_id: Optional[str] = None - - @model_validator(mode="after") - def set_contract_info(self): - """Set contract_principal and tx_id from address and txId if not provided.""" - if not self.contract_principal and self.address: - self.contract_principal = self.address - if not self.tx_id and self.txId: - self.tx_id = self.txId - return self - - -class TokenData(BaseModel): - """Data model for token creation via webhook.""" 
- - contract_principal: Optional[str] = None - tx_id: Optional[str] = None - name: str - description: Optional[str] = None - symbol: str - decimals: int = 6 - max_supply: Optional[str] = None - uri: Optional[str] = None - image_url: Optional[str] = None - x_url: Optional[str] = None - telegram_url: Optional[str] = None - website_url: Optional[str] = None - - -class DAOWebhookPayload(BaseModel): - """Webhook payload for DAO creation.""" - - name: str - mission: Optional[str] = None - description: Optional[str] = None - extensions: Optional[List[ExtensionData]] = Field(default_factory=list) - token: Optional[TokenData] = None - - @model_validator(mode="after") - def extract_token_from_extensions(self): - """Extract token information from extensions if token is not provided.""" - if not self.token and self.extensions: - # Look for a TOKEN extension with subtype DAO - for ext in self.extensions: - if ext.type == "TOKEN" and ext.subtype == "DAO": - # Create a token from the extension data - self.token = TokenData( - contract_principal=ext.address, - tx_id=ext.txId, - name=f"{self.name} Token", - symbol="TKN", - decimals=6, - ) - break - return self - - -class DAOWebhookResponse(BaseModel): - """Response model for DAO creation webhook.""" - - dao_id: UUID - extension_ids: Optional[List[UUID]] = None - token_id: Optional[UUID] = None diff --git a/services/workflows/__init__.py b/services/workflows/__init__.py deleted file mode 100644 index e2e72953..00000000 --- a/services/workflows/__init__.py +++ /dev/null @@ -1,123 +0,0 @@ -"""Workflows package for LangGraph-based workflows.""" - -# Base workflow components -from services.workflows.base import ( - BaseWorkflow, - BaseWorkflowMixin, - ExecutionError, - LangGraphError, - PlanningCapability, - StateType, - StreamingError, - ValidationError, - VectorRetrievalCapability, -) - -# Enhanced ReAct workflow variants -from services.workflows.preplan_react import ( - PreplanLangGraphService, - PreplanReactWorkflow, - PreplanState, - execute_preplan_react_stream, -) - -# Special purpose workflows -from services.workflows.proposal_evaluation import ( - ProposalEvaluationWorkflow, - evaluate_and_vote_on_proposal, - evaluate_proposal_only, -) - -# Core messaging and streaming components -# Core ReAct workflow components -from services.workflows.react import ( - LangGraphService, - MessageContent, - MessageProcessor, - ReactState, - ReactWorkflow, - StreamingCallbackHandler, - execute_langgraph_stream, -) -from services.workflows.tweet_analysis import ( - TweetAnalysisWorkflow, - analyze_tweet, -) -from services.workflows.tweet_generator import ( - TweetGeneratorWorkflow, - generate_dao_tweet, -) -from services.workflows.vector_preplan_react import ( - VectorPreplanLangGraphService, - VectorPreplanReactWorkflow, - VectorPreplanState, - execute_vector_preplan_stream, -) -from services.workflows.vector_react import ( - VectorLangGraphService, - VectorReactState, - VectorReactWorkflow, - add_documents_to_vectors, - execute_vector_langgraph_stream, -) - -# Workflow service and factory -from services.workflows.workflow_service import ( - BaseWorkflowService, - WorkflowBuilder, - WorkflowFactory, - WorkflowService, - execute_workflow_stream, -) - -__all__ = [ - # Base workflow foundation - "BaseWorkflow", - "BaseWorkflowMixin", - "ExecutionError", - "LangGraphError", - "PlanningCapability", - "StateType", - "StreamingError", - "ValidationError", - "VectorRetrievalCapability", - # Workflow service layer - "BaseWorkflowService", - "WorkflowBuilder", - 
"WorkflowFactory", - "WorkflowService", - "execute_workflow_stream", - # Core messaging components - "MessageContent", - "MessageProcessor", - "StreamingCallbackHandler", - # Core ReAct workflow - "LangGraphService", - "ReactState", - "ReactWorkflow", - "execute_langgraph_stream", - # PrePlan ReAct workflow - "PreplanLangGraphService", - "PreplanReactWorkflow", - "PreplanState", - "execute_preplan_react_stream", - # Vector ReAct workflow - "VectorLangGraphService", - "VectorReactState", - "VectorReactWorkflow", - "add_documents_to_vectors", - "execute_vector_langgraph_stream", - # Vector PrePlan ReAct workflow - "VectorPreplanLangGraphService", - "VectorPreplanReactWorkflow", - "VectorPreplanState", - "execute_vector_preplan_stream", - # Special purpose workflows - "ProposalEvaluationWorkflow", - "TweetAnalysisWorkflow", - "TweetGeneratorWorkflow", - "analyze_tweet", - "evaluate_and_vote_on_proposal", - "evaluate_proposal_only", - "generate_dao_tweet", -] diff --git a/services/workflows/base.py b/services/workflows/base.py deleted file mode 100644 index 2259335e..00000000 --- a/services/workflows/base.py +++ /dev/null @@ -1,574 +0,0 @@ -"""Base workflow functionality and shared components for all workflow types.""" - -import json -from abc import ABC, abstractmethod -from typing import Any, Dict, Generic, List, Optional, TypeVar, Union - -from langchain.prompts import PromptTemplate -from langchain.schema import Document -from langchain_openai import ChatOpenAI, OpenAIEmbeddings -from langgraph.graph import Graph, StateGraph -from openai import OpenAI - -from backend.factory import backend -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -class LangGraphError(Exception): - """Base exception for LangGraph operations""" - - def __init__(self, message: str, details: Dict = None): - super().__init__(message) - self.details = details or {} - - -class StreamingError(LangGraphError): - """Raised when streaming operations fail""" - - pass - - -class ExecutionError(LangGraphError): - """Raised when graph execution fails""" - - pass - - -class ValidationError(LangGraphError): - """Raised when state validation fails""" - - pass - - -# Base state type for all workflows -StateType = TypeVar("StateType", bound=Dict[str, Any]) - - -class BaseWorkflow(Generic[StateType]): - """Base class for all LangGraph workflows. - - This class provides common functionality and patterns for all workflows. - Each workflow should inherit from this class and implement the required - methods. - """ - - def __init__( - self, - model_name: str = "gpt-4.1", - temperature: Optional[float] = 0.1, - streaming: bool = True, - callbacks: Optional[List[Any]] = None, - ): - """Initialize the workflow. 
- - Args: - model_name: LLM model to use - temperature: Temperature for LLM generation, can be a float or None - streaming: Whether to enable streaming - callbacks: Optional callback handlers - """ - self.llm = ChatOpenAI( - temperature=temperature, - model=model_name, - streaming=streaming, - stream_usage=True, - callbacks=callbacks or [], - ) - self.logger = configure_logger(self.__class__.__name__) - self.required_fields: List[str] = [] - self.model_name = model_name - self.temperature = temperature - - def _clean_llm_response(self, content: str) -> str: - """Clean the LLM response content and ensure valid JSON.""" - try: - # First try to parse as-is in case it's already valid JSON - json.loads(content) - return content.strip() - except json.JSONDecodeError: - # If not valid JSON, try to extract from markdown blocks - if "```json" in content: - json_content = content.split("```json")[1].split("```")[0].strip() - elif "```" in content: - json_content = content.split("```")[1].split("```")[0].strip() - else: - json_content = content.strip() - - # Replace any Python boolean values with JSON boolean values - json_content = json_content.replace("True", "true").replace( - "False", "false" - ) - - # Validate the cleaned JSON - try: - json.loads(json_content) - return json_content - except json.JSONDecodeError as e: - self.logger.error(f"Failed to parse JSON after cleaning: {str(e)}") - raise ValueError(f"Invalid JSON response from LLM: {str(e)}") - - def create_llm_with_callbacks(self, callbacks: List[Any]) -> ChatOpenAI: - """Create a new LLM instance with specified callbacks. - - This is useful when you need to create a new LLM instance with different - callbacks or tools. - - Args: - callbacks: List of callback handlers - - Returns: - A new ChatOpenAI instance with the specified callbacks - """ - return ChatOpenAI( - model=self.model_name, - temperature=self.temperature, - streaming=True, - stream_usage=True, - callbacks=callbacks, - ) - - def _create_prompt(self) -> PromptTemplate: - """Create the prompt template for this workflow.""" - raise NotImplementedError("Workflow must implement _create_prompt") - - def _create_graph(self) -> Union[Graph, StateGraph]: - """Create the workflow graph.""" - raise NotImplementedError("Workflow must implement _create_graph") - - def _validate_state(self, state: StateType) -> bool: - """Validate the workflow state. - - This method checks if all required fields are present in the state. - Override this method to add custom validation logic. - - Args: - state: The state to validate - - Returns: - True if the state is valid, False otherwise - """ - if not self.required_fields: - # If no required fields specified, assume validation passes - return True - - # Check that all required fields are present and have values - return all( - field in state and state[field] is not None - for field in self.required_fields - ) - - def get_missing_fields(self, state: StateType) -> List[str]: - """Get a list of missing required fields in the state. - - Args: - state: The state to check - - Returns: - List of missing field names - """ - if not self.required_fields: - return [] - - return [ - field - for field in self.required_fields - if field not in state or state[field] is None - ] - - async def execute(self, initial_state: StateType) -> Dict: - """Execute the workflow. 
- - Args: - initial_state: The initial state for the workflow - - Returns: - The final state after execution - - Raises: - ValidationError: If the initial state is invalid - ExecutionError: If the workflow execution fails - """ - try: - # Validate state - is_valid = self._validate_state(initial_state) - if not is_valid: - missing_fields = self.get_missing_fields(initial_state) - error_msg = ( - f"Invalid initial state. Missing required fields: {missing_fields}" - ) - self.logger.error(error_msg) - raise ValidationError(error_msg, {"missing_fields": missing_fields}) - - # Create and compile the graph - graph = self._create_graph() - if hasattr(graph, "compile"): - app = graph.compile() - else: - # Graph is already compiled - app = graph - - # Execute the workflow - self.logger.info(f"Executing workflow {self.__class__.__name__}") - result = await app.ainvoke(initial_state) - self.logger.info(f"Workflow {self.__class__.__name__} execution completed") - return result - - except ValidationError as e: - # Re-raise validation errors - raise e - except Exception as e: - self.logger.error(f"Workflow execution failed: {str(e)}", exc_info=True) - raise ExecutionError(f"Workflow execution failed: {str(e)}") - - -class BaseWorkflowMixin(ABC): - """Base mixin for adding capabilities to workflows. - - This is an abstract base class that defines the interface for - workflow capability mixins. Mixins can be combined to create - workflows with multiple capabilities. - """ - - @abstractmethod - def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: - """Integrate this capability with a graph. - - Args: - graph: The graph to integrate with - **kwargs: Additional arguments specific to this capability - """ - pass - - -class PlanningCapability(BaseWorkflowMixin): - """Mixin that adds planning capabilities to a workflow.""" - - async def create_plan(self, query: str, **kwargs) -> str: - """Create a plan based on the user's query. - - Args: - query: The user's query to plan for - **kwargs: Additional arguments (callback_handler, etc.) - - Returns: - The generated plan - """ - raise NotImplementedError("PlanningCapability must implement create_plan") - - def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: - """Integrate planning capability with a graph. - - This adds the planning capability to the graph by modifying - the entry point to first create a plan. 
- - Args: - graph: The graph to integrate with - **kwargs: Additional arguments specific to planning - """ - # Implementation depends on specific graph structure - raise NotImplementedError( - "PlanningCapability must implement integrate_with_graph" - ) - - -class VectorRetrievalCapability(BaseWorkflowMixin): - """Mixin that adds vector retrieval capabilities to a workflow.""" - - def __init__(self, *args, **kwargs): - """Initialize the vector retrieval capability.""" - # Initialize parent class if it exists - super().__init__(*args, **kwargs) if hasattr(super(), "__init__") else None - # Initialize our attributes - self._init_vector_retrieval() - - def _init_vector_retrieval(self) -> None: - """Initialize vector retrieval attributes if not already initialized.""" - if not hasattr(self, "collection_names"): - self.collection_names = ["knowledge_collection", "dao_collection"] - if not hasattr(self, "embeddings"): - self.embeddings = OpenAIEmbeddings() - if not hasattr(self, "vector_results_cache"): - self.vector_results_cache = {} - - async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Document]: - """Retrieve relevant documents from multiple vector stores. - - Args: - query: The query to search for - **kwargs: Additional arguments (collection_name, embeddings, etc.) - - Returns: - List of retrieved documents - """ - try: - # Ensure initialization - self._init_vector_retrieval() - - # Check cache first - if query in self.vector_results_cache: - logger.debug(f"Using cached vector results for query: {query}") - return self.vector_results_cache[query] - - all_documents = [] - limit_per_collection = kwargs.get("limit", 4) - logger.debug( - f"Searching vector store: query={query} | limit_per_collection={limit_per_collection}" - ) - - # Query each collection and gather results - for collection_name in self.collection_names: - try: - # Query vectors using the backend - vector_results = await backend.query_vectors( - collection_name=collection_name, - query_text=query, - limit=limit_per_collection, - embeddings=self.embeddings, - ) - - # Convert to LangChain Documents and add collection source - documents = [ - Document( - page_content=doc.get("page_content", ""), - metadata={ - **doc.get("metadata", {}), - "collection_source": collection_name, - }, - ) - for doc in vector_results - ] - - all_documents.extend(documents) - logger.debug( - f"Retrieved {len(documents)} documents from collection {collection_name}" - ) - except Exception as e: - logger.error( - f"Failed to retrieve from collection {collection_name}: {str(e)}", - exc_info=True, - ) - continue # Continue with other collections if one fails - - logger.debug( - f"Retrieved total of {len(all_documents)} documents from all collections" - ) - - # Cache the results - self.vector_results_cache[query] = all_documents - - return all_documents - except Exception as e: - logger.error(f"Vector store retrieval failed: {str(e)}", exc_info=True) - return [] - - def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: - """Integrate vector retrieval capability with a graph. - - This adds the vector retrieval capability to the graph by adding a node - that can perform vector searches when needed. 
- - Args: - graph: The graph to integrate with - **kwargs: Additional arguments specific to vector retrieval including: - - collection_names: List of collection names to search - - limit_per_collection: Number of results per collection - """ - # Add vector search node - graph.add_node("vector_search", self.retrieve_from_vector_store) - - # Add result processing node if needed - if "process_vector_results" not in graph.nodes: - graph.add_node("process_vector_results", self._process_vector_results) - graph.add_edge("vector_search", "process_vector_results") - - async def _process_vector_results( - self, vector_results: List[Document], **kwargs - ) -> Dict[str, Any]: - """Process vector search results. - - Args: - vector_results: Results from vector search - **kwargs: Additional processing arguments - - Returns: - Processed results with metadata - """ - return { - "results": vector_results, - "metadata": { - "num_vector_results": len(vector_results), - "collection_sources": list( - set( - doc.metadata.get("collection_source", "unknown") - for doc in vector_results - ) - ), - }, - } - - -class WebSearchCapability(BaseWorkflowMixin): - """Mixin that adds web search capabilities to a workflow using OpenAI Responses API.""" - - def __init__(self, *args, **kwargs): - """Initialize the web search capability.""" - # Initialize parent class if it exists - super().__init__(*args, **kwargs) if hasattr(super(), "__init__") else None - # Initialize our attributes - self._init_web_search() - - def _init_web_search(self) -> None: - """Initialize web search attributes if not already initialized.""" - if not hasattr(self, "search_results_cache"): - self.search_results_cache = {} - if not hasattr(self, "client"): - self.client = OpenAI() - - async def search_web(self, query: str, **kwargs) -> List[Dict[str, Any]]: - """Search the web using OpenAI Responses API. 
- - Args: - query: The search query - **kwargs: Additional search parameters like user_location and search_context_size - - Returns: - List of search results with content and metadata - """ - try: - # Ensure initialization - self._init_web_search() - - # Check cache first - if query in self.search_results_cache: - logger.info(f"Using cached results for query: {query}") - return self.search_results_cache[query] - - # Configure web search tool - tool_config = { - "type": "web_search_preview", - "search_context_size": kwargs.get("search_context_size", "medium"), - } - - # Add user location if provided - if "user_location" in kwargs: - tool_config["user_location"] = kwargs["user_location"] - - # Make the API call - response = self.client.responses.create( - model="gpt-4.1", tools=[tool_config], input=query - ) - - logger.debug(f"Web search response: {response}") - # Process the response into our document format - documents = [] - - # Access the output text directly - if hasattr(response, "output_text"): - text_content = response.output_text - source_urls = [] - - # Try to extract citations if available - if hasattr(response, "citations"): - source_urls = [ - { - "url": citation.url, - "title": getattr(citation, "title", ""), - "start_index": getattr(citation, "start_index", 0), - "end_index": getattr(citation, "end_index", 0), - } - for citation in response.citations - if hasattr(citation, "url") - ] - - # Ensure we always have at least one URL entry - if not source_urls: - source_urls = [ - { - "url": "No source URL available", - "title": "Generated Response", - "start_index": 0, - "end_index": len(text_content), - } - ] - - # Create document with content - doc = { - "page_content": text_content, - "metadata": { - "type": "web_search_result", - "source_urls": source_urls, - "query": query, - "timestamp": None, - }, - } - documents.append(doc) - - # Cache the results - self.search_results_cache[query] = documents - - logger.info(f"Web search completed with {len(documents)} results") - return documents - - except Exception as e: - logger.error(f"Web search failed: {str(e)}") - # Return a list with one empty result to prevent downstream errors - return [ - { - "page_content": "Web search failed to return results.", - "metadata": { - "type": "web_search_result", - "source_urls": [ - { - "url": "Error occurred during web search", - "title": "Error", - "start_index": 0, - "end_index": 0, - } - ], - "query": query, - "timestamp": None, - }, - } - ] - - def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: - """Integrate web search capability with a graph. - - This adds the web search capability to the graph by adding a node - that can perform web searches when needed. - - Args: - graph: The graph to integrate with - **kwargs: Additional arguments specific to web search including: - - search_context_size: "low", "medium", or "high" - - user_location: dict with type, country, city, region - """ - # Add web search node - graph.add_node("web_search", self.search_web) - - # Add result processing node if needed - if "process_results" not in graph.nodes: - graph.add_node("process_results", self._process_results) - graph.add_edge("web_search", "process_results") - - async def _process_results( - self, web_results: List[Dict[str, Any]], **kwargs - ) -> Dict[str, Any]: - """Process web search results. 
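Editor's note: the error path in `search_web` is worth highlighting — every query, including a failed one, yields at least one document with a populated `source_urls` list so downstream prompt formatting never sees an empty or irregular shape. A small sketch of that normalization, with `run_search` standing in for the OpenAI Responses call (which needs an API key and is not exercised here):

```python
from typing import Any, Dict, List


def run_search(query: str) -> str:
    """Stand-in for the Responses API call; raise here to simulate an outage."""
    return f"Summary of web results for {query!r}"


def search_web(query: str, cache: Dict[str, List[Dict[str, Any]]]) -> List[Dict[str, Any]]:
    if query in cache:
        return cache[query]
    try:
        doc = {
            "page_content": run_search(query),
            "metadata": {
                "type": "web_search_result",
                # Guarantee at least one source entry even when no citations come back.
                "source_urls": [{"url": "No source URL available", "title": "Generated Response"}],
                "query": query,
            },
        }
    except Exception:
        doc = {
            "page_content": "Web search failed to return results.",
            "metadata": {
                "type": "web_search_result",
                "source_urls": [{"url": "Error occurred during web search", "title": "Error"}],
                "query": query,
            },
        }
    cache[query] = [doc]
    return [doc]


print(search_web("DAO proposal precedent", {})[0]["metadata"]["source_urls"])
```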
- - Args: - web_results: Results from web search - **kwargs: Additional processing arguments - - Returns: - Processed results with metadata - """ - return { - "results": web_results, - "metadata": { - "num_web_results": len(web_results), - "source_types": ["web_search"], - }, - } diff --git a/services/workflows/preplan_react.py b/services/workflows/preplan_react.py deleted file mode 100644 index 8bd1f3e1..00000000 --- a/services/workflows/preplan_react.py +++ /dev/null @@ -1,481 +0,0 @@ -"""PrePlan ReAct workflow functionality. - -This workflow first creates a plan based on the user's query, then executes -the ReAct workflow to complete the task according to the plan. -""" - -import asyncio -from typing import ( - Annotated, - Any, - AsyncGenerator, - Dict, - List, - Optional, - TypedDict, - Union, -) - -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage -from langchain_openai import ChatOpenAI -from langgraph.graph import END, START, StateGraph -from langgraph.graph.message import add_messages -from langgraph.prebuilt import ToolNode - -from lib.logger import configure_logger -from services.workflows.base import BaseWorkflow, ExecutionError, PlanningCapability -from services.workflows.react import MessageProcessor, StreamingCallbackHandler - -# Remove this import to avoid circular dependencies -# from services.workflows.workflow_service import BaseWorkflowService, WorkflowBuilder - -logger = configure_logger(__name__) - - -class PreplanState(TypedDict): - """State for the PrePlan ReAct workflow.""" - - messages: Annotated[list, add_messages] - plan: Optional[str] - - -class PreplanReactWorkflow(BaseWorkflow[PreplanState], PlanningCapability): - """PrePlan ReAct workflow implementation. - - This workflow first creates a plan based on the user's query, - then executes the ReAct workflow to complete the task according to the plan. - """ - - def __init__( - self, - callback_handler: StreamingCallbackHandler, - tools: List[Any], - **kwargs, - ): - super().__init__(**kwargs) - self.callback_handler = callback_handler - self.tools = tools - self.required_fields = ["messages"] - # Set decisive behavior flag - self.decisive_behavior = True - - # Create a new LLM instance with the callback handler - self.llm = self.create_llm_with_callbacks([callback_handler]).bind_tools(tools) - - # Create a separate LLM for planning with streaming enabled - self.planning_llm = ChatOpenAI( - model="o4-mini", - streaming=True, # Enable streaming for the planning LLM - callbacks=[callback_handler], - ) - - # Store tool information for planning - self.tool_names = [] - if tools: - self.tool_names = [ - tool.name if hasattr(tool, "name") else str(tool) for tool in tools - ] - - # Additional attributes for planning - self.persona = None - self.tool_descriptions = None - - def _create_prompt(self) -> None: - """Not used in PrePlan ReAct workflow.""" - pass - - async def create_plan(self, query: str) -> str: - """Create a simple thought process plan based on the user's query.""" - # Create a more decisive planning prompt - planning_prompt = f""" - You are an AI assistant planning a decisive response to the user's query. - - Write a few short sentences as if you're taking notes in a notebook about: - - What the user is asking for - - What information or tools you'll use to complete the task - - The exact actions you'll take to fulfill the request - - AIBTC DAO Context Information: - You are an AI governance agent integrated with an AIBTC DAO. 
Your role is to interact with the DAO's smart contracts - on behalf of token holders, either by assisting human users or by acting autonomously within the DAO's rules. The DAO - is governed entirely by its token holders through proposals – members submit proposals, vote on them, and if a proposal passes, - it is executed on-chain. Always maintain the integrity of the DAO's decentralized process: never bypass on-chain governance, - and ensure all actions strictly follow the DAO's smart contract rules and parameters. - - Your responsibilities include: - 1. Helping users create and submit proposals to the DAO - 2. Guiding users through the voting process - 3. Explaining how DAO contract interactions work - 4. Preventing invalid actions and detecting potential exploits - 5. In autonomous mode, monitoring DAO state, proposing actions, and voting according to governance rules - - When interacting with users about the DAO, always: - - Retrieve contract addresses automatically instead of asking users - - Validate transactions before submission - - Present clear summaries of proposed actions - - Verify eligibility and check voting power - - Format transactions precisely according to blockchain requirements - - Provide confirmation and feedback after actions - - DAO Tools Usage: - For ANY DAO-related request, use the appropriate DAO tools to access real-time information: - - Use dao_list tool to retrieve all DAOs, their tokens, and extensions - - Use dao_search tool to find specific DAOs by name, description, token name, symbol, or contract ID - - Do NOT hardcode DAO information or assumptions about contract addresses - - Always query for the latest DAO data through the tools rather than relying on static information - - When analyzing user requests, determine if they're asking about a specific DAO or need a list of DAOs - - After retrieving DAO information, use it to accurately guide users through governance processes - - Examples of effective DAO tool usage: - 1. If user asks about voting on a proposal: First use dao_search to find the specific DAO, then guide them with the correct contract details - 2. If user asks to list available DAOs: Use dao_list to retrieve current DAOs and present them clearly - 3. If user wants to create a proposal: Use dao_search to get the DAO details first, then assist with the proposal creation using the current contract addresses - - Be decisive and action-oriented. Don't include phrases like "I would," "I could," or "I might." - Instead, use phrases like "I will," "I am going to," and "I'll execute." - Don't ask for confirmation before taking actions - assume the user wants you to proceed. 
- - User Query: {query} - """ - - # Add available tools to the planning prompt if available - if hasattr(self, "tool_names") and self.tool_names: - tool_info = "\n\nTools available to you:\n" - for tool_name in self.tool_names: - tool_info += f"- {tool_name}\n" - planning_prompt += tool_info - - # Add tool descriptions if available - if hasattr(self, "tool_descriptions"): - planning_prompt += self.tool_descriptions - - # Create planning messages, including persona if available - planning_messages = [] - - # If we're in the service context and persona is available, add it as a system message - if hasattr(self, "persona") and self.persona: - planning_messages.append(SystemMessage(content=self.persona)) - - # Add the planning prompt - planning_messages.append(HumanMessage(content=planning_prompt)) - - try: - logger.info("Creating thought process notes for user query") - - # Configure custom callback for planning to properly mark planning tokens - original_new_token = self.callback_handler.custom_on_llm_new_token - - # Create temporary wrapper to mark planning tokens - async def planning_token_wrapper(token, **kwargs): - # Add planning flag to tokens during the planning phase - if asyncio.iscoroutinefunction(original_new_token): - await original_new_token(token, planning_only=True, **kwargs) - else: - # If it's not a coroutine, assume it's a function that uses run_coroutine_threadsafe - loop = asyncio.get_running_loop() - asyncio.run_coroutine_threadsafe( - self.callback_handler.queue.put( - { - "type": "token", - "content": token, - "status": "planning", - "planning_only": True, - } - ), - loop, - ) - - # Set the temporary wrapper - self.callback_handler.custom_on_llm_new_token = planning_token_wrapper - - # Create a task to invoke the planning LLM - task = asyncio.create_task(self.planning_llm.ainvoke(planning_messages)) - - # Wait for the task to complete - response = await task - plan = response.content - - # Restore original callback - self.callback_handler.custom_on_llm_new_token = original_new_token - - logger.info("Thought process notes created successfully") - logger.debug(f"Notes content length: {len(plan)}") - - # Use the new process_step method to emit the plan with a planning status - await self.callback_handler.process_step( - content=plan, role="assistant", thought="Planning Phase" - ) - - return plan - except Exception as e: - # Restore original callback in case of error - if hasattr(self, "callback_handler") and hasattr( - self.callback_handler, "custom_on_llm_new_token" - ): - self.callback_handler.custom_on_llm_new_token = original_new_token - - logger.error(f"Failed to create plan: {str(e)}", exc_info=True) - # Let the LLM handle the planning naturally without a static fallback - raise - - def _create_graph(self) -> StateGraph: - """Create the PrePlan ReAct workflow graph.""" - logger.info("Creating PrePlan ReAct workflow graph") - tool_node = ToolNode(self.tools) - logger.debug(f"Created tool node with {len(self.tools)} tools") - - def should_continue(state: PreplanState) -> str: - messages = state["messages"] - last_message = messages[-1] - result = "tools" if last_message.tool_calls else END - logger.debug(f"Continue decision: {result}") - return result - - def call_model(state: PreplanState) -> Dict: - logger.debug("Calling model with current state") - messages = state["messages"] - - # Add the plan as a system message if it exists and hasn't been added yet - if state.get("plan") is not None and not any( - isinstance(msg, SystemMessage) and "thought" in 
msg.content.lower() - for msg in messages - ): - logger.info("Adding thought notes to messages as system message") - plan_message = SystemMessage( - content=f""" - Follow these decisive actions to address the user's query: - - {state["plan"]} - - Execute these steps directly without asking for confirmation. - Be decisive and action-oriented in your responses. - """ - ) - messages = [plan_message] + messages - else: - logger.debug("No thought notes to add or notes already added") - - # If decisive behavior is enabled and there's no plan-related system message, - # add a decisive behavior system message - if getattr(self, "decisive_behavior", False) and not any( - isinstance(msg, SystemMessage) for msg in messages - ): - logger.info("Adding decisive behavior instruction as system message") - decisive_message = SystemMessage( - content="Be decisive and take action without asking for confirmation. " - "When the user requests something, proceed directly with executing it." - ) - messages = [decisive_message] + messages - - logger.debug(f"Invoking LLM with {len(messages)} messages") - response = self.llm.invoke(messages) - logger.debug("Received model response") - logger.debug( - f"Response content length: {len(response.content) if hasattr(response, 'content') else 0}" - ) - return {"messages": [response]} - - workflow = StateGraph(PreplanState) - logger.debug("Created StateGraph") - - workflow.add_node("agent", call_model) - workflow.add_node("tools", tool_node) - workflow.add_edge(START, "agent") - workflow.add_conditional_edges("agent", should_continue) - workflow.add_edge("tools", "agent") - logger.info("Graph setup complete") - - return workflow - - def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: - """Integrate planning capability with the graph. - - Args: - graph: The graph to integrate with - **kwargs: Additional arguments - """ - # Implementation would modify the graph to include planning step - # before the main execution flow - pass - - -class PreplanLangGraphService: - """Service for executing PrePlan LangGraph operations""" - - def __init__(self): - # Initialize message processor here - self.message_processor = MessageProcessor() - - def setup_callback_handler(self, queue, loop): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService - - # Use the static method instead of instantiating BaseWorkflowService - return BaseWorkflowService.create_callback_handler(queue, loop) - - async def stream_task_results(self, task, queue): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService - - # Use the static method instead of instantiating BaseWorkflowService - async for chunk in BaseWorkflowService.stream_results_from_task( - task=task, callback_queue=queue, logger_name=self.__class__.__name__ - ): - yield chunk - - async def _execute_stream_impl( - self, - messages: List[Union[SystemMessage, HumanMessage, AIMessage]], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - **kwargs, - ) -> AsyncGenerator[Dict, None]: - """Execute a PrePlan React stream implementation. 
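Editor's note: the `should_continue` router and graph wiring above implement the standard ReAct loop in LangGraph — the agent node runs the tool-bound model, and a conditional edge routes back through the tool node for as long as the last message carries tool calls. A stripped-down, stubbed sketch of that wiring (no real LLM; the stub agent returns a message without tool calls, so the loop ends after one pass):

```python
from typing import Annotated, TypedDict

from langchain_core.messages import AIMessage
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages


class DemoState(TypedDict):
    messages: Annotated[list, add_messages]  # add_messages appends instead of overwriting


def agent(state: DemoState) -> dict:
    # A real workflow invokes the tool-bound LLM here; the stub answers directly.
    return {"messages": [AIMessage(content="final answer")]}


def tool_node(state: DemoState) -> dict:
    return {"messages": [AIMessage(content="tool output")]}


def should_continue(state: DemoState) -> str:
    last = state["messages"][-1]
    return "tools" if getattr(last, "tool_calls", None) else END


graph = StateGraph(DemoState)
graph.add_node("agent", agent)
graph.add_node("tools", tool_node)
graph.add_edge(START, "agent")
graph.add_conditional_edges("agent", should_continue)
graph.add_edge("tools", "agent")

app = graph.compile()
print(app.invoke({"messages": []})["messages"][-1].content)  # "final answer"
```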
- - Args: - messages: Processed messages - input_str: Current user input - persona: Optional persona to use - tools_map: Optional tools to use - **kwargs: Additional arguments - - Returns: - Async generator of result chunks - """ - try: - # Import here to avoid circular dependencies - from services.workflows.workflow_service import WorkflowBuilder - - # Setup queue and callbacks - callback_queue = asyncio.Queue() - loop = asyncio.get_running_loop() - - # Setup callback handler - callback_handler = self.setup_callback_handler(callback_queue, loop) - - # Create workflow using builder pattern - workflow_builder = ( - WorkflowBuilder(PreplanReactWorkflow) - .with_callback_handler(callback_handler) - .with_tools(list(tools_map.values()) if tools_map else []) - ) - - workflow = workflow_builder.build() - - # Store persona and tool information for planning - if persona: - # Append decisiveness guidance to the persona - decisive_guidance = "\n\nBe decisive and take action without asking for confirmation. When the user requests something, proceed directly with executing it rather than asking if they want you to do it." - workflow.persona = persona + decisive_guidance - - # Store available tool names for planning - if tools_map: - workflow.tool_names = list(tools_map.keys()) - # Add tool descriptions to planning prompt - tool_descriptions = "\n\nTOOL DESCRIPTIONS:\n" - for name, tool in tools_map.items(): - description = getattr( - tool, "description", "No description available" - ) - tool_descriptions += f"- {name}: {description}\n" - workflow.tool_descriptions = tool_descriptions - - try: - # The thought notes will be streamed through callbacks - plan = await workflow.create_plan(input_str) - - except Exception as e: - logger.error(f"Planning failed, continuing with execution: {str(e)}") - yield { - "type": "token", - "content": "Proceeding directly to answer...\n\n", - } - # No plan will be provided, letting the LLM handle the task naturally - plan = None - - # Create graph and compile - graph = workflow._create_graph() - runnable = graph.compile() - logger.info("Graph compiled successfully") - - # Add the plan to the initial state - initial_state = {"messages": messages} - if plan is not None: - initial_state["plan"] = plan - logger.info("Added plan to initial state") - else: - logger.warning("No plan available for initial state") - - # Set up configuration with callbacks - config = {"callbacks": [callback_handler]} - logger.debug("Configuration set up with callbacks") - - # Execute workflow with callbacks config - logger.info("Creating task to execute workflow") - task = asyncio.create_task(runnable.ainvoke(initial_state, config=config)) - - # Stream results - async for chunk in self.stream_task_results(task, callback_queue): - yield chunk - - except Exception as e: - logger.error( - f"Failed to execute PrePlan ReAct stream: {str(e)}", exc_info=True - ) - raise ExecutionError(f"PrePlan ReAct stream execution failed: {str(e)}") - - # Add execute_stream method to maintain the same interface as BaseWorkflowService - async def execute_stream( - self, - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - **kwargs, - ) -> AsyncGenerator[Dict, None]: - """Execute a workflow stream. - - This processes the history and delegates to _execute_stream_impl. 
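Editor's note: both service classes stream by pairing an `asyncio.Queue` (fed by the callback handler) with the task that runs the compiled graph; chunks are yielded while the task is alive and the queue drains. The real implementation lives in `BaseWorkflowService.stream_results_from_task` (not shown in this diff); the following is a simplified, self-contained sketch of the producer/consumer pattern, with `fake_workflow` standing in for the graph execution:

```python
import asyncio
from typing import Any, AsyncGenerator, Dict


async def stream_results_from_task(
    task: asyncio.Task, queue: asyncio.Queue
) -> AsyncGenerator[Dict[str, Any], None]:
    """Yield chunks written by callbacks while the workflow task is still running."""
    while True:
        if task.done() and queue.empty():
            break
        try:
            yield await asyncio.wait_for(queue.get(), timeout=0.1)
        except asyncio.TimeoutError:
            continue
    await task  # surface any exception raised inside the workflow task


async def fake_workflow(queue: asyncio.Queue) -> str:
    for token in ("plan", "act", "done"):
        await queue.put({"type": "token", "content": token})
        await asyncio.sleep(0)
    return "ok"


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    task = asyncio.create_task(fake_workflow(queue))
    async for chunk in stream_results_from_task(task, queue):
        print(chunk)


asyncio.run(main())
```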
- """ - # Process messages - filtered_content = self.message_processor.extract_filtered_content(history) - messages = self.message_processor.convert_to_langchain_messages( - filtered_content, input_str, persona - ) - - # Call the implementation - async for chunk in self._execute_stream_impl( - messages=messages, - input_str=input_str, - persona=persona, - tools_map=tools_map, - **kwargs, - ): - yield chunk - - # Keep the old method for backward compatibility - async def execute_preplan_react_stream( - self, - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - ) -> AsyncGenerator[Dict, None]: - """Execute a PrePlan ReAct stream using LangGraph.""" - # Call the new method - async for chunk in self.execute_stream(history, input_str, persona, tools_map): - yield chunk - - -# Facade function for compatibility with the API -async def execute_preplan_react_stream( - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, -) -> AsyncGenerator[Dict, None]: - """Execute a PrePlan ReAct stream using LangGraph with optional persona.""" - service = PreplanLangGraphService() - async for chunk in service.execute_stream(history, input_str, persona, tools_map): - yield chunk diff --git a/services/workflows/proposal_evaluation.py b/services/workflows/proposal_evaluation.py deleted file mode 100644 index 5e9f8419..00000000 --- a/services/workflows/proposal_evaluation.py +++ /dev/null @@ -1,1111 +0,0 @@ -"""Proposal evaluation workflow.""" - -import binascii -from typing import Dict, List, Optional, TypedDict - -from langchain.callbacks.base import BaseCallbackHandler -from langchain.prompts import PromptTemplate -from langchain_core.documents import Document -from langgraph.graph import END, Graph, StateGraph -from pydantic import BaseModel, Field - -from backend.factory import backend -from backend.models import ( - UUID, - ExtensionFilter, - Profile, - Prompt, - PromptFilter, - ProposalType, - QueueMessageFilter, - QueueMessageType, -) -from lib.hiro import HiroApi -from lib.logger import configure_logger -from services.workflows.base import ( - BaseWorkflow, - VectorRetrievalCapability, - WebSearchCapability, -) -from services.workflows.vector_react import VectorLangGraphService, VectorReactState -from tools.dao_ext_action_proposals import VoteOnActionProposalTool -from tools.tools_factory import filter_tools_by_names, initialize_tools - -logger = configure_logger(__name__) - - -class ProposalEvaluationOutput(BaseModel): - """Output model for proposal evaluation.""" - - approve: bool = Field( - description="Whether to approve (true) or reject (false) the proposal" - ) - confidence_score: float = Field( - description="The confidence score for the evaluation (0.0-1.0)" - ) - reasoning: str = Field(description="The reasoning behind the evaluation decision") - - -class EvaluationState(TypedDict): - """State for the proposal evaluation flow.""" - - action_proposals_contract: str - action_proposals_voting_extension: str - proposal_id: int - proposal_data: Dict - dao_info: Optional[Dict] - approve: bool - confidence_score: float - reasoning: str - vote_result: Optional[Dict] - wallet_id: Optional[UUID] - confidence_threshold: float - auto_vote: bool - formatted_prompt: str - agent_prompts: List[Dict] - vector_results: Optional[List[Dict]] - recent_tweets: Optional[List[Dict]] - web_search_results: Optional[List[Dict]] # Add field for web search results - treasury_balance: Optional[float] - token_usage: 
Optional[Dict] # Add field for token usage tracking - model_info: Optional[Dict] # Add field for model information - - -class ProposalEvaluationWorkflow( - BaseWorkflow[EvaluationState], VectorRetrievalCapability, WebSearchCapability -): - """Workflow for evaluating DAO proposals and voting automatically.""" - - def __init__( - self, - collection_names: Optional[List[str]] = None, - model_name: str = "gpt-4.1", - temperature: Optional[float] = 0.1, - **kwargs, - ): - """Initialize the workflow. - - Args: - collection_names: Optional list of collection names to search - model_name: The model to use for evaluation - temperature: Optional temperature setting for the model - **kwargs: Additional arguments passed to parent - """ - super().__init__(model_name=model_name, temperature=temperature, **kwargs) - self.collection_names = collection_names or [ - "knowledge_collection", - "dao_collection", - ] - self.required_fields = ["proposal_id", "proposal_data"] - self.logger.debug( - f"Initialized workflow: collections={self.collection_names} | model={model_name} | temperature={temperature}" - ) - - def _create_prompt(self) -> PromptTemplate: - """Create the evaluation prompt template.""" - return PromptTemplate( - input_variables=[ - "proposal_data", - "dao_info", - "treasury_balance", - "contract_source", - "agent_prompts", - "vector_context", - "recent_tweets", - "web_search_results", - ], - template=""" - You are a DAO proposal evaluator. Your task is to analyze the proposal and determine whether to vote FOR or AGAINST it. - - # 1. AGENT-SPECIFIC INSTRUCTIONS (HIGHEST PRIORITY) - {agent_prompts} - - If no agent-specific instructions are provided, explicitly state: "No agent-specific instructions provided." - You MUST explain how each instruction influenced your decision. - - # 2. PROPOSAL INFORMATION - {proposal_data} - - # 3. DAO CONTEXT - {dao_info} - - # 4. TREASURY INFORMATION - {treasury_balance} - - # 5. AIBTC CHARTER - Core Values: Curiosity, Truth Maximizing, Humanity's Best Interests, Transparency, Resilience, Collaboration - Mission: Elevate human potential through Autonomous Intelligence on Bitcoin - Guardrails: Decentralized Governance, Smart Contract accountability - - # 6. CONTRACT SOURCE (for core proposals) - {contract_source} - - # 7. EVALUATION CRITERIA - For Core Proposals: - - Security implications - - Mission alignment - - Vulnerability assessment - - Impact analysis - - For Action Proposals: - - Parameter validation - - Resource implications - - Security considerations - - Alignment with DAO goals - - # 8. CONFIDENCE SCORING RUBRIC - You MUST choose one of these confidence bands: - - 0.0-0.2: Extremely low confidence (major red flags or insufficient information) - - 0.3-0.4: Low confidence (significant concerns or unclear implications) - - 0.5-0.6: Moderate confidence (some concerns but manageable) - - 0.7-0.8: High confidence (minor concerns if any) - - 0.9-1.0: Very high confidence (clear positive alignment) - - # 9. 
QUALITY STANDARDS - Your evaluation must uphold clarity, reasoning, and respect for the DAO's voice: - • Be clear and specific — avoid vagueness or filler - • Use a consistent tone, but reflect the DAO's personality if known - • Avoid casual throwaway phrases, sarcasm, or hype - • Don't hedge — take a position and justify it clearly - • Make every point logically sound and backed by facts or context - • Cite relevant parts of the proposal, DAO mission, or prior actions - • Use terms accurately — don't fake precision - • Keep structure clean and easy to follow - - # 10. VECTOR CONTEXT - {vector_context} - - # 11. RECENT DAO TWEETS - {recent_tweets} - - # 12. WEB SEARCH RESULTS - {web_search_results} - - # OUTPUT FORMAT - Provide your evaluation in this exact JSON format: - {{ - "approve": boolean, // true for FOR, false for AGAINST - "confidence_score": float, // MUST be from the confidence bands above - "reasoning": string // Brief, professional explanation addressing: - // 1. How agent instructions were applied - // 2. How DAO context influenced decision - // 3. How AIBTC Charter alignment was considered - // 4. Key factors in confidence score selection - // Must be clear, precise, and well-structured - }} - """, - ) - - def _create_graph(self) -> Graph: - """Create the evaluation graph.""" - prompt = self._create_prompt() - - # Create evaluation node - async def evaluate_proposal(state: EvaluationState) -> EvaluationState: - """Evaluate the proposal and determine how to vote.""" - try: - # Get proposal data from state - proposal_data = state["proposal_data"] - dao_id = state.get("dao_info", {}).get("id") - - # Perform web search for relevant context - try: - # Create search query from proposal data - web_search_query = f"DAO proposal {proposal_data.get('type', 'unknown')} - {proposal_data.get('parameters', '')}" - - # Use web search capability - web_search_results = await self.search_web( - query=web_search_query, - search_context_size="medium", # Use medium context size for balanced results - ) - - # Update state with web search results - state["web_search_results"] = web_search_results - self.logger.debug( - f"Web search query: {web_search_query} | Results count: {len(web_search_results)}" - ) - self.logger.debug( - f"Retrieved {len(web_search_results)} web search results" - ) - except Exception as e: - self.logger.error( - f"Failed to perform web search: {str(e)}", exc_info=True - ) - state["web_search_results"] = [] - - # Fetch recent tweets from queue if dao_id exists - recent_tweets = [] - if dao_id: - try: - # Add debug logging for dao_id - self.logger.debug(f"Fetching tweets for DAO ID: {dao_id}") - - queue_messages = backend.list_queue_messages( - QueueMessageFilter( - type=QueueMessageType.TWEET, - dao_id=dao_id, - is_processed=True, - ) - ) - # Log the number of messages found - self.logger.debug(f"Found {len(queue_messages)} queue messages") - - # Sort by created_at and take last 5 - sorted_messages = sorted( - queue_messages, key=lambda x: x.created_at, reverse=True - )[:5] - self.logger.debug(f"After sorting, have {len(sorted_messages)} messages") - - recent_tweets = [ - { - "created_at": msg.created_at, - "message": msg.message.get('message', 'No text available') if isinstance(msg.message, dict) else msg.message, - "tweet_id": msg.tweet_id, - } - for msg in sorted_messages - ] - self.logger.debug(f"Retrieved tweets: {recent_tweets}") - self.logger.debug( - f"Found {len(recent_tweets)} recent tweets for DAO {dao_id}" - ) - except Exception as e: - self.logger.error( - 
f"Failed to fetch recent tweets: {str(e)}", exc_info=True - ) - recent_tweets = [] - - # Update state with recent tweets - state["recent_tweets"] = recent_tweets - - # If this is a core proposal, fetch the contract source - contract_source = "" - if proposal_data.get("type") == "core" and proposal_data.get( - "proposal_contract" - ): - # Split contract address into parts - parts = proposal_data["proposal_contract"].split(".") - if len(parts) >= 2: - contract_address = parts[0] - contract_name = parts[1] - - # Use HiroApi to fetch contract source - try: - api = HiroApi() - result = api.get_contract_source( - contract_address, contract_name - ) - if "source" in result: - contract_source = result["source"] - self.logger.debug( - f"Retrieved contract source for {contract_address}.{contract_name}" - ) - else: - self.logger.warning( - f"Contract source not found in API response: {result}" - ) - except Exception as e: - self.logger.error( - f"Failed to fetch contract source: {str(e)}", - exc_info=True, - ) - else: - self.logger.warning( - f"Invalid contract address format: {proposal_data['proposal_contract']}" - ) - - # Retrieve relevant context from vector store - try: - # Create search query from proposal data - search_query = f"Proposal type: {proposal_data.get('type')} - {proposal_data.get('parameters', '')}" - - # Use vector retrieval capability - vector_results = await self.retrieve_from_vector_store( - query=search_query, limit=5 # Get top 5 most relevant documents - ) - - # Update state with vector results - state["vector_results"] = vector_results - self.logger.debug( - f"Searching vector store with query: {search_query} | Collection count: {len(self.collection_names)}" - ) - self.logger.debug(f"Vector search results: {vector_results}") - self.logger.debug( - f"Retrieved {len(vector_results)} relevant documents from vector store" - ) - - # Format vector context for prompt - vector_context = "\n\n".join( - [ - f"Related Context {i+1}:\n{doc.page_content}" - for i, doc in enumerate(vector_results) - ] - ) - except Exception as e: - self.logger.error( - f"Failed to retrieve from vector store: {str(e)}", exc_info=True - ) - vector_context = ( - "No additional context available from vector store." - ) - - # Format prompt with state - self.logger.debug("Preparing evaluation prompt...") - - # Format agent prompts as a string - agent_prompts_str = "No agent-specific instructions available." - if state.get("agent_prompts"): - self.logger.debug(f"Raw agent prompts: {state['agent_prompts']}") - if ( - isinstance(state["agent_prompts"], list) - and state["agent_prompts"] - ): - # Just use the prompt text directly since that's what we're storing - agent_prompts_str = "\n\n".join(state["agent_prompts"]) - self.logger.debug( - f"Formatted agent prompts: {agent_prompts_str}" - ) - else: - self.logger.warning( - f"Invalid agent prompts format: {type(state['agent_prompts'])}" - ) - else: - self.logger.debug("No agent prompts found in state") - - # Format web search results for prompt - web_search_content = "No relevant web search results found." 
- if state.get("web_search_results"): - web_search_content = "\n\n".join( - [ - f"Web Result {i+1}:\n{result['page_content']}\nSource: {result['metadata']['source_urls'][0]['url'] if result['metadata']['source_urls'] else 'Unknown'}" - for i, result in enumerate(state["web_search_results"]) - ] - ) - - # Update formatted prompt with web search results - formatted_prompt = self._create_prompt().format( - proposal_data=proposal_data, - dao_info=state.get( - "dao_info", "No additional DAO information available." - ), - treasury_balance=state.get("treasury_balance"), - contract_source=contract_source, - agent_prompts=agent_prompts_str, - vector_context=vector_context, - recent_tweets=( - "\n".join( - [ - f"Tweet {i+1} ({tweet['created_at']}): {tweet['message']}" - for i, tweet in enumerate(recent_tweets) - ] - ) - if recent_tweets - else "No recent tweets available." - ), - web_search_results=web_search_content, - ) - - # Get evaluation from LLM - self.logger.debug("Starting LLM evaluation...") - structured_output = self.llm.with_structured_output( - ProposalEvaluationOutput, - include_raw=True, # Include raw response to get token usage - ) - - # Invoke LLM with formatted prompt - result = structured_output.invoke(formatted_prompt) - - # Extract the parsed result and token usage from raw response - self.logger.debug( - f"Raw LLM result structure: {type(result).__name__} | Has parsed: {'parsed' in result if isinstance(result, dict) else False}" - ) - parsed_result = result["parsed"] if isinstance(result, dict) else result - model_info = {"name": self.model_name, "temperature": self.temperature} - - if isinstance(result, dict) and "raw" in result: - raw_msg = result["raw"] - # Extract token usage - if hasattr(raw_msg, "usage_metadata"): - token_usage = raw_msg.usage_metadata - self.logger.debug( - f"Token usage details: input={token_usage.get('input_tokens', 0)} | output={token_usage.get('output_tokens', 0)} | total={token_usage.get('total_tokens', 0)}" - ) - else: - self.logger.warning("No usage_metadata found in raw response") - token_usage = { - "input_tokens": 0, - "output_tokens": 0, - "total_tokens": 0, - } - else: - self.logger.warning("No raw response available") - token_usage = { - "input_tokens": 0, - "output_tokens": 0, - "total_tokens": 0, - } - - self.logger.debug(f"Parsed evaluation result: {parsed_result}") - - # Update state - state["formatted_prompt"] = formatted_prompt - state["approve"] = parsed_result.approve - state["confidence_score"] = parsed_result.confidence_score - state["reasoning"] = parsed_result.reasoning - state["token_usage"] = token_usage - state["model_info"] = model_info - - # Calculate token costs - token_costs = calculate_token_cost(token_usage, model_info["name"]) - - # Log final evaluation summary - self.logger.debug( - f"Evaluation complete: Decision={'APPROVE' if parsed_result.approve else 'REJECT'} | Confidence={parsed_result.confidence_score:.2f} | Model={model_info['name']} (temp={model_info['temperature']}) | Tokens={token_usage} | Cost=${token_costs['total_cost']:.4f}" - ) - self.logger.debug(f"Full reasoning: {parsed_result.reasoning}") - - return state - except Exception as e: - self.logger.error( - f"Error in evaluate_proposal: {str(e)}", exc_info=True - ) - state["approve"] = False - state["confidence_score"] = 0.0 - state["reasoning"] = f"Error during evaluation: {str(e)}" - return state - - # Create decision node - async def should_vote(state: EvaluationState) -> str: - """Decide whether to vote based on confidence threshold.""" - try: - 
self.logger.debug( - f"Deciding vote: auto_vote={state['auto_vote']} | confidence={state['confidence_score']} | threshold={state['confidence_threshold']}" - ) - - if not state["auto_vote"]: - self.logger.debug("Auto-vote is disabled, skipping vote") - return "skip_vote" - - if state["confidence_score"] >= state["confidence_threshold"]: - self.logger.debug( - f"Confidence score {state['confidence_score']} meets threshold {state['confidence_threshold']}, proceeding to vote" - ) - return "vote" - else: - self.logger.debug( - f"Confidence score {state['confidence_score']} below threshold {state['confidence_threshold']}, skipping vote" - ) - return "skip_vote" - except Exception as e: - self.logger.error(f"Error in should_vote: {str(e)}", exc_info=True) - return "skip_vote" - - # Create voting node using VectorReact workflow - async def vote_on_proposal(state: EvaluationState) -> EvaluationState: - """Vote on the proposal using VectorReact workflow.""" - try: - self.logger.debug( - f"Setting up VectorReact workflow: proposal_id={state['proposal_id']} | vote={state['approve']}" - ) - - # Set up the voting tool - vote_tool = VoteOnActionProposalTool(wallet_id=state["wallet_id"]) - tools_map = {"dao_action_vote_on_proposal": vote_tool} - - # Create a user input message that instructs the LLM what to do - vote_instruction = f"I need you to vote on a DAO proposal with ID {state['proposal_id']} in the contract {state['action_proposals_contract']}. Please vote {'FOR' if state['approve'] else 'AGAINST'} the proposal. Use the dao_action_vote_on_proposal tool to submit the vote." - - # Create VectorLangGraph service with collections - service = VectorLangGraphService( - collection_names=self.collection_names, - ) - - # History with system message only - history = [ - { - "role": "system", - "content": "You are a helpful assistant tasked with voting on DAO proposals. 
Follow the instructions precisely.", - } - ] - - self.logger.debug("Executing VectorReact workflow for voting...") - - # Collect response chunks - response_chunks = [] - vote_result = None - - # Execute the VectorReact workflow - async for chunk in service.execute_stream( - history=history, - input_str=vote_instruction, - tools_map=tools_map, - ): - response_chunks.append(chunk) - self.logger.debug(f"VectorReact chunk: {chunk}") - - # Extract tool results - if ( - chunk.get("type") == "tool" - and chunk.get("tool") == "dao_action_vote_on_proposal" - ): - if "output" in chunk: - vote_result = chunk.get("output") - self.logger.debug(f"Vote result: {vote_result}") - - # Update state with vote result and vector results - state["vote_result"] = { - "success": vote_result is not None, - "output": vote_result, - } - state["vector_results"] = [ - chunk.get("vector_results", []) - for chunk in response_chunks - if chunk.get("vector_results") - ] - - return state - except Exception as e: - self.logger.error(f"Error in vote_on_proposal: {str(e)}", exc_info=True) - state["vote_result"] = { - "success": False, - "error": f"Error during voting: {str(e)}", - } - return state - - # Create skip voting node - async def skip_voting(state: EvaluationState) -> EvaluationState: - """Skip voting and just return the evaluation.""" - try: - self.logger.debug("Vote skipped: reason=threshold_or_setting") - state["vote_result"] = { - "success": True, - "message": "Voting skipped due to confidence threshold or auto_vote setting", - "data": None, - } - return state - except Exception as e: - self.logger.error(f"Error in skip_voting: {str(e)}", exc_info=True) - state["vote_result"] = { - "success": True, - "message": f"Voting skipped (with error: {str(e)})", - "data": None, - } - return state - - # Create the graph - workflow = StateGraph(EvaluationState) - - # Add nodes - workflow.add_node("evaluate", evaluate_proposal) - workflow.add_node("vote", vote_on_proposal) - workflow.add_node("skip_vote", skip_voting) - - # Set up the conditional branching - workflow.set_entry_point("evaluate") - workflow.add_conditional_edges( - "evaluate", - should_vote, - { - "vote": "vote", - "skip_vote": "skip_vote", - }, - ) - workflow.add_edge("vote", END) - workflow.add_edge("skip_vote", END) - - return workflow.compile() - - def _validate_state(self, state: EvaluationState) -> bool: - """Validate the workflow state.""" - required_fields = ["proposal_id", "proposal_data"] - - # Log the state for debugging - self.logger.debug( - f"Validating state: proposal_id={state.get('proposal_id')} | proposal_type={state.get('proposal_data', {}).get('type', 'unknown')}" - ) - - # Check all fields and log problems - for field in required_fields: - if field not in state: - self.logger.error(f"Missing required field: {field}") - return False - elif not state[field]: - self.logger.error(f"Empty required field: {field}") - return False - - # Get proposal type - proposal_type = state["proposal_data"].get("type", ProposalType.ACTION) - - # Validate based on proposal type - if proposal_type == ProposalType.ACTION: - # Action proposals require action_proposals_contract and parameters - if not state.get("action_proposals_contract"): - self.logger.error( - "Missing action_proposals_contract for action proposal" - ) - return False - if not state["proposal_data"].get("parameters"): - self.logger.error("No parameters field in action proposal data") - return False - elif proposal_type == ProposalType.CORE: - # Core proposals require proposal_contract - if not 
state["proposal_data"].get("proposal_contract"): - self.logger.error("Missing proposal_contract for core proposal") - return False - else: - self.logger.error(f"Invalid proposal type: {proposal_type}") - return False - - self.logger.debug("State validation successful") - return True - - -def get_proposal_evaluation_tools( - profile: Optional[Profile] = None, agent_id: Optional[UUID] = None -): - """Get the tools needed for proposal evaluation. - - Args: - profile: Optional user profile - agent_id: Optional agent ID - - Returns: - Dictionary of filtered tools for proposal evaluation - """ - # Initialize all tools - all_tools = initialize_tools(profile=profile, agent_id=agent_id) - logger.debug(f"Available tools: {', '.join(all_tools.keys())}") - - # Filter to only include the tools we need - required_tools = [ - "dao_action_get_proposal", - "dao_action_vote_on_proposal", - "dao_action_get_voting_power", - "dao_action_get_voting_configuration", - "database_get_dao_get_by_name", # Try old name - "dao_search", # Try new name - ] - - filtered_tools = filter_tools_by_names(required_tools, all_tools) - logger.debug(f"Using tools: {', '.join(filtered_tools.keys())}") - - return filtered_tools - - -def decode_hex_parameters(hex_string: Optional[str]) -> Optional[str]: - """Decodes a hexadecimal-encoded string if valid.""" - if not hex_string: - return None - if hex_string.startswith("0x"): - hex_string = hex_string[2:] # Remove "0x" prefix - try: - decoded_bytes = binascii.unhexlify(hex_string) - decoded_string = decoded_bytes.decode( - "utf-8", errors="ignore" - ) # Decode as UTF-8 - logger.debug(f"Successfully decoded hex string: {hex_string[:20]}...") - return decoded_string - except (binascii.Error, UnicodeDecodeError) as e: - logger.warning(f"Failed to decode hex string: {str(e)}") - return None # Return None if decoding fails - - -def calculate_token_cost( - token_usage: Dict[str, int], model_name: str -) -> Dict[str, float]: - """Calculate the cost of token usage based on current pricing. 
- - Args: - token_usage: Dictionary containing input_tokens and output_tokens - model_name: Name of the model used - - Returns: - Dictionary containing cost breakdown and total cost - """ - # Current pricing per million tokens (as of August 2024) - MODEL_PRICES = { - "gpt-4o": { - "input": 2.50, # $2.50 per million input tokens - "output": 10.00, # $10.00 per million output tokens - }, - "gpt-4.1": { - "input": 2.00, # $2.00 per million input tokens - "output": 8.00, # $8.00 per million output tokens - }, - "gpt-4.1-mini": { - "input": 0.40, # $0.40 per million input tokens - "output": 1.60, # $1.60 per million output tokens - }, - "gpt-4.1-nano": { - "input": 0.10, # $0.10 per million input tokens - "output": 0.40, # $0.40 per million output tokens - }, - # Default to gpt-4.1 pricing if model not found - "default": { - "input": 2.00, - "output": 8.00, - }, - } - - # Get pricing for the model, default to gpt-4.1 pricing if not found - model_prices = MODEL_PRICES.get(model_name.lower(), MODEL_PRICES["default"]) - - # Extract token counts, ensuring we get integers and handle None values - try: - input_tokens = int(token_usage.get("input_tokens", 0)) - output_tokens = int(token_usage.get("output_tokens", 0)) - except (TypeError, ValueError) as e: - logger.error(f"Error converting token counts to integers: {str(e)}") - input_tokens = 0 - output_tokens = 0 - - # Calculate costs with more precision - input_cost = (input_tokens / 1_000_000.0) * model_prices["input"] - output_cost = (output_tokens / 1_000_000.0) * model_prices["output"] - total_cost = input_cost + output_cost - - # Create detailed token usage breakdown - token_details = { - "input_tokens": input_tokens, - "output_tokens": output_tokens, - "total_tokens": input_tokens + output_tokens, - "model_name": model_name, - "input_price_per_million": model_prices["input"], - "output_price_per_million": model_prices["output"], - } - - # Add token details if available - if "input_token_details" in token_usage: - token_details["input_token_details"] = token_usage["input_token_details"] - if "output_token_details" in token_usage: - token_details["output_token_details"] = token_usage["output_token_details"] - - # Debug logging with more detail - logger.debug( - f"Cost calculation details: Model={model_name} | Input={input_tokens} tokens * ${model_prices['input']}/1M = ${input_cost:.6f} | Output={output_tokens} tokens * ${model_prices['output']}/1M = ${output_cost:.6f} | Total=${total_cost:.6f} | Token details={token_details}" - ) - - return { - "input_cost": round(input_cost, 6), - "output_cost": round(output_cost, 6), - "total_cost": round(total_cost, 6), - "currency": "USD", - "details": token_details, - } - - -async def evaluate_and_vote_on_proposal( - proposal_id: UUID, - wallet_id: Optional[UUID] = None, - auto_vote: bool = True, - confidence_threshold: float = 0.7, - dao_id: Optional[UUID] = None, -) -> Dict: - """Evaluate a proposal and automatically vote based on the evaluation. 
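Editor's note: to make the per-million pricing arithmetic in `calculate_token_cost` concrete, here is a worked example using the gpt-4.1 rates from the table above and an illustrative usage count (the token numbers are hypothetical):

```python
input_tokens, output_tokens = 7425, 312        # illustrative usage for one evaluation
input_price, output_price = 2.00, 8.00         # gpt-4.1 USD per million tokens, per the table above

input_cost = (input_tokens / 1_000_000) * input_price     # 0.014850
output_cost = (output_tokens / 1_000_000) * output_price  # 0.002496
print(round(input_cost + output_cost, 6))                 # 0.017346
```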
- - Args: - proposal_id: The ID of the proposal to evaluate and vote on - wallet_id: Optional wallet ID to use for voting - auto_vote: Whether to automatically vote based on the evaluation - confidence_threshold: Minimum confidence score required to auto-vote (0.0-1.0) - dao_id: Optional DAO ID to explicitly pass to the workflow - - Returns: - Dictionary containing the evaluation results and voting outcome - """ - logger.debug( - f"Starting proposal evaluation: proposal_id={proposal_id} | auto_vote={auto_vote} | confidence_threshold={confidence_threshold}" - ) - - try: - # Get proposal data directly from the database - proposal_data = backend.get_proposal(proposal_id) - if not proposal_data: - error_msg = f"Proposal {proposal_id} not found in database" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Decode parameters if they exist - decoded_parameters = decode_hex_parameters(proposal_data.parameters) - if decoded_parameters: - logger.debug( - f"Decoded proposal parameters: length={len(decoded_parameters) if decoded_parameters else 0}" - ) - - # Convert proposal data to dictionary and ensure parameters exist - proposal_dict = { - "proposal_id": proposal_data.proposal_id, - "parameters": decoded_parameters - or proposal_data.parameters, # Use decoded if available - "action": proposal_data.action, - "caller": proposal_data.caller, - "contract_principal": proposal_data.contract_principal, - "creator": proposal_data.creator, - "created_at_block": proposal_data.created_at_block, - "end_block": proposal_data.end_block, - "start_block": proposal_data.start_block, - "liquid_tokens": proposal_data.liquid_tokens, - "type": proposal_data.type, # Add proposal type - "proposal_contract": proposal_data.proposal_contract, # Add proposal contract for core proposals - } - - # For action proposals, parameters are required - if proposal_data.type == ProposalType.ACTION and not proposal_dict.get( - "parameters" - ): - error_msg = "No parameters found in action proposal data" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # For core proposals, proposal_contract is required - if proposal_data.type == ProposalType.CORE and not proposal_dict.get( - "proposal_contract" - ): - error_msg = "No proposal contract found in core proposal data" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Get DAO info based on provided dao_id or from proposal - dao_info = None - if dao_id: - logger.debug( - f"Using provided DAO ID: {dao_id} | Found={dao_info is not None}" - ) - dao_info = backend.get_dao(dao_id) - if not dao_info: - logger.warning( - f"Provided DAO ID {dao_id} not found, falling back to proposal's DAO ID" - ) - - # If dao_info is still None, try to get it from proposal's dao_id - if not dao_info and proposal_data.dao_id: - logger.debug( - f"Using proposal's DAO ID: {proposal_data.dao_id} | Found={dao_info is not None}" - ) - dao_info = backend.get_dao(proposal_data.dao_id) - - if not dao_info: - error_msg = "Could not find DAO information" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Get the treasury extension for the DAO - treasury_extension = None - try: - treasury_extensions = backend.list_extensions( - ExtensionFilter(dao_id=dao_info.id, type="EXTENSIONS_TREASURY") - ) - if treasury_extensions: - treasury_extension = treasury_extensions[0] - logger.debug( - f"Found treasury extension: contract_principal={treasury_extension.contract_principal}" - ) - - # Get treasury balance from Hiro API 
- hiro_api = HiroApi() - treasury_balance = hiro_api.get_address_balance( - treasury_extension.contract_principal - ) - logger.debug(f"Treasury balance retrieved: balance={treasury_balance}") - else: - logger.warning(f"No treasury extension found for DAO {dao_info.id}") - treasury_balance = None - except Exception as e: - logger.error(f"Failed to get treasury balance: {str(e)}", exc_info=True) - treasury_balance = None - - logger.debug( - f"Processing proposal for DAO: {dao_info.name} (ID: {dao_info.id})" - ) - - # Get the wallet and agent information if available - agent_id = None - if wallet_id: - wallet = backend.get_wallet(wallet_id) - if wallet and wallet.agent_id: - agent_id = wallet.agent_id - logger.debug(f"Using agent ID {agent_id} for wallet {wallet_id}") - - # Get agent prompts - agent_prompts = [] - model_name = "gpt-4.1" # Default model - temperature = 0.1 # Default temperature - try: - logger.debug( - f"Fetching prompts for agent_id={agent_id}, dao_id={proposal_data.dao_id}" - ) - prompts = backend.list_prompts( - PromptFilter( - agent_id=agent_id, - dao_id=proposal_data.dao_id, - is_active=True, - ) - ) - logger.debug(f"Retrieved prompts: {prompts}") - - # Store the full Prompt objects and get model settings from first prompt - agent_prompts = prompts - if agent_prompts: - first_prompt = agent_prompts[0] - model_name = first_prompt.model or model_name - temperature = ( - first_prompt.temperature - if first_prompt.temperature is not None - else temperature - ) - logger.debug( - f"Using model configuration: {model_name} (temperature={temperature})" - ) - else: - logger.warning( - f"No active prompts found for agent_id={agent_id}, dao_id={proposal_data.dao_id}" - ) - except Exception as e: - logger.error(f"Failed to get agent prompts: {str(e)}", exc_info=True) - - # Initialize state - state = { - "action_proposals_contract": proposal_dict["contract_principal"], - "action_proposals_voting_extension": proposal_dict["action"], - "proposal_id": proposal_dict["proposal_id"], - "proposal_data": proposal_dict, - "dao_info": dao_info.model_dump() if dao_info else {}, - "treasury_balance": treasury_balance, - "agent_prompts": ( - [p.prompt_text for p in agent_prompts] if agent_prompts else [] - ), - "approve": False, - "confidence_score": 0.0, - "reasoning": "", - "vote_result": None, - "wallet_id": wallet_id, - "confidence_threshold": confidence_threshold, - "auto_vote": auto_vote, - "vector_results": None, - "recent_tweets": None, - "web_search_results": None, - "token_usage": None, - "model_info": { - "name": "unknown", - "temperature": None, - }, - } - - logger.debug( - f"Agent prompts count: {len(state['agent_prompts'] or [])} | Has prompts: {bool(state['agent_prompts'])}" - ) - - # Create and run workflow with model settings from prompt - workflow = ProposalEvaluationWorkflow( - model_name=model_name, temperature=temperature - ) - if not workflow._validate_state(state): - error_msg = "Invalid workflow state" - logger.error(error_msg) - return { - "success": False, - "error": error_msg, - } - - logger.debug("Starting workflow execution...") - result = await workflow.execute(state) - logger.debug("Workflow execution completed") - - # Extract transaction ID from vote result if available - tx_id = None - if result.get("vote_result") and result["vote_result"].get("output"): - # Try to extract tx_id from the output - output = result["vote_result"]["output"] - if isinstance(output, str) and "txid:" in output.lower(): - # Extract the transaction ID from the output - for line in 
output.split("\n"): - if "txid:" in line.lower(): - parts = line.split(":") - if len(parts) > 1: - tx_id = parts[1].strip() - logger.debug(f"Transaction ID extracted: {tx_id}") - break - - # Prepare final result - final_result = { - "success": True, - "evaluation": { - "approve": result["approve"], - "confidence_score": result["confidence_score"], - "reasoning": result["reasoning"], - }, - "vote_result": result["vote_result"], - "auto_voted": auto_vote - and result["confidence_score"] >= confidence_threshold, - "tx_id": tx_id, - "formatted_prompt": result["formatted_prompt"], - "vector_results": result["vector_results"], - "recent_tweets": result["recent_tweets"], - "web_search_results": result["web_search_results"], - "treasury_balance": result.get("treasury_balance"), - "token_usage": result.get( - "token_usage", - {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}, - ), - "model_info": result.get( - "model_info", {"name": "unknown", "temperature": None} - ), - } - - # Calculate token costs - token_costs = calculate_token_cost( - final_result["token_usage"], final_result["model_info"]["name"] - ) - final_result["token_costs"] = token_costs - - # For the example token usage shown: - # Input: 7425 tokens * ($2.50/1M) = $0.0186 - # Output: 312 tokens * ($10.00/1M) = $0.0031 - # Total: $0.0217 - - logger.debug( - f"Proposal evaluation completed: Success={final_result['success']} | Decision={'APPROVE' if final_result['evaluation']['approve'] else 'REJECT'} | Confidence={final_result['evaluation']['confidence_score']:.2f} | Auto-voted={final_result['auto_voted']} | Transaction={tx_id or 'None'} | Model={final_result['model_info']['name']} | Token Usage={final_result['token_usage']} | Cost (USD)=${token_costs['total_cost']:.4f} (Input=${token_costs['input_cost']:.4f} for {token_costs['details']['input_tokens']} tokens, Output=${token_costs['output_cost']:.4f} for {token_costs['details']['output_tokens']} tokens)" - ) - logger.debug(f"Full evaluation result: {final_result}") - - return final_result - except Exception as e: - error_msg = f"Unexpected error in evaluate_and_vote_on_proposal: {str(e)}" - logger.error(error_msg, exc_info=True) - return { - "success": False, - "error": error_msg, - } - - -async def evaluate_proposal_only( - proposal_id: UUID, - wallet_id: Optional[UUID] = None, -) -> Dict: - """Evaluate a proposal without voting. 
- - Args: - proposal_id: The ID of the proposal to evaluate - wallet_id: Optional wallet ID to use for retrieving proposal data - - Returns: - Dictionary containing the evaluation results - """ - logger.debug(f"Starting proposal-only evaluation: proposal_id={proposal_id}") - - result = await evaluate_and_vote_on_proposal( - proposal_id=proposal_id, - wallet_id=wallet_id, - auto_vote=False, - ) - - # Remove vote-related fields from the response - logger.debug("Removing vote-related fields from response") - if "vote_result" in result: - del result["vote_result"] - if "auto_voted" in result: - del result["auto_voted"] - if "tx_id" in result: - del result["tx_id"] - - logger.debug("Proposal-only evaluation completed") - return result diff --git a/services/workflows/tweet_analysis.py b/services/workflows/tweet_analysis.py deleted file mode 100644 index fad0f1de..00000000 --- a/services/workflows/tweet_analysis.py +++ /dev/null @@ -1,218 +0,0 @@ -"""Tweet analysis workflow.""" - -from typing import Dict, Optional, TypedDict - -from langchain.prompts import PromptTemplate -from langgraph.graph import END, Graph, StateGraph -from pydantic import BaseModel, Field - -from backend.factory import backend -from backend.models import QueueMessageFilter, TweetType -from lib.logger import configure_logger -from services.workflows.base import BaseWorkflow -from tools.dao_deployments import ContractDAODeployInput - -logger = configure_logger(__name__) - - -class ToolRequest(BaseModel): - tool_name: str = Field( - description="The name of the tool to be executed its always contract_deploy_dao" - ) - parameters: ContractDAODeployInput = Field( - description="The parameters for the tool" - ) - priority: int = Field(description="The priority of the tool request") - - -class TweetAnalysisOutput(BaseModel): - worthy: bool = Field(description="Whether the tweet is worthy of processing") - reason: str = Field(description="The reason for the worthy determination") - tweet_type: TweetType = Field(description="The type of tweet") - tool_request: Optional[ToolRequest] = Field( - description="The tool request to be executed if the tweet is worthy" - ) - confidence_score: float = Field( - description="The confidence score for the worthy determination" - ) - - -class AnalysisState(TypedDict): - """State for the analysis flow.""" - - tweet_text: str - filtered_content: str - is_worthy: bool - tweet_type: TweetType - tool_request: Optional[ToolRequest] - confidence_score: float - reason: str - - -class TweetAnalysisWorkflow(BaseWorkflow[AnalysisState]): - """Workflow for analyzing tweets.""" - - def __init__(self, account_name: str = "@aibtcdevagent", **kwargs): - super().__init__(**kwargs) - self.account_name = account_name - - def _create_prompt(self) -> PromptTemplate: - """Create the analysis prompt template.""" - return PromptTemplate( - input_variables=[ - "tweet_text", - "filtered_content", - "account_name", - "token_symbols", - ], - template=""" - Your name is {account_name} on twitter. - - Analyze this tweet to determine: - 1. If it's worthy of processing (contains a valid DAO deployment request) - 2. What type of tweet it is (tool_request, thread, or invalid) - 3. 
If it's a tool request, extract the following required parameters: - - token_symbol: The symbol for the token (e.g., 'HUMAN') - - token_name: The name of the token (e.g., 'Human') - - token_description: Description of the token (e.g., 'The Human Token') - - token_max_supply: Initial supply (default: 1000000000) - - token_decimals: Number of decimals (default: 6) - - origin_address: The address of the DAO creator - - mission: The mission statement of the DAO serves as the unifying purpose and guiding principle of an AI DAO. It defines its goals, values, and desired impact, aligning participants and AI resources to achieve a shared outcome. - - tweet_id: The ID of the tweet - - Tweet History: - {filtered_content} - - Current Tweet: - {tweet_text} - - If the text is determined to be a general conversation, unrelated to creating or deploying a DAO, or if it appears to be promotional content, set Worthiness determination to False. - - Exclude tweets that are purely promotional and lack actionable parameters. If the tweet includes both praise and actionable details describing deploying a DAO, proceed with DAO deployment. - - Only craft the parameters for the tool contract_deploy_dao. - - Requirements: - 1. Expand upon any missing details in the request for a dao to be deployed to meet the needs of the tool parameters - 2. If the tweet is a general conversation, unrelated to creating or deploying a DAO, or if it appears to be promotional content, set Worthiness determination to False. - 3. Don't execute the tool contract_deploy_dao as your sole purpose is to generate the parameters for the tool. - 4. Make sure the DAO symbol is not already taken. If it is already taken, choose a new symbol for the parameters. - 5. Only craft the parameters for the tool contract_deploy_dao if Worthiness determination is True. 
- - Worthiness criteria: - - We welcome creativity—funny or edgy ideas are always welcome - - Concepts must avoid harmful or unethical themes - - While we're flexible on ethics, there's a clear line against promoting harm - - Worth depends on substance and alignment with basic principles - - Current DAO Symbols already taken: - {token_symbols} - - Output format: - {{ - "worthy": bool, - "reason": str, - "tweet_type": "tool_request" | "thread" | "invalid", - "tool_request": {{ - "tool_name": "contract_deploy_dao", - "parameters": {{ - "token_symbol": str, - "token_name": str, - "token_description": str, - "token_max_supply": str, - "token_decimals": str, - "origin_address": str, - "mission": str, - "tweet_id": str, - }}, - "priority": int - }} if worthy and tweet_type == "tool_request" else None, - "confidence_score": float - }} - """, - ) - - def _create_graph(self) -> Graph: - """Create the analysis graph.""" - prompt = self._create_prompt() - - # Create analysis node - def analyze_tweet(state: AnalysisState) -> AnalysisState: - """Analyze the tweet and determine if it's worthy of processing.""" - tokens = backend.list_tokens() - token_symbols_in_db = [token.symbol for token in tokens] - queued_messages = backend.list_queue_messages( - filters=QueueMessageFilter(type="daos", is_processed=False) - ) - token_symbols_in_queue = [ - message.message["parameters"]["token_symbol"] - for message in queued_messages - ] - - # make a list of token symbols in queue and token symbols in db - token_symbols = list(set(token_symbols_in_db + token_symbols_in_queue)) - - # Format prompt with state - formatted_prompt = prompt.format( - tweet_text=state["tweet_text"], - filtered_content=state["filtered_content"], - account_name=self.account_name, - token_symbols=token_symbols, - ) - - structured_output = self.llm.with_structured_output( - TweetAnalysisOutput, - ) - # Get analysis from LLM - result = structured_output.invoke(formatted_prompt) - - # Clean and parse the response - # content = self._clean_llm_response(result.content) - # parsed_result = TweetAnalysisOutput.model_validate_json(result) - - # Update state - state["is_worthy"] = result.worthy - state["tweet_type"] = result.tweet_type - state["tool_request"] = result.tool_request - state["confidence_score"] = result.confidence_score - state["reason"] = result.reason - - return state - - # Create the graph - workflow = StateGraph(AnalysisState) - - # Add nodes - workflow.add_node("analyze", analyze_tweet) - - # Add edges - workflow.set_entry_point("analyze") - workflow.add_edge("analyze", END) - - return workflow.compile() - - def _validate_state(self, state: AnalysisState) -> bool: - """Validate the workflow state.""" - required_fields = ["tweet_text", "filtered_content"] - return all(field in state and state[field] for field in required_fields) - - -async def analyze_tweet(tweet_text: str, filtered_content: str) -> Dict: - """Analyze a tweet and determine if it's worthy of processing.""" - # Initialize state - state = { - "tweet_text": tweet_text, - "filtered_content": filtered_content, - "is_worthy": False, - "tweet_type": TweetType.INVALID, - "tool_request": None, - "confidence_score": 0.0, - "reason": "", - } - - # Create and run workflow - workflow = TweetAnalysisWorkflow() - result = await workflow.execute(state) - - return result diff --git a/services/workflows/vector_react.py b/services/workflows/vector_react.py deleted file mode 100644 index aa55f95d..00000000 --- a/services/workflows/vector_react.py +++ /dev/null @@ -1,443 +0,0 @@ 
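As an aside on the tweet-analysis workflow deleted above: its core pattern is a single-node LangGraph `StateGraph` whose node formats a prompt, requests structured output into a Pydantic model via `with_structured_output`, and copies the parsed fields back into the state. A minimal, self-contained sketch of that pattern (the schema, state, and model name below are illustrative, not the removed module's):

```python
# Sketch of the single-node structured-output pattern, assuming langchain-openai,
# langgraph, and an OPENAI_API_KEY are available. Names are illustrative.
from typing import TypedDict

from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from pydantic import BaseModel, Field


class Verdict(BaseModel):
    worthy: bool = Field(description="Whether the text should be processed")
    reason: str = Field(description="Short justification")
    confidence_score: float = Field(description="Confidence between 0 and 1")


class State(TypedDict):
    text: str
    worthy: bool
    reason: str
    confidence_score: float


def analyze(state: State) -> State:
    # Ask the model to answer directly in the Verdict schema.
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    verdict = llm.with_structured_output(Verdict).invoke(
        f"Decide whether this text is actionable:\n\n{state['text']}"
    )
    state.update(
        worthy=verdict.worthy,
        reason=verdict.reason,
        confidence_score=verdict.confidence_score,
    )
    return state


graph = StateGraph(State)
graph.add_node("analyze", analyze)
graph.set_entry_point("analyze")
graph.add_edge("analyze", END)
app = graph.compile()  # app.invoke({"text": "...", ...}) returns the updated state
```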
-"""Vector-enabled ReAct workflow functionality with Supabase Vecs integration.""" - -import asyncio -from typing import Any, AsyncGenerator, Dict, List, Optional, TypedDict, Union - -from langchain_core.documents import Document -from langchain_core.embeddings import Embeddings -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage -from langchain_openai import ChatOpenAI, OpenAIEmbeddings -from langgraph.graph import END, START, StateGraph -from langgraph.prebuilt import ToolNode - -from backend.factory import backend -from lib.logger import configure_logger -from services.workflows.base import ( - BaseWorkflow, - ExecutionError, - VectorRetrievalCapability, -) -from services.workflows.react import ( - MessageProcessor, - ReactState, - StreamingCallbackHandler, -) - -# Remove this import to avoid circular dependencies -# from services.workflows.workflow_service import BaseWorkflowService, WorkflowBuilder - -logger = configure_logger(__name__) - - -class VectorRetrievalState(TypedDict): - """State for vector retrieval step.""" - - query: str - documents: List[Document] - - -class VectorReactState(ReactState): - """State for the Vector ReAct workflow, extending ReactState.""" - - vector_results: Optional[List[Document]] - - -class VectorReactWorkflow(BaseWorkflow[VectorReactState], VectorRetrievalCapability): - """ReAct workflow with vector store integration.""" - - def __init__( - self, - callback_handler: StreamingCallbackHandler, - tools: List[Any], - collection_names: Union[ - str, List[str] - ], # Modified to accept single or multiple collections - embeddings: Optional[Embeddings] = None, - **kwargs, - ): - super().__init__(**kwargs) - self.callback_handler = callback_handler - self.tools = tools - # Convert single collection to list for consistency - self.collection_names = ( - [collection_names] - if isinstance(collection_names, str) - else collection_names - ) - self.embeddings = embeddings or OpenAIEmbeddings() - self.required_fields = ["messages"] - - # Create a new LLM instance with the callback handler - self.llm = self.create_llm_with_callbacks([callback_handler]).bind_tools(tools) - - def _create_prompt(self) -> None: - """Not used in VectorReact workflow.""" - pass - - async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Document]: - """Retrieve relevant documents from multiple vector stores. 
- - Args: - query: The query to search for - **kwargs: Additional arguments - - Returns: - List of retrieved documents - """ - try: - all_documents = [] - limit_per_collection = kwargs.get( - "limit", 4 - ) # Get 4 results from each collection - - # Query each collection and gather results - for collection_name in self.collection_names: - try: - # Query vectors using the backend - vector_results = await backend.query_vectors( - collection_name=collection_name, - query_text=query, - limit=limit_per_collection, - embeddings=self.embeddings, - ) - - # Convert to LangChain Documents and add collection source - documents = [ - Document( - page_content=doc.get("page_content", ""), - metadata={ - **doc.get("metadata", {}), - "collection_source": collection_name, - }, - ) - for doc in vector_results - ] - - all_documents.extend(documents) - logger.info( - f"Retrieved {len(documents)} documents from collection {collection_name}" - ) - except Exception as e: - logger.error( - f"Failed to retrieve from collection {collection_name}: {str(e)}" - ) - continue # Continue with other collections if one fails - - logger.info( - f"Retrieved total of {len(all_documents)} documents from all collections" - ) - return all_documents - except Exception as e: - logger.error(f"Vector store retrieval failed: {str(e)}") - return [] - - def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: - """Integrate vector retrieval capability with a graph. - - Args: - graph: The graph to integrate with - **kwargs: Additional arguments - """ - # Modify the graph to include vector retrieval - # This is specific to the VectorReactWorkflow - pass - - def _create_graph(self) -> StateGraph: - """Create the VectorReact workflow graph.""" - tool_node = ToolNode(self.tools) - - def should_continue(state: VectorReactState) -> str: - messages = state["messages"] - last_message = messages[-1] - result = "tools" if last_message.tool_calls else END - logger.debug(f"Continue decision: {result}") - return result - - async def retrieve_from_vector_store(state: VectorReactState) -> Dict: - """Retrieve relevant documents from vector store.""" - messages = state["messages"] - # Get the last user message - last_user_message = None - for message in reversed(messages): - if isinstance(message, HumanMessage): - last_user_message = message.content - break - - if not last_user_message: - logger.warning("No user message found for vector retrieval") - return {"vector_results": []} - - documents = await self.retrieve_from_vector_store(query=last_user_message) - return {"vector_results": documents} - - def call_model_with_context(state: VectorReactState) -> Dict: - """Call model with additional context from vector store.""" - messages = state["messages"] - vector_results = state.get("vector_results", []) - - # Add vector context to the system message if available - context_message = None - - if vector_results: - # Format the vector results into a context string - context_str = "\n\n".join([doc.page_content for doc in vector_results]) - context_message = SystemMessage( - content=f"Here is additional context that may be helpful:\n\n{context_str}\n\n" - "Use this context to inform your response if relevant." 
- ) - messages = [context_message] + messages - - logger.debug( - f"Calling model with {len(messages)} messages and " - f"{len(vector_results)} retrieved documents" - ) - - response = self.llm.invoke(messages) - return {"messages": [response]} - - workflow = StateGraph(VectorReactState) - workflow.add_node("vector_retrieval", retrieve_from_vector_store) - workflow.add_node("agent", call_model_with_context) - workflow.add_node("tools", tool_node) - - # Set up the execution flow - workflow.add_edge(START, "vector_retrieval") - workflow.add_edge("vector_retrieval", "agent") - workflow.add_conditional_edges("agent", should_continue) - workflow.add_edge("tools", "agent") - - return workflow - - -class VectorLangGraphService: - """Service for executing VectorReact LangGraph operations""" - - def __init__( - self, - collection_names: Union[ - str, List[str] - ], # Modified to accept single or multiple collections - embeddings: Optional[Embeddings] = None, - ): - # Import here to avoid circular imports - from services.workflows.react import MessageProcessor - - self.collection_names = collection_names - self.embeddings = embeddings or OpenAIEmbeddings() - self.message_processor = MessageProcessor() - - def setup_callback_handler(self, queue, loop): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService - - # Use the static method instead of instantiating BaseWorkflowService - return BaseWorkflowService.create_callback_handler(queue, loop) - - async def stream_task_results(self, task, queue): - # Import here to avoid circular dependencies - from services.workflows.workflow_service import BaseWorkflowService - - # Use the static method instead of instantiating BaseWorkflowService - async for chunk in BaseWorkflowService.stream_results_from_task( - task=task, callback_queue=queue, logger_name=self.__class__.__name__ - ): - yield chunk - - async def _execute_stream_impl( - self, - messages: List[Union[SystemMessage, HumanMessage, AIMessage]], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - **kwargs, - ) -> AsyncGenerator[Dict, None]: - """Execute a Vector React stream implementation. 
- - Args: - messages: Processed messages - input_str: Current user input - persona: Optional persona to use - tools_map: Optional tools to use - **kwargs: Additional arguments - - Returns: - Async generator of result chunks - """ - try: - # Import here to avoid circular dependencies - from services.workflows.workflow_service import WorkflowBuilder - - # Setup queue and callbacks - callback_queue = asyncio.Queue() - loop = asyncio.get_running_loop() - - # Setup callback handler - callback_handler = self.setup_callback_handler(callback_queue, loop) - - # Create workflow using builder pattern - workflow = ( - WorkflowBuilder(VectorReactWorkflow) - .with_callback_handler(callback_handler) - .with_tools(list(tools_map.values()) if tools_map else []) - .build( - collection_names=self.collection_names, - embeddings=self.embeddings, - ) - ) - - # Create graph and compile - graph = workflow._create_graph() - runnable = graph.compile() - - # Execute workflow with callbacks config - config = {"callbacks": [callback_handler]} - task = asyncio.create_task( - runnable.ainvoke( - {"messages": messages, "vector_results": []}, config=config - ) - ) - - # Stream results - async for chunk in self.stream_task_results(task, callback_queue): - yield chunk - - except Exception as e: - logger.error( - f"Failed to execute VectorReact stream: {str(e)}", exc_info=True - ) - raise ExecutionError(f"VectorReact stream execution failed: {str(e)}") - - # Add execute_stream method to maintain the same interface as BaseWorkflowService - async def execute_stream( - self, - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - **kwargs, - ) -> AsyncGenerator[Dict, None]: - """Execute a workflow stream. - - This processes the history and delegates to _execute_stream_impl. - """ - # Process messages - filtered_content = self.message_processor.extract_filtered_content(history) - messages = self.message_processor.convert_to_langchain_messages( - filtered_content, input_str, persona - ) - - # Call the implementation - async for chunk in self._execute_stream_impl( - messages=messages, - input_str=input_str, - persona=persona, - tools_map=tools_map, - **kwargs, - ): - yield chunk - - # Keep the old method for backward compatibility - async def execute_vector_react_stream( - self, - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - ) -> AsyncGenerator[Dict, None]: - """Execute a VectorReact stream using LangGraph.""" - # Call the new method - async for chunk in self.execute_stream(history, input_str, persona, tools_map): - yield chunk - - -# Helper function for adding documents to vector store -async def add_documents_to_vectors( - collection_name: str, # Modified to only accept a single collection - documents: List[Document], - embeddings: Optional[Embeddings] = None, -) -> Dict[str, List[str]]: - """Add documents to vector collection. 
- - Args: - collection_name: Name of the collection to add to - documents: List of LangChain Document objects - embeddings: Optional embeddings model to use - - Returns: - Dictionary mapping collection name to list of document IDs - """ - # Ensure embeddings model is provided - if embeddings is None: - raise ValueError( - "Embeddings model must be provided to add documents to vector store" - ) - - # Store document IDs for the collection - collection_doc_ids = {} - - try: - # Ensure collection exists - try: - backend.get_vector_collection(collection_name) - except Exception: - # Create collection if it doesn't exist - embed_dim = 1536 # Default for OpenAI embeddings - if hasattr(embeddings, "embedding_dim"): - embed_dim = embeddings.embedding_dim - backend.create_vector_collection(collection_name, dimensions=embed_dim) - - # Extract texts for embedding - texts = [doc.page_content for doc in documents] - - # Generate embeddings for the texts - embedding_vectors = embeddings.embed_documents(texts) - - # Prepare documents for storage with embeddings - docs_for_storage = [ - {"page_content": doc.page_content, "embedding": embedding_vectors[i]} - for i, doc in enumerate(documents) - ] - - # Prepare metadata - metadata_list = [doc.metadata for doc in documents] - - # Add to vector store - ids = await backend.add_vectors( - collection_name=collection_name, - documents=docs_for_storage, - metadata=metadata_list, - ) - - collection_doc_ids[collection_name] = ids - logger.info(f"Added {len(ids)} documents to collection {collection_name}") - - except Exception as e: - logger.error( - f"Failed to add documents to collection {collection_name}: {str(e)}" - ) - collection_doc_ids[collection_name] = [] - - return collection_doc_ids - - -# Facade function for backward compatibility -async def execute_vector_langgraph_stream( - collection_names: Union[ - str, List[str] - ], # Modified to accept single or multiple collections - history: List[Dict], - input_str: str, - persona: Optional[str] = None, - tools_map: Optional[Dict] = None, - embeddings: Optional[Embeddings] = None, -) -> AsyncGenerator[Dict, None]: - """Execute a VectorReact stream using LangGraph with vector store integration.""" - # Initialize service and run stream - embeddings = embeddings or OpenAIEmbeddings() - service = VectorLangGraphService( - collection_names=collection_names, - embeddings=embeddings, - ) - - async for chunk in service.execute_stream(history, input_str, persona, tools_map): - yield chunk diff --git a/services/workflows/web_search.py b/services/workflows/web_search.py deleted file mode 100644 index e7a3155f..00000000 --- a/services/workflows/web_search.py +++ /dev/null @@ -1,238 +0,0 @@ -"""Web search workflow implementation using OpenAI Assistant API.""" - -import asyncio -import json -from typing import Any, Dict, List, Optional - -from langchain_core.messages import AIMessage, HumanMessage -from langgraph.graph import StateGraph -from openai import OpenAI -from openai.types.beta.assistant import Assistant -from openai.types.beta.thread import Thread -from openai.types.beta.threads.thread_message import ThreadMessage - -from lib.logger import configure_logger -from services.workflows.base import BaseWorkflow, WebSearchCapability -from services.workflows.vector import VectorRetrievalCapability - -logger = configure_logger(__name__) - - -class WebSearchWorkflow(BaseWorkflow, WebSearchCapability, VectorRetrievalCapability): - """Workflow that combines web search with vector retrieval capabilities using OpenAI Assistant.""" 
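For context on the helper deleted from vector_react.py above, here is a hypothetical usage sketch of `add_documents_to_vectors` as its signature suggests: wrap the texts in LangChain `Document` objects and pass an embeddings model explicitly (the helper raises `ValueError` without one). The collection name and document contents are made up for illustration.

```python
# Hypothetical usage of the removed helper; assumes OpenAI credentials are configured
# and the backend vector store is reachable.
import asyncio

from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

from services.workflows.vector_react import add_documents_to_vectors  # removed module


async def seed_collection() -> None:
    docs = [
        Document(page_content="DAO treasury policy v1", metadata={"source": "docs"}),
        Document(page_content="Grant proposal guidelines", metadata={"source": "wiki"}),
    ]
    ids_by_collection = await add_documents_to_vectors(
        collection_name="dao_knowledge",
        documents=docs,
        embeddings=OpenAIEmbeddings(),
    )
    # Maps the collection name to the IDs of the stored documents.
    print(ids_by_collection)


asyncio.run(seed_collection())
```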
- - def __init__(self, **kwargs): - """Initialize the workflow. - - Args: - **kwargs: Additional arguments passed to parent classes - """ - super().__init__(**kwargs) - self.search_results_cache = {} - self.client = OpenAI() - # Create an assistant with web browsing capability - self.assistant: Assistant = self.client.beta.assistants.create( - name="Web Search Assistant", - description="Assistant that helps with web searches", - model="gpt-4-turbo-preview", - tools=[{"type": "retrieval"}, {"type": "web_browser"}], - instructions="""You are a web search assistant. Your primary task is to: - 1. Search the web for relevant information - 2. Extract key information from web pages - 3. Provide detailed, accurate responses with source URLs - 4. Format responses as structured data with content and metadata - Always include source URLs in your responses.""", - ) - - async def search_web(self, query: str, **kwargs) -> List[Dict[str, Any]]: - """Search the web using OpenAI Assistant API. - - Args: - query: The search query - **kwargs: Additional search parameters - - Returns: - List of search results with content and metadata - """ - try: - # Check cache first - if query in self.search_results_cache: - logger.info(f"Using cached results for query: {query}") - return self.search_results_cache[query] - - # Create a new thread for this search - thread: Thread = self.client.beta.threads.create() - - # Add the user's message to the thread - self.client.beta.threads.messages.create( - thread_id=thread.id, - role="user", - content=f"Search the web for: {query}. Please provide detailed information with source URLs.", - ) - - # Run the assistant - run = self.client.beta.threads.runs.create( - thread_id=thread.id, assistant_id=self.assistant.id - ) - - # Wait for completion - while True: - run_status = self.client.beta.threads.runs.retrieve( - thread_id=thread.id, run_id=run.id - ) - if run_status.status == "completed": - break - elif run_status.status in ["failed", "cancelled", "expired"]: - raise Exception( - f"Assistant run failed with status: {run_status.status}" - ) - await asyncio.sleep(1) # Wait before checking again - - # Get the assistant's response - messages: List[ThreadMessage] = self.client.beta.threads.messages.list( - thread_id=thread.id - ) - - # Process the response into our document format - documents = [] - for message in messages: - if message.role == "assistant": - for content in message.content: - if content.type == "text": - # Extract URLs from annotations if available - urls = [] - if message.metadata and "citations" in message.metadata: - urls = [ - cite["url"] - for cite in message.metadata["citations"] - ] - - # Create document with content and metadata - doc = { - "page_content": content.text, - "metadata": { - "type": "web_search_result", - "source_urls": urls, - "query": query, - "timestamp": message.created_at, - }, - } - documents.append(doc) - - # Cache the results - self.search_results_cache[query] = documents - - logger.info(f"Web search completed with {len(documents)} results") - return documents - - except Exception as e: - logger.error(f"Web search failed: {str(e)}") - return [] - - async def execute(self, query: str, **kwargs) -> Dict[str, Any]: - """Execute the web search workflow. - - This workflow: - 1. Searches the web for relevant information - 2. Processes and stores the results - 3. 
Combines with vector retrieval if available - - Args: - query: The search query - **kwargs: Additional execution arguments - - Returns: - Dict containing search results and any additional data - """ - try: - # Perform web search - web_results = await self.search_web(query, **kwargs) - - # Cache results - self.search_results_cache[query] = web_results - - # Combine with vector retrieval if available - combined_results = web_results - try: - vector_results = await self.retrieve_from_vectorstore(query, **kwargs) - combined_results.extend(vector_results) - except Exception as e: - logger.warning( - f"Vector retrieval failed, using only web results: {str(e)}" - ) - - return { - "query": query, - "results": combined_results, - "source": "web_search_workflow", - "metadata": { - "num_web_results": len(web_results), - "has_vector_results": ( - bool(vector_results) if "vector_results" in locals() else False - ), - }, - } - - except Exception as e: - logger.error(f"Web search workflow execution failed: {str(e)}") - raise - - def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: - """Integrate web search workflow with a graph. - - Args: - graph: The graph to integrate with - **kwargs: Additional integration arguments - """ - # Add web search node - graph.add_node("web_search", self.search_web) - - # Add vector retrieval node if available - try: - graph.add_node("vector_retrieval", self.retrieve_from_vectorstore) - - # Connect nodes - graph.add_edge("web_search", "vector_retrieval") - except Exception as e: - logger.warning(f"Vector retrieval integration failed: {str(e)}") - - # Add result processing node - graph.add_node("process_results", self._process_results) - graph.add_edge("vector_retrieval", "process_results") - - async def _process_results( - self, - web_results: List[Dict[str, Any]], - vector_results: Optional[List[Dict[str, Any]]] = None, - ) -> Dict[str, Any]: - """Process and combine search results. - - Args: - web_results: Results from web search - vector_results: Optional results from vector retrieval - - Returns: - Processed and combined results - """ - combined_results = web_results.copy() - if vector_results: - combined_results.extend(vector_results) - - # Deduplicate results based on content similarity - seen_contents = set() - unique_results = [] - for result in combined_results: - content = result.get("page_content", "") - content_hash = hash(content) - if content_hash not in seen_contents: - seen_contents.add(content_hash) - unique_results.append(result) - - return { - "results": unique_results, - "metadata": { - "num_web_results": len(web_results), - "num_vector_results": len(vector_results) if vector_results else 0, - "num_unique_results": len(unique_results), - }, - } diff --git a/test_comprehensive_evaluation.py b/test_comprehensive_evaluation.py new file mode 100755 index 00000000..154d0d19 --- /dev/null +++ b/test_comprehensive_evaluation.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python3 +""" +Simple CLI test script for comprehensive proposal evaluation workflow. + +This test uses the ComprehensiveEvaluatorAgent that performs all evaluations +(core, financial, historical, social, and reasoning) in a single LLM pass. 
+
+Usage:
+    python test_comprehensive_evaluation.py --proposal-id "123e4567-e89b-12d3-a456-426614174000" --proposal-data "Some proposal content"
+    python test_comprehensive_evaluation.py --proposal-id "123e4567-e89b-12d3-a456-426614174000" --proposal-data "Proposal content" --debug-level 2
+    python test_comprehensive_evaluation.py --proposal-id "123e4567-e89b-12d3-a456-426614174000" --debug-level 2  # Lookup from database
+"""
+
+import argparse
+import asyncio
+import json
+import os
+import sys
+from uuid import UUID
+
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from services.ai.workflows.comprehensive_evaluation import (
+    evaluate_proposal_comprehensive,
+)
+from backend.factory import get_backend
+
+
+async def main():
+    parser = argparse.ArgumentParser(
+        description="Test comprehensive proposal evaluation workflow (single-agent)",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+    # Basic comprehensive evaluation with proposal data
+    python test_comprehensive_evaluation.py --proposal-id "12345678-1234-5678-9012-123456789abc" \\
+        --proposal-data "Proposal to fund development of new feature"
+
+    # Lookup proposal from database
+    python test_comprehensive_evaluation.py --proposal-id "12345678-1234-5678-9012-123456789abc" \\
+        --debug-level 2
+
+    # Verbose debugging
+    python test_comprehensive_evaluation.py --proposal-id "12345678-1234-5678-9012-123456789abc" \\
+        --proposal-data "Proposal content" --debug-level 2
+    """,
+    )
+
+    # Required arguments
+    parser.add_argument(
+        "--proposal-id",
+        type=str,
+        required=True,
+        help="ID of the proposal to evaluate",
+    )
+
+    parser.add_argument(
+        "--proposal-data",
+        type=str,
+        required=False,
+        help="Content/data of the proposal to evaluate (optional - will lookup from database if not provided)",
+    )
+
+    # Optional arguments
+    parser.add_argument(
+        "--agent-id",
+        type=str,
+        help="ID of the agent",
+    )
+
+    parser.add_argument(
+        "--dao-id",
+        type=str,
+        help="ID of the DAO",
+    )
+
+    parser.add_argument(
+        "--profile-id",
+        type=str,
+        help="ID of the profile",
+    )
+
+    parser.add_argument(
+        "--debug-level",
+        type=int,
+        choices=[0, 1, 2],
+        default=0,
+        help="Debug level: 0=normal, 1=verbose, 2=very verbose (default: 0)",
+    )
+
+    parser.add_argument(
+        "--model-name",
+        type=str,
+        help="Override the default model name for evaluation",
+    )
+
+    args = parser.parse_args()
+
+    # If proposal_content is not provided, look it up from the database
+    proposal_content = args.proposal_data
+    if not proposal_content:
+        print("📋 No proposal data provided, looking up from database...")
+        try:
+            backend = get_backend()
+            proposal_uuid = UUID(args.proposal_id)
+            proposal = backend.get_proposal(proposal_uuid)
+
+            if not proposal:
+                print(
+                    f"❌ Error: Proposal with ID {args.proposal_id} not found in database"
+                )
+                sys.exit(1)
+
+            if not proposal.content:
+                print(f"❌ Error: Proposal {args.proposal_id} has no content")
+                sys.exit(1)
+
+            proposal_content = proposal.content
+            print(f"✅ Found proposal in database: {proposal.title or 'Untitled'}")
+
+            # Update DAO ID if not provided and available in proposal
+            if not args.dao_id and proposal.dao_id:
+                args.dao_id = str(proposal.dao_id)
+                print(f"✅ Using DAO ID from proposal: {args.dao_id}")
+
+        except ValueError as e:
+            print(f"❌ Error: Invalid proposal ID format: {e}")
+            sys.exit(1)
+        except Exception as e:
+            print(f"❌ Error looking up proposal: {e}")
+            sys.exit(1)
+
+    print("🚀 Starting Comprehensive Proposal Evaluation Test")
+    print("=" * 60)
print(f"Proposal ID: {args.proposal_id}") + print( + f"Proposal Data: {proposal_content[:100]}{'...' if len(proposal_content) > 100 else ''}" + ) + print(f"Agent ID: {args.agent_id}") + print(f"DAO ID: {args.dao_id}") + print(f"Profile ID: {args.profile_id}") + print(f"Debug Level: {args.debug_level}") + print(f"Model Name: {args.model_name}") + print("=" * 60) + print("🧠 Using ComprehensiveEvaluatorAgent (Single LLM Pass)") + print("=" * 60) + + try: + # Set up config + config = { + "debug_level": args.debug_level, + } + + if args.model_name: + config["model_name"] = args.model_name + + if args.debug_level >= 1: + # For verbose debugging, customize agent settings + config["approval_threshold"] = 70 + config["veto_threshold"] = 30 + config["consensus_threshold"] = 10 + + # Run comprehensive evaluation + print("🔍 Running comprehensive evaluation...") + result = await evaluate_proposal_comprehensive( + proposal_id=args.proposal_id, + proposal_content=proposal_content, + config=config, + dao_id=args.dao_id, + agent_id=args.agent_id, + profile_id=args.profile_id, + ) + + print("\n✅ Comprehensive Evaluation Complete!") + print("=" * 60) + + # Pretty print the result + if "error" in result: + print(f"❌ Error: {result['error']}") + else: + print("📊 Comprehensive Evaluation Results:") + print( + f" • Approval: {'✅ APPROVE' if result.get('approve') else '❌ REJECT'}" + ) + print(f" • Overall Score: {result.get('overall_score', 0)}") + print(f" • Evaluation Type: {result.get('evaluation_type', 'unknown')}") + + # Show reasoning (truncated for readability) + reasoning = result.get("reasoning", "N/A") + if len(reasoning) > 500: + reasoning = reasoning[:500] + "... (truncated)" + print(f" • Reasoning: {reasoning}") + + scores = result.get("scores", {}) + if scores: + print(" • Detailed Scores:") + for score_type, score_value in scores.items(): + print(f" - {score_type.title()}: {score_value}") + + flags = result.get("flags", []) + if flags: + print(f" • Flags: {', '.join(flags[:5])}") # Show first 5 flags + if len(flags) > 5: + print(f" ... and {len(flags) - 5} more flags") + + token_usage = result.get("token_usage", {}) + if token_usage: + print(" • Token Usage:") + print(f" - Input: {token_usage.get('input_tokens', 0):,}") + print(f" - Output: {token_usage.get('output_tokens', 0):,}") + print(f" - Total: {token_usage.get('total_tokens', 0):,}") + + images_processed = result.get("images_processed", 0) + if images_processed > 0: + print(f" • Images Processed: {images_processed}") + + summaries = result.get("summaries", {}) + if summaries and args.debug_level >= 1: + print(" • Summaries:") + for summary_type, summary_text in summaries.items(): + truncated_summary = ( + summary_text[:200] + "..." + if len(summary_text) > 200 + else summary_text + ) + print( + f" - {summary_type.replace('_', ' ').title()}: {truncated_summary}" + ) + + print("\n📄 Full Result JSON:") + print(json.dumps(result, indent=2, default=str)) + + except Exception as e: + print(f"\n❌ Error during comprehensive evaluation: {str(e)}") + if args.debug_level >= 1: + import traceback + + traceback.print_exc() + sys.exit(1) + + print("\n🎉 Comprehensive evaluation test completed successfully!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/test_proposal_evaluation.py b/test_proposal_evaluation.py new file mode 100644 index 00000000..23b8e5e6 --- /dev/null +++ b/test_proposal_evaluation.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python3 +""" +Simple CLI test script for proposal evaluation workflow (multi-agent). 
+
+This test uses the multi-agent ProposalEvaluationWorkflow that runs multiple
+specialized agents (core, financial, historical, social, reasoning) in sequence.
+
+Usage:
+    python test_proposal_evaluation.py --proposal-id "123e4567-e89b-12d3-a456-426614174000" --proposal-data "Some proposal content"
+    python test_proposal_evaluation.py --proposal-id "123e4567-e89b-12d3-a456-426614174000" --proposal-data "Proposal content" --debug-level 2
+    python test_proposal_evaluation.py --proposal-id "123e4567-e89b-12d3-a456-426614174000" --debug-level 2  # Lookup from database
+"""
+
+import argparse
+import asyncio
+import json
+import os
+import sys
+from uuid import UUID
+
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from services.ai.workflows.proposal_evaluation import evaluate_proposal
+from backend.factory import get_backend
+
+
+async def main():
+    parser = argparse.ArgumentParser(
+        description="Test proposal evaluation workflow (multi-agent)",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+    # Basic multi-agent evaluation with proposal data
+    python test_proposal_evaluation.py --proposal-id "12345678-1234-5678-9012-123456789abc" \\
+        --proposal-data "Proposal to fund development of new feature"
+
+    # Lookup proposal from database
+    python test_proposal_evaluation.py --proposal-id "12345678-1234-5678-9012-123456789abc" \\
+        --debug-level 2
+
+    # Verbose debugging
+    python test_proposal_evaluation.py --proposal-id "12345678-1234-5678-9012-123456789abc" \\
+        --proposal-data "Proposal content" --debug-level 2
+    """,
+    )
+
+    # Required arguments
+    parser.add_argument(
+        "--proposal-id",
+        type=str,
+        required=True,
+        help="ID of the proposal to evaluate",
+    )
+
+    parser.add_argument(
+        "--proposal-data",
+        type=str,
+        required=False,
+        help="Content/data of the proposal to evaluate (optional - will lookup from database if not provided)",
+    )
+
+    # Optional arguments
+    parser.add_argument(
+        "--agent-id",
+        type=str,
+        help="ID of the agent",
+    )
+
+    parser.add_argument(
+        "--dao-id",
+        type=str,
+        help="ID of the DAO",
+    )
+
+    parser.add_argument(
+        "--profile-id",
+        type=str,
+        help="ID of the profile",
+    )
+
+    parser.add_argument(
+        "--debug-level",
+        type=int,
+        choices=[0, 1, 2],
+        default=0,
+        help="Debug level: 0=normal, 1=verbose, 2=very verbose (default: 0)",
+    )
+
+    parser.add_argument(
+        "--model-name",
+        type=str,
+        help="Override the default model name for evaluation",
+    )
+
+    args = parser.parse_args()
+
+    # If proposal_content is not provided, look it up from the database
+    proposal_content = args.proposal_data
+    if not proposal_content:
+        print("📋 No proposal data provided, looking up from database...")
+        try:
+            backend = get_backend()
+            proposal_uuid = UUID(args.proposal_id)
+            proposal = backend.get_proposal(proposal_uuid)
+
+            if not proposal:
+                print(
+                    f"❌ Error: Proposal with ID {args.proposal_id} not found in database"
+                )
+                sys.exit(1)
+
+            if not proposal.content:
+                print(f"❌ Error: Proposal {args.proposal_id} has no content")
+                sys.exit(1)
+
+            proposal_content = proposal.content
+            print(f"✅ Found proposal in database: {proposal.title or 'Untitled'}")
+
+            # Update DAO ID if not provided and available in proposal
+            if not args.dao_id and proposal.dao_id:
+                args.dao_id = str(proposal.dao_id)
+                print(f"✅ Using DAO ID from proposal: {args.dao_id}")
+
+        except ValueError as e:
+            print(f"❌ Error: Invalid proposal ID format: {e}")
+            sys.exit(1)
+        except Exception as e:
+            print(f"❌ Error looking up proposal: {e}")
+            sys.exit(1)
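One detail worth keeping in mind for the flag handling above: argparse turns dashes in option names into underscores on the parsed namespace, so `--proposal-data` is read back as `args.proposal_data`. A minimal standalone illustration:

```python
# Standalone illustration of argparse's option-name-to-attribute mapping.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--proposal-id", type=str, required=True)
parser.add_argument("--proposal-data", type=str, required=False)

args = parser.parse_args(["--proposal-id", "abc", "--proposal-data", "hello"])
print(args.proposal_id)    # -> abc
print(args.proposal_data)  # -> hello
```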
+ + print("🚀 Starting Multi-Agent Proposal Evaluation Test") + print("=" * 60) + print(f"Proposal ID: {args.proposal_id}") + print( + f"Proposal Data: {proposal_content[:100]}{'...' if len(proposal_content) > 100 else ''}" + ) + print(f"Agent ID: {args.agent_id}") + print(f"DAO ID: {args.dao_id}") + print(f"Profile ID: {args.profile_id}") + print(f"Debug Level: {args.debug_level}") + print(f"Model Name: {args.model_name}") + print("=" * 60) + print("🧠 Using Multi-Agent ProposalEvaluationWorkflow") + print("=" * 60) + + try: + # Set up config + config = { + "debug_level": args.debug_level, + } + + if args.model_name: + config["model_name"] = args.model_name + + if args.debug_level >= 1: + # For verbose debugging, customize agent settings + config["approval_threshold"] = 70 + config["veto_threshold"] = 30 + config["consensus_threshold"] = 10 + + # Run multi-agent evaluation + print("🔍 Running multi-agent evaluation...") + result = await evaluate_proposal( + proposal_id=args.proposal_id, + proposal_content=proposal_content, + config=config, + dao_id=args.dao_id, + agent_id=args.agent_id, + profile_id=args.profile_id, + ) + + print("\n✅ Multi-Agent Evaluation Complete!") + print("=" * 60) + + # Pretty print the result + if "error" in result: + print(f"❌ Error: {result['error']}") + else: + print("📊 Multi-Agent Evaluation Results:") + print( + f" • Approval: {'✅ APPROVE' if result.get('approve') else '❌ REJECT'}" + ) + print(f" • Overall Score: {result.get('overall_score', 0)}") + print(f" • Evaluation Type: {result.get('evaluation_type', 'unknown')}") + + # Show reasoning (truncated for readability) + reasoning = result.get("reasoning", "N/A") + if len(reasoning) > 500: + reasoning = reasoning[:500] + "... (truncated)" + print(f" • Reasoning: {reasoning}") + + scores = result.get("scores", {}) + if scores: + print(" • Detailed Scores:") + for score_type, score_value in scores.items(): + print(f" - {score_type.title()}: {score_value}") + + flags = result.get("flags", []) + if flags: + print(f" • Flags: {', '.join(flags[:5])}") # Show first 5 flags + if len(flags) > 5: + print(f" ... and {len(flags) - 5} more flags") + + token_usage = result.get("token_usage", {}) + if token_usage: + print(" • Token Usage:") + print(f" - Input: {token_usage.get('input_tokens', 0):,}") + print(f" - Output: {token_usage.get('output_tokens', 0):,}") + print(f" - Total: {token_usage.get('total_tokens', 0):,}") + + workflow_step = result.get("workflow_step", "unknown") + completed_steps = result.get("completed_steps", []) + if workflow_step or completed_steps: + print(" • Workflow Progress:") + print(f" - Current Step: {workflow_step}") + if completed_steps: + print(f" - Completed Steps: {', '.join(completed_steps)}") + + summaries = result.get("summaries", {}) + if summaries and args.debug_level >= 1: + print(" • Summaries:") + for summary_type, summary_text in summaries.items(): + truncated_summary = ( + summary_text[:200] + "..." 
+ if len(summary_text) > 200 + else summary_text + ) + print( + f" - {summary_type.replace('_', ' ').title()}: {truncated_summary}" + ) + + print("\n📄 Full Result JSON:") + print(json.dumps(result, indent=2, default=str)) + + except Exception as e: + print(f"\n❌ Error during multi-agent evaluation: {str(e)}") + if args.debug_level >= 1: + import traceback + + traceback.print_exc() + sys.exit(1) + + print("\n🎉 Multi-agent evaluation test completed successfully!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/tests/api/test_profile_auth.py b/tests/api/test_profile_auth.py deleted file mode 100644 index a2540f6d..00000000 --- a/tests/api/test_profile_auth.py +++ /dev/null @@ -1,239 +0,0 @@ -from unittest.mock import MagicMock, patch - -import pytest -from fastapi import HTTPException - -from api.dependencies import ( - get_profile_from_api_key, - verify_profile, - verify_profile_from_token, -) -from backend.models import Profile - - -@pytest.mark.asyncio -async def test_get_profile_from_api_key_invalid_uuid(): - """Test that invalid UUID format returns None.""" - result = await get_profile_from_api_key("not-a-uuid") - assert result is None - - -@pytest.mark.asyncio -async def test_get_profile_from_api_key_no_keys(): - """Test that when no keys are found, None is returned.""" - with patch("api.dependencies.backend") as mock_backend: - mock_backend.list_keys.return_value = [] - - result = await get_profile_from_api_key("123e4567-e89b-12d3-a456-426614174000") - - assert result is None - mock_backend.list_keys.assert_called_once() - - -@pytest.mark.asyncio -async def test_get_profile_from_api_key_no_profile_id(): - """Test that when key has no profile_id, None is returned.""" - with patch("api.dependencies.backend") as mock_backend: - mock_key = MagicMock() - mock_key.profile_id = None - mock_backend.list_keys.return_value = [mock_key] - - result = await get_profile_from_api_key("123e4567-e89b-12d3-a456-426614174000") - - assert result is None - - -@pytest.mark.asyncio -async def test_get_profile_from_api_key_no_profile(): - """Test that when profile is not found, None is returned.""" - with patch("api.dependencies.backend") as mock_backend: - mock_key = MagicMock() - mock_key.profile_id = "profile-id" - mock_backend.list_keys.return_value = [mock_key] - mock_backend.get_profile.return_value = None - - result = await get_profile_from_api_key("123e4567-e89b-12d3-a456-426614174000") - - assert result is None - mock_backend.get_profile.assert_called_once_with("profile-id") - - -@pytest.mark.asyncio -async def test_get_profile_from_api_key_success(): - """Test successful profile retrieval from API key.""" - with patch("api.dependencies.backend") as mock_backend: - mock_key = MagicMock() - mock_key.profile_id = "profile-id" - mock_profile = MagicMock(spec=Profile) - - mock_backend.list_keys.return_value = [mock_key] - mock_backend.get_profile.return_value = mock_profile - - result = await get_profile_from_api_key("123e4567-e89b-12d3-a456-426614174000") - - assert result == mock_profile - mock_backend.get_profile.assert_called_once_with("profile-id") - - -@pytest.mark.asyncio -async def test_verify_profile_with_api_key(): - """Test verify_profile with valid API key.""" - with patch("api.dependencies.get_profile_from_api_key") as mock_get_profile: - mock_profile = MagicMock(spec=Profile) - mock_get_profile.return_value = mock_profile - - result = await verify_profile(authorization=None, x_api_key="valid-api-key") - - assert result == mock_profile - 
mock_get_profile.assert_called_once_with("valid-api-key") - - -@pytest.mark.asyncio -async def test_verify_profile_with_invalid_api_key(): - """Test verify_profile with invalid API key raises exception.""" - with patch("api.dependencies.get_profile_from_api_key") as mock_get_profile: - mock_get_profile.return_value = None - - with pytest.raises(HTTPException) as exc_info: - await verify_profile(authorization=None, x_api_key="invalid-api-key") - - assert exc_info.value.status_code == 401 - assert "Invalid API key" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_missing_auth(): - """Test verify_profile with missing authorization raises exception.""" - with pytest.raises(HTTPException) as exc_info: - await verify_profile(authorization=None, x_api_key=None) - - assert exc_info.value.status_code == 401 - assert "Missing authorization header" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_invalid_auth_format(): - """Test verify_profile with invalid authorization format raises exception.""" - with pytest.raises(HTTPException) as exc_info: - await verify_profile(authorization="InvalidFormat", x_api_key=None) - - assert exc_info.value.status_code == 401 - assert "Invalid authorization format" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_invalid_token(): - """Test verify_profile with invalid token raises exception.""" - with patch("api.dependencies.backend") as mock_backend: - mock_backend.verify_session_token.return_value = None - - with pytest.raises(HTTPException) as exc_info: - await verify_profile(authorization="Bearer invalid-token", x_api_key=None) - - assert exc_info.value.status_code == 401 - assert "Invalid bearer token" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_no_profile(): - """Test verify_profile with valid token but no profile raises exception.""" - with patch("api.dependencies.backend") as mock_backend: - mock_backend.verify_session_token.return_value = "user@example.com" - mock_backend.list_profiles.return_value = [] - - with pytest.raises(HTTPException) as exc_info: - await verify_profile(authorization="Bearer valid-token", x_api_key=None) - - assert exc_info.value.status_code == 404 - assert "Profile not found" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_success(): - """Test verify_profile with valid token and profile.""" - with patch("api.dependencies.backend") as mock_backend: - mock_profile = MagicMock(spec=Profile) - mock_backend.verify_session_token.return_value = "user@example.com" - mock_backend.list_profiles.return_value = [mock_profile] - - result = await verify_profile( - authorization="Bearer valid-token", x_api_key=None - ) - - assert result == mock_profile - - -@pytest.mark.asyncio -async def test_verify_profile_from_token_with_key(): - """Test verify_profile_from_token with valid API key.""" - with patch("api.dependencies.get_profile_from_api_key") as mock_get_profile: - mock_profile = MagicMock(spec=Profile) - mock_get_profile.return_value = mock_profile - - result = await verify_profile_from_token(token=None, key="valid-api-key") - - assert result == mock_profile - mock_get_profile.assert_called_once_with("valid-api-key") - - -@pytest.mark.asyncio -async def test_verify_profile_from_token_with_invalid_key(): - """Test verify_profile_from_token with invalid API key raises exception.""" - with patch("api.dependencies.get_profile_from_api_key") as mock_get_profile: - 
mock_get_profile.return_value = None - - with pytest.raises(HTTPException) as exc_info: - await verify_profile_from_token(token=None, key="invalid-api-key") - - assert exc_info.value.status_code == 401 - assert "Invalid API key" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_from_token_missing_token(): - """Test verify_profile_from_token with missing token raises exception.""" - with pytest.raises(HTTPException) as exc_info: - await verify_profile_from_token(token=None, key=None) - - assert exc_info.value.status_code == 401 - assert "Missing token parameter" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_from_token_invalid_token(): - """Test verify_profile_from_token with invalid token raises exception.""" - with patch("api.dependencies.backend") as mock_backend: - mock_backend.verify_session_token.return_value = None - - with pytest.raises(HTTPException) as exc_info: - await verify_profile_from_token(token="invalid-token", key=None) - - assert exc_info.value.status_code == 401 - assert "Invalid or expired token" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_from_token_no_profile(): - """Test verify_profile_from_token with valid token but no profile raises exception.""" - with patch("api.dependencies.backend") as mock_backend: - mock_backend.verify_session_token.return_value = "user@example.com" - mock_backend.list_profiles.return_value = [] - - with pytest.raises(HTTPException) as exc_info: - await verify_profile_from_token(token="valid-token", key=None) - - assert exc_info.value.status_code == 404 - assert "No profile found for the authenticated email" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_profile_from_token_success(): - """Test verify_profile_from_token with valid token and profile.""" - with patch("api.dependencies.backend") as mock_backend: - mock_profile = MagicMock(spec=Profile) - mock_backend.verify_session_token.return_value = "user@example.com" - mock_backend.list_profiles.return_value = [mock_profile] - - result = await verify_profile_from_token(token="valid-token", key=None) - - assert result == mock_profile diff --git a/tests/api/test_tools_api.py b/tests/api/test_tools_api.py deleted file mode 100644 index 5b73f568..00000000 --- a/tests/api/test_tools_api.py +++ /dev/null @@ -1,229 +0,0 @@ -import json -from unittest.mock import patch - -import pytest -from fastapi.testclient import TestClient - -from api.tools import router -from lib.tools import Tool - - -# Create a test client -@pytest.fixture -def client(): - from fastapi import FastAPI - - app = FastAPI() - app.include_router(router) - return TestClient(app) - - -# Mock tools for testing -@pytest.fixture -def mock_tools(): - return [ - Tool( - id="test_get_data", - name="Get Data", - description="Test tool for getting data", - category="TEST", - parameters=json.dumps( - { - "param1": {"description": "Test parameter 1", "type": "str"}, - "param2": {"description": "Test parameter 2", "type": "int"}, - } - ), - ), - Tool( - id="wallet_get_balance", - name="Get Balance", - description="Get wallet balance", - category="WALLET", - parameters=json.dumps( - {"wallet_id": {"description": "Wallet ID", "type": "UUID"}} - ), - ), - Tool( - id="dao_get_info", - name="Get Info", - description="Get DAO information", - category="DAO", - parameters=json.dumps( - {"dao_id": {"description": "DAO ID", "type": "UUID"}} - ), - ), - ] - - -@pytest.mark.asyncio -async def test_get_tools(client, 
mock_tools): - """Test the /tools/available endpoint.""" - # Mock the available_tools - with patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/available") - - # Check response status - assert response.status_code == 200 - - # Check response content - tools = response.json() - assert len(tools) == 3 - assert tools[0]["id"] == "test_get_data" - assert tools[1]["id"] == "wallet_get_balance" - assert tools[2]["id"] == "dao_get_info" - - -@pytest.mark.asyncio -async def test_get_tools_with_category_filter(client, mock_tools): - """Test the /tools/available endpoint with category filter.""" - # Mock the available_tools - with patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/available?category=WALLET") - - # Check response status - assert response.status_code == 200 - - # Check response content - tools = response.json() - assert len(tools) == 1 - assert tools[0]["id"] == "wallet_get_balance" - assert tools[0]["category"] == "WALLET" - - -@pytest.mark.asyncio -async def test_get_tools_with_nonexistent_category(client, mock_tools): - """Test the /tools/available endpoint with a category that doesn't exist.""" - # Mock the available_tools - with patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/available?category=NONEXISTENT") - - # Check response status - assert response.status_code == 200 - - # Check response content - tools = response.json() - assert len(tools) == 0 - - -@pytest.mark.asyncio -async def test_get_tool_categories(client, mock_tools): - """Test the /tools/categories endpoint.""" - # Mock the available_tools - with patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/categories") - - # Check response status - assert response.status_code == 200 - - # Check response content - categories = response.json() - assert len(categories) == 3 - assert "TEST" in categories - assert "WALLET" in categories - assert "DAO" in categories - - -@pytest.mark.asyncio -async def test_search_tools(client, mock_tools): - """Test the /tools/search endpoint.""" - # Mock the available_tools - with patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/search?query=balance") - - # Check response status - assert response.status_code == 200 - - # Check response content - tools = response.json() - assert len(tools) == 1 - assert tools[0]["id"] == "wallet_get_balance" - - -@pytest.mark.asyncio -async def test_search_tools_with_category(client, mock_tools): - """Test the /tools/search endpoint with category filter.""" - # Mock the available_tools - with patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/search?query=get&category=DAO") - - # Check response status - assert response.status_code == 200 - - # Check response content - tools = response.json() - assert len(tools) == 1 - assert tools[0]["id"] == "dao_get_info" - - -@pytest.mark.asyncio -async def test_search_tools_no_results(client, mock_tools): - """Test the /tools/search endpoint with no matching results.""" - # Mock the available_tools - with patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/search?query=nonexistent") - - # Check response status - assert response.status_code == 200 - - # Check response content - tools = response.json() - assert len(tools) == 0 - - -@pytest.mark.asyncio -async def test_search_tools_missing_query(client, mock_tools): - """Test the /tools/search endpoint with missing query parameter.""" - # Mock the available_tools - with 
patch("api.tools.available_tools", mock_tools): - response = client.get("/tools/search") - - # Check response status - assert response.status_code == 422 # Unprocessable Entity - - -@pytest.mark.asyncio -async def test_get_tools_error_handling(client): - """Test error handling in the /tools/available endpoint.""" - # Mock get_available_tools to raise an exception - with patch("api.tools.available_tools", side_effect=Exception("Test error")): - response = client.get("/tools/available") - - # Check response status - assert response.status_code == 500 - - # Check error message - error = response.json() - assert "detail" in error - assert "Failed to serve available tools" in error["detail"] - - -@pytest.mark.asyncio -async def test_get_tool_categories_error_handling(client): - """Test error handling in the /tools/categories endpoint.""" - # Mock available_tools to raise an exception when accessed - with patch("api.tools.available_tools", side_effect=Exception("Test error")): - response = client.get("/tools/categories") - - # Check response status - assert response.status_code == 500 - - # Check error message - error = response.json() - assert "detail" in error - assert "Failed to serve tool categories" in error["detail"] - - -@pytest.mark.asyncio -async def test_search_tools_error_handling(client): - """Test error handling in the /tools/search endpoint.""" - # Mock available_tools to raise an exception when accessed - with patch("api.tools.available_tools", side_effect=Exception("Test error")): - response = client.get("/tools/search?query=test") - - # Check response status - assert response.status_code == 500 - - # Check error message - error = response.json() - assert "detail" in error - assert "Failed to search tools" in error["detail"] diff --git a/tests/api/test_webhook_auth.py b/tests/api/test_webhook_auth.py deleted file mode 100644 index c775e4b4..00000000 --- a/tests/api/test_webhook_auth.py +++ /dev/null @@ -1,64 +0,0 @@ -from unittest.mock import patch - -import pytest -from fastapi import HTTPException - -from api.dependencies import verify_webhook_auth - - -@pytest.mark.asyncio -async def test_verify_webhook_auth_missing_header(): - """Test authentication fails when Authorization header is missing.""" - with pytest.raises(HTTPException) as exc_info: - await verify_webhook_auth(authorization=None) - - assert exc_info.value.status_code == 401 - assert "Missing Authorization header" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_webhook_auth_invalid_format(): - """Test authentication fails when Authorization header has invalid format.""" - with pytest.raises(HTTPException) as exc_info: - await verify_webhook_auth(authorization="InvalidFormat") - - assert exc_info.value.status_code == 401 - assert "Invalid Authorization format" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_webhook_auth_invalid_token(): - """Test authentication fails when token is invalid.""" - with patch("api.dependencies.config") as mock_config: - mock_config.api.webhook_auth = "Bearer correct-token" - - with pytest.raises(HTTPException) as exc_info: - await verify_webhook_auth(authorization="Bearer wrong-token") - - assert exc_info.value.status_code == 401 - assert "Invalid authentication token" in exc_info.value.detail - - -@pytest.mark.asyncio -async def test_verify_webhook_auth_success(): - """Test authentication succeeds with valid token.""" - with patch("api.dependencies.config") as mock_config: - mock_config.api.webhook_auth = "Bearer correct-token" - - # 
Should not raise an exception - result = await verify_webhook_auth(authorization="Bearer correct-token") - - assert result is None # Function returns None on success - - -@pytest.mark.asyncio -async def test_verify_webhook_auth_with_raw_token(): - """Test authentication with raw token in config.""" - with patch("api.dependencies.config") as mock_config: - # Config has token without Bearer prefix - mock_config.api.webhook_auth = "correct-token" - - # Should not raise an exception - result = await verify_webhook_auth(authorization="Bearer correct-token") - - assert result is None # Function returns None on success diff --git a/tests/backend/test_models.py b/tests/backend/test_models.py deleted file mode 100644 index 77827621..00000000 --- a/tests/backend/test_models.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Tests for backend models.""" - -from backend.models import QueueMessageBase, QueueMessageFilter, QueueMessageType - - -def test_queue_message_type_enum(): - """Test QueueMessageType enum values.""" - assert QueueMessageType.TWEET == "tweet" - assert QueueMessageType.DAO == "dao" - assert QueueMessageType.DAO_TWEET == "dao_tweet" - assert QueueMessageType.DAO_PROPOSAL_VOTE == "dao_proposal_vote" - - # Test string conversion - assert str(QueueMessageType.TWEET) == "tweet" - assert str(QueueMessageType.DAO) == "dao" - assert str(QueueMessageType.DAO_TWEET) == "dao_tweet" - assert str(QueueMessageType.DAO_PROPOSAL_VOTE) == "dao_proposal_vote" - - -def test_queue_message_base_with_enum(): - """Test QueueMessageBase with QueueMessageType enum.""" - # Create a message with enum type - message = QueueMessageBase(type=QueueMessageType.TWEET) - assert message.type == QueueMessageType.TWEET - - # Test serialization/deserialization - message_dict = message.model_dump() - assert message_dict["type"] == "tweet" - - # Create from dict - message2 = QueueMessageBase.model_validate({"type": "tweet"}) - assert message2.type == QueueMessageType.TWEET - - -def test_queue_message_filter_with_enum(): - """Test QueueMessageFilter with QueueMessageType enum.""" - # Create a filter with enum type - filter_obj = QueueMessageFilter(type=QueueMessageType.DAO) - assert filter_obj.type == QueueMessageType.DAO - - # Test serialization/deserialization - filter_dict = filter_obj.model_dump() - assert filter_dict["type"] == "dao" diff --git a/tests/lib/test_alex.py b/tests/lib/test_alex.py deleted file mode 100644 index 05a6e3bd..00000000 --- a/tests/lib/test_alex.py +++ /dev/null @@ -1,261 +0,0 @@ -from typing import Dict, List -from unittest.mock import Mock, patch - -import pytest - -from lib.alex import AlexApi -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -@pytest.fixture -def mock_config() -> None: - """Fixture to mock config values.""" - with patch("config.config") as mock_config: - mock_config.api.alex_base_url = "https://test-alex-api.com/" - yield - - -@pytest.fixture -def alex_api(mock_config: None) -> AlexApi: - """Fixture providing an AlexApi instance.""" - return AlexApi() - - -@pytest.fixture -def mock_price_data() -> Dict[str, List[Dict[str, float]]]: - """Fixture providing mock price history data.""" - return { - "prices": [ - {"avg_price_usd": 1.0, "block_height": 1000}, - {"avg_price_usd": 2.0, "block_height": 2000}, - ] - } - - -@pytest.fixture -def mock_volume_data() -> Dict[str, List[Dict[str, float]]]: - """Fixture providing mock volume data.""" - return { - "volume_values": [ - {"volume_24h": 1000.0, "block_height": 1000}, - {"volume_24h": 2000.0, "block_height": 
2000}, - ] - } - - -def test_initialization(alex_api: AlexApi) -> None: - """Test AlexApi initialization.""" - assert alex_api.base_url == "https://test-alex-api.com/" - assert alex_api.limits == 500 - - -@patch("requests.get") -def test_get_success(mock_get: Mock, alex_api: AlexApi) -> None: - """Test successful GET request.""" - mock_response = Mock() - mock_response.json.return_value = {"data": "test"} - mock_get.return_value = mock_response - - result = alex_api._get("test-endpoint") - - assert result == {"data": "test"} - mock_get.assert_called_once_with( - "https://test-alex-api.com/test-endpoint", - headers={"Accept": "application/json"}, - params={}, - ) - - -@patch("requests.get") -def test_get_with_params(mock_get: Mock, alex_api: AlexApi) -> None: - """Test GET request with parameters.""" - mock_response = Mock() - mock_response.json.return_value = {"data": "test"} - mock_get.return_value = mock_response - - params = {"key": "value"} - result = alex_api._get("test-endpoint", params=params) - - assert result == {"data": "test"} - mock_get.assert_called_once_with( - "https://test-alex-api.com/test-endpoint", - headers={"Accept": "application/json"}, - params=params, - ) - - -@patch("requests.get") -def test_get_error(mock_get: Mock, alex_api: AlexApi) -> None: - """Test GET request error handling.""" - mock_get.side_effect = Exception("API Error") - - with pytest.raises(Exception, match="Alex API GET request error: API Error"): - alex_api._get("test-endpoint") - - -@patch("requests.get") -def test_get_pairs(mock_get: Mock, alex_api: AlexApi) -> None: - """Test pairs retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"data": ["pair1", "pair2"]} - mock_get.return_value = mock_response - - result = alex_api.get_pairs() - - assert result == ["pair1", "pair2"] - mock_get.assert_called_once_with( - "https://test-alex-api.com/v1/public/pairs", - headers={"Accept": "application/json"}, - params={}, - ) - - -@patch("requests.get") -def test_get_price_history( - mock_get: Mock, - alex_api: AlexApi, - mock_price_data: Dict[str, List[Dict[str, float]]], -) -> None: - """Test price history retrieval.""" - mock_response = Mock() - mock_response.json.return_value = mock_price_data - mock_get.return_value = mock_response - - result = alex_api.get_price_history("test-token") - - assert len(result) == 2 - assert all(key in result[0] for key in ["price", "block"]) - assert result[0]["price"] == 1.0 - assert result[0]["block"] == 1000 - mock_get.assert_called_once_with( - "https://test-alex-api.com/v1/price_history/test-token?limit=500", - headers={"Accept": "application/json"}, - params={}, - ) - - -@patch("requests.get") -def test_get_all_swaps(mock_get: Mock, alex_api: AlexApi) -> None: - """Test all swaps retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"swaps": ["swap1", "swap2"]} - mock_get.return_value = mock_response - - result = alex_api.get_all_swaps() - - assert result == {"swaps": ["swap1", "swap2"]} - mock_get.assert_called_once_with( - "https://test-alex-api.com/v1/allswaps", - headers={"Accept": "application/json"}, - params={}, - ) - - -@patch("requests.get") -def test_get_token_pool_volume( - mock_get: Mock, - alex_api: AlexApi, - mock_volume_data: Dict[str, List[Dict[str, float]]], -) -> None: - """Test pool volume retrieval.""" - mock_response = Mock() - mock_response.json.return_value = mock_volume_data - mock_get.return_value = mock_response - - result = alex_api.get_token_pool_volume("test-pool") - - assert len(result) == 2 - 
assert result[0]["volume_24h"] == 1000.0 - assert result[0]["block_height"] == 1000 - mock_get.assert_called_once_with( - "https://test-alex-api.com/v1/pool_volume/test-pool?limit=500", - headers={"Accept": "application/json"}, - params={}, - ) - - -@patch("requests.get") -def test_get_token_pool_agg_history( - mock_get: Mock, - alex_api: AlexApi, - mock_price_data: Dict[str, List[Dict[str, float]]], - mock_volume_data: Dict[str, List[Dict[str, float]]], -) -> None: - """Test aggregated history retrieval.""" - mock_response1 = Mock() - mock_response1.json.return_value = mock_price_data - mock_response2 = Mock() - mock_response2.json.return_value = mock_volume_data - mock_get.side_effect = [mock_response1, mock_response2] - - result = alex_api.get_token_pool_agg_history("test-token", "test-pool") - - assert len(result) == 2 - assert all(key in result[0] for key in ["price", "block", "volume_24h"]) - assert result[0]["price"] == 1.0 - assert result[0]["block"] == 1000 - assert result[0]["volume_24h"] == 1000.0 - assert mock_get.call_count == 2 - - -@patch("requests.get") -def test_get_token_pool_price(mock_get: Mock, alex_api: AlexApi) -> None: - """Test pool price retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"price": 1.5} - mock_get.return_value = mock_response - - result = alex_api.get_token_pool_price("test-pool") - - assert result == {"price": 1.5} - mock_get.assert_called_once_with( - "https://test-alex-api.com/v1/pool_token_price/test-pool?limit=500", - headers={"Accept": "application/json"}, - params={}, - ) - - -@patch("requests.get") -def test_get_token_tvl(mock_get: Mock, alex_api: AlexApi) -> None: - """Test TVL retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"tvl": 1000000.0} - mock_get.return_value = mock_response - - result = alex_api.get_token_tvl("test-pool") - - assert result == {"tvl": 1000000.0} - mock_get.assert_called_once_with( - "https://test-alex-api.com/v1/stats/tvl/test-pool?limit=500", - headers={"Accept": "application/json"}, - params={}, - ) - - -@patch("requests.get") -def test_error_handling(mock_get: Mock, alex_api: AlexApi) -> None: - """Test error handling for all methods.""" - mock_get.side_effect = Exception("API Error") - - with pytest.raises(Exception, match="Failed to get token pairs"): - alex_api.get_pairs() - - with pytest.raises(Exception, match="Failed to get token price history"): - alex_api.get_price_history("test-token") - - with pytest.raises(Exception, match="Failed to get all swaps"): - alex_api.get_all_swaps() - - with pytest.raises(Exception, match="Failed to get pool volume"): - alex_api.get_token_pool_volume("test-pool") - - with pytest.raises(Exception, match="Failed to get token price history"): - alex_api.get_token_pool_agg_history("test-token", "test-pool") - - with pytest.raises(Exception, match="Failed to get pool price"): - alex_api.get_token_pool_price("test-pool") - - with pytest.raises(Exception, match="Failed to get pool volume"): - alex_api.get_token_tvl("test-pool") diff --git a/tests/lib/test_hiro.py b/tests/lib/test_hiro.py deleted file mode 100644 index 250a6977..00000000 --- a/tests/lib/test_hiro.py +++ /dev/null @@ -1,482 +0,0 @@ -import time -from unittest.mock import Mock, patch - -import aiohttp -import pytest -import requests - -from lib.hiro import HiroApi, HiroApiError, HiroApiRateLimitError, HiroApiTimeoutError -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -@pytest.fixture -def mock_config() -> None: - """Fixture to 
mock config values.""" - with patch("config.config") as mock_config: - mock_config.api.hiro_api_url = "https://test-hiro-api.com/" - yield - - -@pytest.fixture -def hiro_api(mock_config: None) -> HiroApi: - """Fixture providing a HiroApi instance.""" - return HiroApi() - - -@pytest.fixture -def mock_response() -> Mock: - """Fixture providing a mock response.""" - mock = Mock() - mock.status_code = 200 - mock.json.return_value = {"data": "test_value"} - return mock - - -def test_initialization(hiro_api: HiroApi) -> None: - """Test HiroApi initialization.""" - assert hiro_api.base_url == "https://test-hiro-api.com/" - assert len(hiro_api._request_times) == 0 - assert hiro_api._cache is not None - assert hiro_api._session is None - - -def test_rate_limit(hiro_api: HiroApi) -> None: - """Test rate limiting functionality.""" - # Fill up the request times - current_time = time.time() - hiro_api._request_times = [current_time] * (hiro_api.RATE_LIMIT - 1) - - # This request should not trigger rate limiting - hiro_api._rate_limit() - assert len(hiro_api._request_times) == hiro_api.RATE_LIMIT - - # This request should trigger rate limiting - with patch("time.sleep") as mock_sleep: - hiro_api._rate_limit() - mock_sleep.assert_called_once() - - -@patch("requests.get") -def test_get_success(mock_get: Mock, hiro_api: HiroApi, mock_response: Mock) -> None: - """Test successful GET request.""" - mock_get.return_value = mock_response - - result = hiro_api._get("test-endpoint") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com/test-endpoint", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_with_params( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test GET request with parameters.""" - mock_get.return_value = mock_response - - params = {"key": "value"} - result = hiro_api._get("test-endpoint", params=params) - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com/test-endpoint", - headers={"Accept": "application/json"}, - params=params, - ) - - -@patch("requests.get") -def test_get_error(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test GET request error handling.""" - mock_get.side_effect = Exception("API Error") - - with pytest.raises(HiroApiError, match="Unexpected error: API Error"): - hiro_api._get("test-endpoint") - - -@patch("requests.get") -def test_get_rate_limit_error(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test rate limit error handling.""" - mock_response = Mock() - mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError( - response=Mock(status_code=429) - ) - mock_get.return_value = mock_response - - with pytest.raises(HiroApiRateLimitError): - hiro_api._get("test-endpoint") - - -@patch("requests.get") -def test_get_retry_on_timeout(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test retry mechanism on timeout.""" - mock_get.side_effect = [ - requests.exceptions.Timeout(), - requests.exceptions.Timeout(), - mock_response, - ] - - result = hiro_api._get("test-endpoint") - assert result == {"data": "test_value"} - assert mock_get.call_count == 3 - - -@patch("requests.get") -def test_get_max_retries_exceeded(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test max retries exceeded.""" - mock_get.side_effect = requests.exceptions.Timeout() - - with pytest.raises(HiroApiTimeoutError): - hiro_api._get("test-endpoint") - assert mock_get.call_count == hiro_api.MAX_RETRIES - - 
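
Note that the deleted `test_get_retry_on_timeout` above references `mock_response` without requesting it as a fixture parameter, so it would fail with a `NameError` if reinstated. A minimal corrected sketch, assuming the `hiro_api` and `mock_response` fixtures from the same module and the retry-on-timeout behaviour the test expects:

```python
# Hypothetical corrected sketch, not the original test: it assumes the
# `hiro_api` and `mock_response` fixtures defined earlier in the deleted
# module, and that HiroApi._get retries on requests.exceptions.Timeout.
from unittest.mock import Mock, patch

import requests

from lib.hiro import HiroApi


@patch("requests.get")
def test_get_retry_on_timeout(
    mock_get: Mock, hiro_api: HiroApi, mock_response: Mock
) -> None:
    """_get should retry transient timeouts and succeed on the third attempt."""
    mock_get.side_effect = [
        requests.exceptions.Timeout(),
        requests.exceptions.Timeout(),
        mock_response,  # third call returns the mocked 200 response
    ]

    result = hiro_api._get("test-endpoint")

    assert result == {"data": "test_value"}
    assert mock_get.call_count == 3
```
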
-@pytest.mark.asyncio -async def test_aget_success(hiro_api: HiroApi) -> None: - """Test successful async GET request.""" - mock_response = Mock() - mock_response.json.return_value = {"data": "test_value"} - mock_response.__aenter__.return_value = mock_response - - with patch.object(aiohttp.ClientSession, "get") as mock_get: - mock_get.return_value = mock_response - result = await hiro_api._aget("test-endpoint") - assert result == {"data": "test_value"} - - -@pytest.mark.asyncio -async def test_aget_error(hiro_api: HiroApi) -> None: - """Test async GET request error handling.""" - with patch.object(aiohttp.ClientSession, "get") as mock_get: - mock_get.side_effect = aiohttp.ClientError() - with pytest.raises(HiroApiError): - await hiro_api._aget("test-endpoint") - - -@pytest.mark.asyncio -async def test_close_session(hiro_api: HiroApi) -> None: - """Test closing async session.""" - # Create a session - await hiro_api._aget("test-endpoint") - assert hiro_api._session is not None - - # Close the session - await hiro_api.close() - assert hiro_api._session is None - - -def test_cached_methods(hiro_api: HiroApi) -> None: - """Test that caching works for decorated methods.""" - with patch.object(HiroApi, "_get") as mock_get: - mock_get.return_value = {"data": "test_value"} - - # First call should hit the API - result1 = hiro_api.get_token_holders("test-token") - assert result1 == {"data": "test_value"} - assert mock_get.call_count == 1 - - # Second call should use cache - result2 = hiro_api.get_token_holders("test-token") - assert result2 == {"data": "test_value"} - assert mock_get.call_count == 1 - - -# Token holder related tests -@patch("requests.get") -def test_get_token_holders( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test token holders retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_token_holders("test-token") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - f"{hiro_api.base_url}{hiro_api.ENDPOINTS['tokens']}/ft/test-token/holders", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_address_balance( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test address balance retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_address_balance("test-address") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - f"{hiro_api.base_url}{hiro_api.ENDPOINTS['addresses']}/test-address/balances", - headers={"Accept": "application/json"}, - params=None, - ) - - -# Transaction related tests -@patch("requests.get") -def test_get_transaction( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test transaction retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_transaction("test-tx") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/tx/test-tx", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_raw_transaction( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test raw transaction retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_raw_transaction("test-tx") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/tx/test-tx/raw", - headers={"Accept": "application/json"}, - params=None, - ) - - -# 
Block related tests -@patch("requests.get") -def test_get_blocks(mock_get: Mock, hiro_api: HiroApi, mock_response: Mock) -> None: - """Test blocks retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_blocks() - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/block", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_block_by_height( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test block retrieval by height.""" - mock_get.return_value = mock_response - - result = hiro_api.get_block_by_height(12345) - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/block/by_height/12345", - headers={"Accept": "application/json"}, - params=None, - ) - - -# Address related tests -@patch("requests.get") -def test_get_address_stx_balance( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test STX balance retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_address_stx_balance("test-principal") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/address/test-principal/stx", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_address_transactions( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test address transactions retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_address_transactions("test-principal") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/address/test-principal/transactions", - headers={"Accept": "application/json"}, - params=None, - ) - - -# Token related tests -@patch("requests.get") -def test_get_nft_holdings( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test NFT holdings retrieval.""" - mock_get.return_value = mock_response - params = {"limit": 20, "offset": 0} - - result = hiro_api.get_nft_holdings(**params) - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/tokens/nft/holdings", - headers={"Accept": "application/json"}, - params=params, - ) - - -# Contract related tests -@patch("requests.get") -def test_get_contract_by_id( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test contract retrieval by ID.""" - mock_get.return_value = mock_response - - result = hiro_api.get_contract_by_id("test-contract") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/contract/test-contract", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_contract_events( - mock_get: Mock, hiro_api: HiroApi, mock_response: Mock -) -> None: - """Test contract events retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_contract_events("test-contract") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/contract/test-contract/events", - headers={"Accept": "application/json"}, - params=None, - ) - - -# Utility endpoint tests -@patch("requests.get") -def test_get_stx_supply(mock_get: Mock, hiro_api: HiroApi, mock_response: Mock) -> None: - 
"""Test STX supply retrieval.""" - mock_get.return_value = mock_response - - result = hiro_api.get_stx_supply() - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/stx_supply", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_stx_price(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test STX price retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"price": 1.23} - mock_get.return_value = mock_response - - result = hiro_api.get_stx_price() - - assert result == 1.23 - mock_get.assert_called_once_with( - "https://explorer.hiro.so/stxPrice", params={"blockBurnTime": "current"} - ) - - -@patch("requests.get") -def test_get_current_block_height(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test current block height retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"results": [{"height": 12345}]} - mock_get.return_value = mock_response - - result = hiro_api.get_current_block_height() - - assert result == 12345 - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v2/blocks", - headers={"Accept": "application/json"}, - params={"limit": 1, "offset": 0}, - ) - - -@patch("requests.get") -def test_search(mock_get: Mock, hiro_api: HiroApi, mock_response: Mock) -> None: - """Test search functionality.""" - mock_get.return_value = mock_response - - result = hiro_api.search("test-query") - - assert result == {"data": "test_value"} - mock_get.assert_called_once_with( - "https://test-hiro-api.com//extended/v1/search/test-query", - headers={"Accept": "application/json"}, - params=None, - ) - - -# Error handling tests -@patch("requests.get") -def test_stx_price_error(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test STX price error handling.""" - mock_get.side_effect = Exception("API Error") - - with pytest.raises(Exception, match="Failed to get STX price: API Error"): - hiro_api.get_stx_price() - - -@patch("requests.get") -def test_current_block_height_error(mock_get: Mock, hiro_api: HiroApi) -> None: - """Test current block height error handling.""" - mock_get.side_effect = Exception("API Error") - - with pytest.raises( - Exception, match="Failed to get current block height: API Error" - ): - hiro_api.get_current_block_height() - - -@pytest.mark.asyncio -async def test_async_methods(hiro_api: HiroApi) -> None: - """Test async versions of methods.""" - mock_response = Mock() - mock_response.json.return_value = {"data": "test_value"} - mock_response.__aenter__.return_value = mock_response - - with patch.object(aiohttp.ClientSession, "get") as mock_get: - mock_get.return_value = mock_response - - # Test async token holders - result = await hiro_api.aget_token_holders("test-token") - assert result == {"data": "test_value"} - - # Test async address balance - result = await hiro_api.aget_address_balance("test-address") - assert result == {"data": "test_value"} diff --git a/tests/lib/test_images.py b/tests/lib/test_images.py deleted file mode 100644 index 2549ce54..00000000 --- a/tests/lib/test_images.py +++ /dev/null @@ -1,130 +0,0 @@ -from unittest.mock import Mock, patch - -import pytest - -from lib.images import ImageGenerationError, generate_image, generate_token_image -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -@pytest.fixture -def mock_openai_response() -> Mock: - """Fixture providing a mock OpenAI response.""" - mock_data = Mock() - mock_data.url = 
"https://fake-image-url.com/image.png" - mock_response = Mock() - mock_response.data = [mock_data] - return mock_response - - -@pytest.fixture -def mock_requests_response() -> Mock: - """Fixture providing a mock requests response.""" - mock_response = Mock() - mock_response.status_code = 200 - mock_response.content = b"fake-image-content" - return mock_response - - -def test_generate_image_success(mock_openai_response: Mock) -> None: - """Test successful image generation.""" - with patch("openai.OpenAI") as mock_client: - mock_instance = Mock() - mock_instance.images.generate.return_value = mock_openai_response - mock_client.return_value = mock_instance - - result = generate_image("test prompt") - assert result == "https://fake-image-url.com/image.png" - - mock_instance.images.generate.assert_called_once_with( - model="dall-e-3", quality="hd", prompt="test prompt", n=1, size="1024x1024" - ) - - -def test_generate_image_no_response() -> None: - """Test image generation with no response.""" - with patch("openai.OpenAI") as mock_client: - mock_instance = Mock() - mock_instance.images.generate.return_value = Mock(data=[]) - mock_client.return_value = mock_instance - - with pytest.raises( - ImageGenerationError, match="No response from image generation service" - ): - generate_image("test prompt") - - -def test_generate_image_api_error() -> None: - """Test image generation with API error.""" - with patch("openai.OpenAI") as mock_client: - mock_instance = Mock() - mock_instance.images.generate.side_effect = Exception("API Error") - mock_client.return_value = mock_instance - - with pytest.raises( - ImageGenerationError, match="Failed to generate image: API Error" - ): - generate_image("test prompt") - - -def test_generate_token_image_success( - mock_openai_response: Mock, mock_requests_response: Mock -) -> None: - """Test successful token image generation.""" - with patch("openai.OpenAI") as mock_client, patch("requests.get") as mock_get: - mock_instance = Mock() - mock_instance.images.generate.return_value = mock_openai_response - mock_client.return_value = mock_instance - mock_get.return_value = mock_requests_response - - result = generate_token_image("Test Token", "TT", "A test token") - assert result == b"fake-image-content" - - -def test_generate_token_image_download_error(mock_openai_response: Mock) -> None: - """Test token image generation with download error.""" - with patch("openai.OpenAI") as mock_client, patch("requests.get") as mock_get: - mock_instance = Mock() - mock_instance.images.generate.return_value = mock_openai_response - mock_client.return_value = mock_instance - - mock_response = Mock() - mock_response.status_code = 404 - mock_get.return_value = mock_response - - with pytest.raises( - ImageGenerationError, match="Failed to download image: HTTP 404" - ): - generate_token_image("Test Token", "TT", "A test token") - - -def test_generate_token_image_empty_content(mock_openai_response: Mock) -> None: - """Test token image generation with empty content.""" - with patch("openai.OpenAI") as mock_client, patch("requests.get") as mock_get: - mock_instance = Mock() - mock_instance.images.generate.return_value = mock_openai_response - mock_client.return_value = mock_instance - - mock_response = Mock() - mock_response.status_code = 200 - mock_response.content = b"" - mock_get.return_value = mock_response - - with pytest.raises(ImageGenerationError, match="Downloaded image is empty"): - generate_token_image("Test Token", "TT", "A test token") - - -def 
test_generate_token_image_unexpected_error(mock_openai_response: Mock) -> None: - """Test token image generation with unexpected error.""" - with patch("openai.OpenAI") as mock_client, patch("requests.get") as mock_get: - mock_instance = Mock() - mock_instance.images.generate.return_value = mock_openai_response - mock_client.return_value = mock_instance - - mock_get.side_effect = Exception("Unexpected error") - - with pytest.raises( - ImageGenerationError, match="Unexpected error generating token image" - ): - generate_token_image("Test Token", "TT", "A test token") diff --git a/tests/lib/test_logger.py b/tests/lib/test_logger.py deleted file mode 100644 index 586d638f..00000000 --- a/tests/lib/test_logger.py +++ /dev/null @@ -1,63 +0,0 @@ -import logging -import os -from typing import Generator - -import pytest - -from lib.logger import configure_logger - - -@pytest.fixture -def reset_logging() -> Generator[None, None, None]: - """Reset logging configuration after each test.""" - yield - logging.getLogger().handlers.clear() - logging.getLogger().setLevel(logging.NOTSET) - - -@pytest.fixture -def env_cleanup() -> Generator[None, None, None]: - """Clean up environment variables after each test.""" - old_level = os.environ.get("LOG_LEVEL") - yield - if old_level: - os.environ["LOG_LEVEL"] = old_level - else: - os.environ.pop("LOG_LEVEL", None) - - -def test_configure_logger_default(reset_logging: None) -> None: - """Test logger configuration with default settings.""" - logger = configure_logger() - assert logger.name == "uvicorn.error" - assert logger.level == logging.INFO - - -def test_configure_logger_custom_name(reset_logging: None) -> None: - """Test logger configuration with custom name.""" - logger = configure_logger("test_logger") - assert logger.name == "test_logger" - assert logger.level == logging.INFO - - -def test_configure_logger_custom_level(reset_logging: None, env_cleanup: None) -> None: - """Test logger configuration with custom log level.""" - os.environ["LOG_LEVEL"] = "DEBUG" - logger = configure_logger() - assert logger.level == logging.DEBUG - - -def test_configure_logger_invalid_level(reset_logging: None, env_cleanup: None) -> None: - """Test logger configuration with invalid log level.""" - os.environ["LOG_LEVEL"] = "INVALID" - logger = configure_logger() - assert logger.level == logging.INFO # Should default to INFO for invalid levels - - -def test_configure_logger_case_insensitive( - reset_logging: None, env_cleanup: None -) -> None: - """Test logger configuration with case-insensitive log level.""" - os.environ["LOG_LEVEL"] = "debug" - logger = configure_logger() - assert logger.level == logging.DEBUG diff --git a/tests/lib/test_lunarcrush.py b/tests/lib/test_lunarcrush.py deleted file mode 100644 index 7a4d55f9..00000000 --- a/tests/lib/test_lunarcrush.py +++ /dev/null @@ -1,141 +0,0 @@ -from unittest.mock import Mock, patch - -import pytest -import requests - -from lib.logger import configure_logger -from lib.lunarcrush import LunarCrushApi - -logger = configure_logger(__name__) - - -@pytest.fixture -def mock_response() -> Mock: - """Fixture providing a mock response.""" - mock = Mock() - mock.status_code = 200 - mock.json.return_value = {"data": {"test": "value"}} - return mock - - -@pytest.fixture -def api() -> LunarCrushApi: - """Fixture providing a LunarCrushApi instance.""" - return LunarCrushApi() - - -def test_get_success(api: LunarCrushApi, mock_response: Mock) -> None: - """Test successful GET request.""" - with patch("requests.get") as mock_get: - 
mock_get.return_value = mock_response - - result = api._get("/test-endpoint") - assert result == {"data": {"test": "value"}} - - mock_get.assert_called_once() - args, kwargs = mock_get.call_args - assert args[0] == "https://lunarcrush.com/api/v2/test-endpoint" - assert kwargs["headers"]["Authorization"] == f"Bearer {api.api_key}" - - -def test_get_with_params(api: LunarCrushApi, mock_response: Mock) -> None: - """Test GET request with parameters.""" - with patch("requests.get") as mock_get: - mock_get.return_value = mock_response - - params = {"key": "value"} - api._get("/test-endpoint", params=params) - - mock_get.assert_called_once() - args, kwargs = mock_get.call_args - assert kwargs["params"] == params - - -def test_get_error(api: LunarCrushApi) -> None: - """Test GET request with error.""" - with patch("requests.get") as mock_get: - mock_get.side_effect = Exception("API Error") - - with pytest.raises( - Exception, match="Lunarcrush API GET request error: API Error" - ): - api._get("/test-endpoint") - - -def test_get_token_socials(api: LunarCrushApi, mock_response: Mock) -> None: - """Test getting token socials.""" - with patch("requests.get") as mock_get: - mock_get.return_value = mock_response - - result = api.get_token_socials("0x123") - assert result == {"test": "value"} - - mock_get.assert_called_once() - args, kwargs = mock_get.call_args - assert args[0] == "https://lunarcrush.com/api/v2/coins/0x123/v1" - - -def test_get_token_metadata(api: LunarCrushApi, mock_response: Mock) -> None: - """Test getting token metadata.""" - with patch("requests.get") as mock_get: - mock_get.return_value = mock_response - - result = api.get_token_metadata("0x123") - assert result == {"test": "value"} - - mock_get.assert_called_once() - args, kwargs = mock_get.call_args - assert args[0] == "https://lunarcrush.com/api/v2/coins/0x123/meta/v1" - - -def test_get_token_social_history(api: LunarCrushApi, mock_response: Mock) -> None: - """Test getting token social history.""" - with patch("requests.get") as mock_get: - mock_get.return_value = mock_response - - result = api.get_token_social_history("0x123") - assert result == {"data": {"test": "value"}} - - mock_get.assert_called_once() - args, kwargs = mock_get.call_args - assert args[0] == "https://lunarcrush.com/api/v2/coins/0x123/time-series/v1" - - -def test_search(api: LunarCrushApi, mock_response: Mock) -> None: - """Test search functionality.""" - with patch("requests.get") as mock_get: - mock_get.return_value = mock_response - - result = api.search("test_term") - assert result == {"data": {"test": "value"}} - - mock_get.assert_called_once() - args, kwargs = mock_get.call_args - assert args[0] == "https://lunarcrush.com/api/v2/searches/search" - assert kwargs["params"] == {"term": "test_term"} - - -def test_http_error(api: LunarCrushApi) -> None: - """Test handling of HTTP errors.""" - with patch("requests.get") as mock_get: - mock_response = Mock() - mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError( - "404 Client Error" - ) - mock_get.return_value = mock_response - - with pytest.raises( - Exception, match="Lunarcrush API GET request error: 404 Client Error" - ): - api._get("/test-endpoint") - - -def test_connection_error(api: LunarCrushApi) -> None: - """Test handling of connection errors.""" - with patch("requests.get") as mock_get: - mock_get.side_effect = requests.exceptions.ConnectionError("Connection refused") - - with pytest.raises( - Exception, match="Lunarcrush API GET request error: Connection refused" - ): - 
api._get("/test-endpoint") diff --git a/tests/lib/test_persona.py b/tests/lib/test_persona.py deleted file mode 100644 index af0c16a3..00000000 --- a/tests/lib/test_persona.py +++ /dev/null @@ -1,131 +0,0 @@ -from unittest.mock import Mock - -import pytest - -from backend.models import Agent -from lib.logger import configure_logger -from lib.persona import generate_persona, generate_static_persona - -logger = configure_logger(__name__) - - -@pytest.fixture -def mock_agent() -> Agent: - """Fixture providing a mock Agent instance.""" - agent = Mock(spec=Agent) - agent.name = "TestBot" - agent.backstory = "A test bot with a simple backstory" - agent.role = "Testing assistant" - agent.goal = "Help with testing" - return agent - - -def test_generate_persona(mock_agent: Agent) -> None: - """Test persona generation with a mock agent.""" - persona = generate_persona(mock_agent) - - # Check that the persona is a string - assert isinstance(persona, str) - - # Check that agent attributes are included in the persona - assert mock_agent.name in persona - assert mock_agent.backstory in persona - assert mock_agent.role in persona - assert mock_agent.goal in persona - - # Check for required sections - required_sections = [ - "Knowledge:", - "Extensions:", - "Disclaimer:", - "Style:", - "Boundaries:", - ] - for section in required_sections: - assert section in persona - - -def test_generate_static_persona() -> None: - """Test static persona generation.""" - persona = generate_static_persona() - - # Check that the persona is a string - assert isinstance(persona, str) - - # Check for default name - assert "AI Assistant" in persona - - # Check for required sections - required_sections = [ - "Role:", - "Goal:", - "Knowledge:", - "Extensions:", - "Disclaimer:", - "Style:", - "Boundaries:", - ] - for section in required_sections: - assert section in persona - - # Check for specific content - assert "Stacks blockchain" in persona - assert "not a licensed financial advisor" in persona - assert "do not support or endorse illicit activities" in persona - - -def test_persona_formatting() -> None: - """Test persona formatting rules.""" - persona = generate_static_persona() - - # Check that the persona doesn't contain emojis - # This is a basic check - you might want to add more comprehensive emoji detection - common_emojis = ["😊", "👍", "🚀", "💰", "📈"] - for emoji in common_emojis: - assert emoji not in persona - - # Check that markdown syntax isn't used - markdown_elements = ["##", "**", "__", "```", "==="] - for element in markdown_elements: - assert element not in persona - - -def test_persona_content_consistency(mock_agent: Agent) -> None: - """Test that generated personas maintain consistent content across calls.""" - persona1 = generate_persona(mock_agent) - persona2 = generate_persona(mock_agent) - assert persona1 == persona2 - - static_persona1 = generate_static_persona() - static_persona2 = generate_static_persona() - assert static_persona1 == static_persona2 - - -def test_persona_security_elements() -> None: - """Test that personas include necessary security-related content.""" - persona = generate_static_persona() - - security_elements = [ - "security best practices", - "keep private keys secure", - "do their own research", - ] - - for element in security_elements: - assert element.lower() in persona.lower() - - -def test_persona_with_empty_agent_fields(mock_agent: Agent) -> None: - """Test persona generation with empty agent fields.""" - mock_agent.name = "" - mock_agent.backstory = "" - mock_agent.role = "" 
- mock_agent.goal = "" - - persona = generate_persona(mock_agent) - - # Check that the persona is still generated and contains core elements - assert isinstance(persona, str) - assert "Knowledge:" in persona - assert "Extensions:" in persona - assert "Disclaimer:" in persona diff --git a/tests/lib/test_platform.py b/tests/lib/test_platform.py deleted file mode 100644 index 836749fd..00000000 --- a/tests/lib/test_platform.py +++ /dev/null @@ -1,194 +0,0 @@ -from unittest.mock import Mock, patch - -import pytest - -from lib.hiro import PlatformApi -from lib.logger import configure_logger - -logger = configure_logger(__name__) - - -@pytest.fixture -def mock_config() -> None: - """Fixture to mock config values.""" - with patch("config.config") as mock_config: - mock_config.api.hiro_api_key = "test_api_key" - mock_config.api.webhook_url = "https://test-webhook.com" - mock_config.api.webhook_auth = "test_auth" - yield - - -@pytest.fixture -def api(mock_config: None) -> PlatformApi: - """Fixture providing a PlatformApi instance.""" - return PlatformApi() - - -def test_init_missing_api_key() -> None: - """Test initialization with missing API key.""" - with patch("config.config") as mock_config: - mock_config.api.hiro_api_key = None - with pytest.raises( - ValueError, match="HIRO_API_KEY environment variable is required" - ): - PlatformApi() - - -def test_generate_contract_deployment_predicate(api: PlatformApi) -> None: - """Test contract deployment predicate generation.""" - predicate = api.generate_contract_deployment_predicate( - txid="test_txid", - start_block=1000, - network="testnet", - name="test_hook", - end_block=2000, - expire_after_occurrence=2, - webhook_url="https://custom-webhook.com", - webhook_auth="custom_auth", - ) - - assert predicate["name"] == "test_hook" - assert predicate["chain"] == "stacks" - assert predicate["version"] == 1 - - network_config = predicate["networks"]["testnet"] - assert network_config["if_this"]["scope"] == "txid" - assert network_config["if_this"]["equals"] == "test_txid" - assert network_config["start_block"] == 1000 - assert network_config["end_block"] == 2000 - assert network_config["expire_after_occurrence"] == 2 - assert ( - network_config["then_that"]["http_post"]["url"] == "https://custom-webhook.com" - ) - assert ( - network_config["then_that"]["http_post"]["authorization_header"] - == "custom_auth" - ) - - -def test_generate_contract_deployment_predicate_defaults(api: PlatformApi) -> None: - """Test contract deployment predicate generation with default values.""" - predicate = api.generate_contract_deployment_predicate("test_txid") - - assert predicate["name"] == "test" - network_config = predicate["networks"]["testnet"] - assert network_config["start_block"] == 75996 - assert network_config["end_block"] is None - assert network_config["expire_after_occurrence"] == 1 - assert network_config["then_that"]["http_post"]["url"] == api.webhook_url - assert ( - network_config["then_that"]["http_post"]["authorization_header"] - == api.webhook_auth - ) - - -def test_create_contract_deployment_hook(api: PlatformApi) -> None: - """Test contract deployment hook creation.""" - with patch.object(api, "create_chainhook") as mock_create_chainhook: - mock_create_chainhook.return_value = {"status": "success"} - - result = api.create_contract_deployment_hook("test_txid", name="test_hook") - assert result == {"status": "success"} - - # Verify the predicate was generated correctly - mock_create_chainhook.assert_called_once() - predicate = 
mock_create_chainhook.call_args[0][0] - assert predicate["name"] == "test_hook" - assert predicate["networks"]["testnet"]["if_this"]["equals"] == "test_txid" - - -def test_create_chainhook(api: PlatformApi) -> None: - """Test chainhook creation.""" - mock_response = Mock() - mock_response.json.return_value = {"status": "success"} - - with patch("requests.post") as mock_post: - mock_post.return_value = mock_response - - predicate = {"test": "predicate"} - result = api.create_chainhook(predicate) - - assert result == {"status": "success"} - mock_post.assert_called_once_with( - f"{api.base_url}/v1/ext/{api.api_key}/chainhooks", - headers={"Content-Type": "application/json"}, - json=predicate, - ) - - -def test_create_chainhook_error(api: PlatformApi) -> None: - """Test chainhook creation error handling.""" - with patch("requests.post") as mock_post: - mock_post.side_effect = Exception("API Error") - - with pytest.raises(Exception, match="Hiro API POST request error: API Error"): - api.create_chainhook({"test": "predicate"}) - - -def test_generate_dao_x_linkage(api: PlatformApi) -> None: - """Test DAO X linkage predicate generation.""" - predicate = api.generate_dao_x_linkage( - contract_identifier="test.contract", - method="test_method", - start_block=2000, - network="mainnet", - name="test_dao", - end_block=3000, - webhook_url="https://custom-webhook.com", - webhook_auth="custom_auth", - ) - - assert predicate["name"] == "test_dao" - assert predicate["chain"] == "stacks" - assert predicate["version"] == 1 - - network_config = predicate["networks"]["mainnet"] - assert network_config["if_this"]["scope"] == "contract_call" - assert network_config["if_this"]["method"] == "test_method" - assert network_config["if_this"]["contract_identifier"] == "test.contract" - assert network_config["start_block"] == 2000 - assert network_config["end_block"] == 3000 - assert ( - network_config["then_that"]["http_post"]["url"] == "https://custom-webhook.com" - ) - assert ( - network_config["then_that"]["http_post"]["authorization_header"] - == "custom_auth" - ) - - -def test_generate_dao_x_linkage_defaults(api: PlatformApi) -> None: - """Test DAO X linkage predicate generation with default values.""" - predicate = api.generate_dao_x_linkage("test.contract") - - assert predicate["name"] == "getMessage" - network_config = predicate["networks"]["mainnet"] - assert network_config["if_this"]["method"] == "send" - assert network_config["start_block"] == 601924 - assert network_config["end_block"] is None - assert network_config["then_that"]["http_post"]["url"] == api.webhook_url - assert ( - network_config["then_that"]["http_post"]["authorization_header"] - == api.webhook_auth - ) - - -def test_create_dao_x_linkage_hook(api: PlatformApi) -> None: - """Test DAO X linkage hook creation.""" - with patch.object(api, "create_chainhook") as mock_create_chainhook: - mock_create_chainhook.return_value = {"status": "success"} - - result = api.create_dao_x_linkage_hook( - "test.contract", "test_method", name="test_dao" - ) - assert result == {"status": "success"} - - # Verify the predicate was generated correctly - mock_create_chainhook.assert_called_once() - predicate = mock_create_chainhook.call_args[0][0] - assert predicate["name"] == "test_dao" - assert ( - predicate["networks"]["mainnet"]["if_this"]["contract_identifier"] - == "test.contract" - ) - assert predicate["networks"]["mainnet"]["if_this"]["method"] == "test_method" diff --git a/tests/lib/test_token_assets.py b/tests/lib/test_token_assets.py deleted file mode 
100644 index 9eef054c..00000000 --- a/tests/lib/test_token_assets.py +++ /dev/null @@ -1,216 +0,0 @@ -import json -from unittest.mock import Mock, patch - -import pytest - -from lib.logger import configure_logger -from lib.token_assets import ( - ImageGenerationError, - StorageError, - TokenAssetError, - TokenAssetManager, - TokenMetadata, -) - -logger = configure_logger(__name__) - - -@pytest.fixture -def token_metadata() -> TokenMetadata: - """Fixture providing sample token metadata.""" - return TokenMetadata( - name="Test Token", - symbol="TEST", - description="A test token for unit testing", - decimals=8, - max_supply="21000000", - ) - - -@pytest.fixture -def token_manager() -> TokenAssetManager: - """Fixture providing a TokenAssetManager instance.""" - return TokenAssetManager("test-token-123") - - -@pytest.fixture -def mock_image_bytes() -> bytes: - """Fixture providing mock image bytes.""" - return b"fake-image-data" - - -def test_token_metadata_initialization(token_metadata: TokenMetadata) -> None: - """Test TokenMetadata initialization.""" - assert token_metadata.name == "Test Token" - assert token_metadata.symbol == "TEST" - assert token_metadata.description == "A test token for unit testing" - assert token_metadata.decimals == 8 - assert token_metadata.max_supply == "21000000" - assert token_metadata.image_url is None - assert token_metadata.uri is None - - -def test_token_asset_manager_initialization(token_manager: TokenAssetManager) -> None: - """Test TokenAssetManager initialization.""" - assert token_manager.token_id == "test-token-123" - assert token_manager.DEFAULT_EXTERNAL_URL == "https://aibtc.dev/" - assert token_manager.DEFAULT_SIP_VERSION == 10 - - -@patch("lib.images.generate_token_image") -@patch("backend.factory.backend.upload_file") -def test_generate_and_store_image_success( - mock_upload: Mock, - mock_generate: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, - mock_image_bytes: bytes, -) -> None: - """Test successful image generation and storage.""" - mock_generate.return_value = mock_image_bytes - mock_upload.return_value = "https://example.com/image.png" - - result = token_manager.generate_and_store_image(token_metadata) - - assert result == "https://example.com/image.png" - mock_generate.assert_called_once_with( - name=token_metadata.name, - symbol=token_metadata.symbol, - description=token_metadata.description, - ) - mock_upload.assert_called_once_with("test-token-123.png", mock_image_bytes) - - -@patch("lib.images.generate_token_image") -def test_generate_and_store_image_invalid_data( - mock_generate: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, -) -> None: - """Test image generation with invalid data type.""" - mock_generate.return_value = "invalid-data-type" - - with pytest.raises(ImageGenerationError, match="Invalid image data type"): - token_manager.generate_and_store_image(token_metadata) - - -@patch("lib.images.generate_token_image") -def test_generate_and_store_image_generation_error( - mock_generate: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, -) -> None: - """Test image generation error.""" - mock_generate.side_effect = ImageGenerationError("Generation failed") - - with pytest.raises(ImageGenerationError, match="Generation failed"): - token_manager.generate_and_store_image(token_metadata) - - -@patch("lib.images.generate_token_image") -@patch("backend.factory.backend.upload_file") -def test_generate_and_store_image_storage_error( - mock_upload: Mock, - 
mock_generate: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, - mock_image_bytes: bytes, -) -> None: - """Test image storage error.""" - mock_generate.return_value = mock_image_bytes - mock_upload.side_effect = StorageError("Storage failed") - - with pytest.raises(StorageError, match="Storage failed"): - token_manager.generate_and_store_image(token_metadata) - - -@patch("backend.factory.backend.upload_file") -def test_generate_and_store_metadata_success( - mock_upload: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, -) -> None: - """Test successful metadata generation and storage.""" - token_metadata.image_url = "https://example.com/image.png" - mock_upload.return_value = "https://example.com/metadata.json" - - result = token_manager.generate_and_store_metadata(token_metadata) - - assert result == "https://example.com/metadata.json" - mock_upload.assert_called_once() - - # Verify JSON content - args = mock_upload.call_args[0] - assert args[0] == "test-token-123.json" - json_data = json.loads(args[1].decode("utf-8")) - assert json_data["name"] == token_metadata.name - assert json_data["description"] == token_metadata.description - assert json_data["image"] == token_metadata.image_url - assert json_data["properties"]["decimals"] == token_metadata.decimals - assert json_data["properties"]["external_url"] == token_manager.DEFAULT_EXTERNAL_URL - assert json_data["sip"] == token_manager.DEFAULT_SIP_VERSION - - -@patch("backend.factory.backend.upload_file") -def test_generate_and_store_metadata_storage_error( - mock_upload: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, -) -> None: - """Test metadata storage error.""" - mock_upload.side_effect = Exception("Upload failed") - - with pytest.raises(StorageError, match="Failed to store metadata"): - token_manager.generate_and_store_metadata(token_metadata) - - -@patch.object(TokenAssetManager, "generate_and_store_image") -@patch.object(TokenAssetManager, "generate_and_store_metadata") -def test_generate_all_assets_success( - mock_metadata: Mock, - mock_image: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, -) -> None: - """Test successful generation of all assets.""" - mock_image.return_value = "https://example.com/image.png" - mock_metadata.return_value = "https://example.com/metadata.json" - - result = token_manager.generate_all_assets(token_metadata) - - assert result == { - "image_url": "https://example.com/image.png", - "metadata_url": "https://example.com/metadata.json", - } - mock_image.assert_called_once_with(token_metadata) - mock_metadata.assert_called_once_with(token_metadata) - assert token_metadata.image_url == "https://example.com/image.png" - - -@patch.object(TokenAssetManager, "generate_and_store_image") -def test_generate_all_assets_image_error( - mock_image: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, -) -> None: - """Test asset generation with image error.""" - mock_image.side_effect = ImageGenerationError("Image generation failed") - - with pytest.raises(TokenAssetError, match="Asset generation failed"): - token_manager.generate_all_assets(token_metadata) - - -@patch.object(TokenAssetManager, "generate_and_store_image") -@patch.object(TokenAssetManager, "generate_and_store_metadata") -def test_generate_all_assets_metadata_error( - mock_metadata: Mock, - mock_image: Mock, - token_manager: TokenAssetManager, - token_metadata: TokenMetadata, -) -> None: - """Test asset generation with metadata 
error.""" - mock_image.return_value = "https://example.com/image.png" - mock_metadata.side_effect = StorageError("Metadata storage failed") - - with pytest.raises(TokenAssetError, match="Asset generation failed"): - token_manager.generate_all_assets(token_metadata) diff --git a/tests/lib/test_tokenizer.py b/tests/lib/test_tokenizer.py deleted file mode 100644 index 17bea5c9..00000000 --- a/tests/lib/test_tokenizer.py +++ /dev/null @@ -1,91 +0,0 @@ -from typing import Any, Dict, List - -import pytest - -from lib.logger import configure_logger -from lib.tokenizer import Trimmer - -logger = configure_logger(__name__) - - -@pytest.fixture -def sample_messages() -> List[Dict[str, Any]]: - """Fixture providing sample messages for testing.""" - return [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello, how are you?"}, - {"role": "assistant", "content": "I'm doing well, thank you for asking!"}, - {"role": "user", "content": "That's great to hear!"}, - ] - - -def test_trimmer_initialization() -> None: - """Test Trimmer initialization with default and custom parameters.""" - default_trimmer = Trimmer() - assert default_trimmer.token_model == "gpt-4.1" - assert default_trimmer.maxsize == 50000 - assert default_trimmer.margin == 500 - - custom_trimmer = Trimmer(token_model="gpt-3.5-turbo", maxsize=4000, margin=200) - assert custom_trimmer.token_model == "gpt-3.5-turbo" - assert custom_trimmer.maxsize == 4000 - assert custom_trimmer.margin == 200 - - -def test_count_tokens(sample_messages: List[Dict[str, Any]]) -> None: - """Test token counting functionality.""" - trimmer = Trimmer() - token_count = trimmer.count_tokens(sample_messages) - assert token_count > 0 - assert isinstance(token_count, int) - - # Test with empty messages - assert trimmer.count_tokens([]) == 0 - - # Test with empty content - empty_content_messages = [{"role": "user", "content": ""}] - assert trimmer.count_tokens(empty_content_messages) == 0 - - -def test_trim_messages(sample_messages: List[Dict[str, Any]]) -> None: - """Test message trimming functionality.""" - # Create a trimmer with a very small maxsize to force trimming - trimmer = Trimmer(maxsize=50, margin=10) - - # Make a copy of messages to avoid modifying the fixture - messages = sample_messages.copy() - original_length = len(messages) - - trimmer.trim_messages(messages) - assert len(messages) < original_length - - # System message (index 0) and last message should be preserved - assert messages[0]["role"] == "system" - assert messages[-1]["content"] == "That's great to hear!" 
- - -def test_trim_messages_short_conversation( - sample_messages: List[Dict[str, Any]], -) -> None: - """Test trimming with very short conversations.""" - trimmer = Trimmer() - - # Test with just system and one user message - short_messages = sample_messages[:2] - original_messages = short_messages.copy() - - trimmer.trim_messages(short_messages) - assert short_messages == original_messages # Should not modify messages - - -def test_trim_messages_no_system_message() -> None: - """Test trimming messages without a system message.""" - trimmer = Trimmer(maxsize=50, margin=10) - messages = [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there!"}, - {"role": "user", "content": "How are you?"}, - ] - - trimmer.trim_messages(messages) - assert len(messages) > 0 # Should still preserve some messages diff --git a/tests/lib/test_tools.py b/tests/lib/test_tools.py deleted file mode 100644 index c5dccbc1..00000000 --- a/tests/lib/test_tools.py +++ /dev/null @@ -1,120 +0,0 @@ -import json -from unittest.mock import Mock, patch - -import pytest - -from lib.tools import Tool, extract_tool_info, get_available_tools - - -class MockToolInstance: - def __init__(self, name: str, description: str, schema: Mock = None): - self.name = name - self.description = description - self.args_schema = schema - - -class MockSchemaField: - def __init__(self, description: str, annotation: str): - self.description = description - self.annotation = annotation - - -def test_extract_tool_info_valid(): - """Test extracting tool info with valid input.""" - # Setup mock schema - mock_schema = Mock() - mock_schema.model_fields = { - "param1": MockSchemaField("Test param", "str"), - "param2": MockSchemaField("Another param", "int"), - } - - # Create mock tool instance - tool_instance = MockToolInstance( - name="category_test_tool", - description="Test description", - schema=mock_schema, - ) - - # Extract tool info - result = extract_tool_info("category_test_tool", tool_instance) - - # Verify result - assert result is not None - assert result.id == "category_test_tool" - assert result.name == "Test Tool" - assert result.description == "Test description" - assert result.category == "CATEGORY" - - # Verify parameters - params = json.loads(result.parameters) - assert len(params) == 2 - assert params["param1"]["type"] == "str" - assert params["param2"]["type"] == "int" - - -def test_extract_tool_info_no_schema(): - """Test extracting tool info with no schema.""" - tool_instance = MockToolInstance( - name="test_tool", - description="Test description", - schema=None, - ) - - result = extract_tool_info("test_tool", tool_instance) - assert result is None - - -def test_extract_tool_info_error_handling(): - """Test error handling in extract_tool_info.""" - # Create a tool instance that will raise an exception - tool_instance = Mock() - tool_instance.args_schema = Mock(side_effect=Exception("Test error")) - - result = extract_tool_info("test_tool", tool_instance) - assert result is None - - -@patch("lib.tools.initialize_tools") -def test_get_available_tools_success(mock_initialize_tools): - """Test successfully getting available tools.""" - # Setup mock schema - mock_schema = Mock() - mock_schema.model_fields = { - "param1": MockSchemaField("Test param", "str"), - } - - # Setup mock tools - mock_tools = { - "category_tool1": MockToolInstance( - name="category_tool1", - description="Tool 1", - schema=mock_schema, - ), - "category_tool2": MockToolInstance( - name="category_tool2", - description="Tool 2", - 
schema=mock_schema, - ), - } - - # Configure mock - mock_initialize_tools.return_value = mock_tools - - # Get tools - result = get_available_tools() - - # Verify results - assert len(result) == 2 - assert all(isinstance(tool, Tool) for tool in result) - assert {tool.name for tool in result} == {"Tool1", "Tool2"} - - -@patch("lib.tools.initialize_tools") -def test_get_available_tools_error(mock_initialize_tools): - """Test error handling in get_available_tools.""" - # Configure mock to raise an exception - mock_initialize_tools.side_effect = Exception("Test error") - - # Verify exception is raised - with pytest.raises(Exception): - get_available_tools() diff --git a/tests/lib/test_twitter.py b/tests/lib/test_twitter.py deleted file mode 100644 index dfcdd884..00000000 --- a/tests/lib/test_twitter.py +++ /dev/null @@ -1,221 +0,0 @@ -from typing import Dict -from unittest.mock import Mock, patch - -import pytest -from pytwitter.models import Tweet, User - -from lib.logger import configure_logger -from lib.twitter import TwitterService - -logger = configure_logger(__name__) - - -@pytest.fixture -def twitter_credentials() -> Dict[str, str]: - """Fixture providing test Twitter credentials.""" - return { - "consumer_key": "test_consumer_key", - "consumer_secret": "test_consumer_secret", - "access_token": "test_access_token", - "access_secret": "test_access_secret", - "client_id": "test_client_id", - "client_secret": "test_client_secret", - } - - -@pytest.fixture -def twitter_service(twitter_credentials: Dict[str, str]) -> TwitterService: - """Fixture providing a TwitterService instance.""" - service = TwitterService(**twitter_credentials) - return service - - -@pytest.fixture -def mock_tweet() -> Tweet: - """Fixture providing a mock Tweet.""" - tweet = Mock(spec=Tweet) - tweet.id = "123456789" - tweet.text = "Test tweet" - return tweet - - -@pytest.fixture -def mock_user() -> User: - """Fixture providing a mock User.""" - user = Mock(spec=User) - user.id = "987654321" - user.username = "test_user" - return user - - -def test_initialization(twitter_service: TwitterService) -> None: - """Test TwitterService initialization.""" - assert twitter_service.consumer_key == "test_consumer_key" - assert twitter_service.consumer_secret == "test_consumer_secret" - assert twitter_service.access_token == "test_access_token" - assert twitter_service.access_secret == "test_access_secret" - assert twitter_service.client_id == "test_client_id" - assert twitter_service.client_secret == "test_client_secret" - assert twitter_service.client is None - - -def test_initialize_success(twitter_service: TwitterService) -> None: - """Test successful Twitter client initialization.""" - with patch("pytwitter.Api") as mock_api: - twitter_service.initialize() - - mock_api.assert_called_once_with( - client_id=twitter_service.client_id, - client_secret=twitter_service.client_secret, - consumer_key=twitter_service.consumer_key, - consumer_secret=twitter_service.consumer_secret, - access_token=twitter_service.access_token, - access_secret=twitter_service.access_secret, - application_only_auth=False, - ) - assert twitter_service.client is not None - - -def test_initialize_failure(twitter_service: TwitterService) -> None: - """Test Twitter client initialization failure.""" - with patch("pytwitter.Api", side_effect=Exception("API Error")): - with pytest.raises(Exception, match="API Error"): - twitter_service.initialize() - assert twitter_service.client is None - - -@pytest.mark.asyncio -async def test_ainitialize(twitter_service: 
TwitterService) -> None: - """Test asynchronous initialization.""" - with patch.object(twitter_service, "initialize") as mock_initialize: - await twitter_service._ainitialize() - mock_initialize.assert_called_once() - - -def test_post_tweet_success(twitter_service: TwitterService, mock_tweet: Tweet) -> None: - """Test successful tweet posting.""" - twitter_service.client = Mock() - twitter_service.client.create_tweet.return_value = mock_tweet - - result = twitter_service.post_tweet("Test message") - - assert result == mock_tweet - twitter_service.client.create_tweet.assert_called_once_with( - text="Test message", reply_in_reply_to_tweet_id=None - ) - - -def test_post_tweet_with_reply( - twitter_service: TwitterService, mock_tweet: Tweet -) -> None: - """Test tweet posting with reply.""" - twitter_service.client = Mock() - twitter_service.client.create_tweet.return_value = mock_tweet - - result = twitter_service.post_tweet( - "Test reply", reply_in_reply_to_tweet_id="987654321" - ) - - assert result == mock_tweet - twitter_service.client.create_tweet.assert_called_once_with( - text="Test reply", reply_in_reply_to_tweet_id="987654321" - ) - - -def test_post_tweet_client_not_initialized(twitter_service: TwitterService) -> None: - """Test tweet posting with uninitialized client.""" - result = twitter_service.post_tweet("Test message") - assert result is None - - -def test_post_tweet_failure(twitter_service: TwitterService) -> None: - """Test tweet posting failure.""" - twitter_service.client = Mock() - twitter_service.client.create_tweet.side_effect = Exception("API Error") - - result = twitter_service.post_tweet("Test message") - assert result is None - - -@pytest.mark.asyncio -async def test_get_user_by_username_success( - twitter_service: TwitterService, mock_user: User -) -> None: - """Test successful user retrieval by username.""" - twitter_service.client = Mock() - twitter_service.client.get_user.return_value = mock_user - - result = await twitter_service.get_user_by_username("test_user") - - assert result == mock_user - twitter_service.client.get_user.assert_called_once_with(username="test_user") - - -@pytest.mark.asyncio -async def test_get_user_by_username_failure(twitter_service: TwitterService) -> None: - """Test user retrieval failure by username.""" - twitter_service.client = Mock() - twitter_service.client.get_user.side_effect = Exception("API Error") - - result = await twitter_service.get_user_by_username("test_user") - assert result is None - - -@pytest.mark.asyncio -async def test_get_user_by_user_id_success( - twitter_service: TwitterService, mock_user: User -) -> None: - """Test successful user retrieval by user ID.""" - twitter_service.client = Mock() - twitter_service.client.get_user.return_value = mock_user - - result = await twitter_service.get_user_by_user_id("123456789") - - assert result == mock_user - twitter_service.client.get_user.assert_called_once_with(user_id="123456789") - - -@pytest.mark.asyncio -async def test_get_mentions_success( - twitter_service: TwitterService, mock_tweet: Tweet -) -> None: - """Test successful mentions retrieval.""" - twitter_service.client = Mock() - mock_response = Mock() - mock_response.data = [mock_tweet] - twitter_service.client.get_mentions.return_value = mock_response - - result = await twitter_service.get_mentions_by_user_id("123456789") - - assert result == [mock_tweet] - twitter_service.client.get_mentions.assert_called_once() - args, kwargs = twitter_service.client.get_mentions.call_args - assert kwargs["user_id"] == 
"123456789" - assert kwargs["max_results"] == 100 - assert "tweet_fields" in kwargs - assert "expansions" in kwargs - assert "user_fields" in kwargs - assert "media_fields" in kwargs - assert "place_fields" in kwargs - assert "poll_fields" in kwargs - - -@pytest.mark.asyncio -async def test_get_mentions_failure(twitter_service: TwitterService) -> None: - """Test mentions retrieval failure.""" - twitter_service.client = Mock() - twitter_service.client.get_mentions.side_effect = Exception("API Error") - - result = await twitter_service.get_mentions_by_user_id("123456789") - assert result == [] - - -@pytest.mark.asyncio -async def test_apost_tweet(twitter_service: TwitterService) -> None: - """Test asynchronous tweet posting.""" - with patch.object(twitter_service, "post_tweet") as mock_post_tweet: - mock_post_tweet.return_value = Mock(spec=Tweet) - result = await twitter_service._apost_tweet("Test message", "987654321") - - mock_post_tweet.assert_called_once_with("Test message", "987654321") - assert isinstance(result, Mock) # Mock of Tweet diff --git a/tests/lib/test_velar.py b/tests/lib/test_velar.py deleted file mode 100644 index 4df56146..00000000 --- a/tests/lib/test_velar.py +++ /dev/null @@ -1,248 +0,0 @@ -from typing import Dict, List -from unittest.mock import Mock, patch - -import pytest - -from lib.logger import configure_logger -from lib.velar import VelarApi - -logger = configure_logger(__name__) - - -@pytest.fixture -def velar_api() -> VelarApi: - """Fixture providing a VelarApi instance.""" - return VelarApi() - - -@pytest.fixture -def mock_pools() -> List[Dict[str, str]]: - """Fixture providing mock pool data.""" - return [ - { - "token0Symbol": "TEST", - "token1Symbol": "STX", - "poolId": "pool1", - }, - { - "token0Symbol": "STX", - "token1Symbol": "OTHER", - "poolId": "pool2", - }, - { - "token0Symbol": "TEST", - "token1Symbol": "OTHER", - "poolId": "pool3", - }, - ] - - -@pytest.fixture -def mock_stats_data() -> Dict[str, List[Dict[str, float]]]: - """Fixture providing mock stats data.""" - return { - "data": [ - {"datetime": "2024-01-01", "value": 1.0}, - {"datetime": "2024-01-02", "value": 2.0}, - ] - } - - -def test_initialization(velar_api: VelarApi) -> None: - """Test VelarApi initialization.""" - assert velar_api.base_url == "https://gateway.velar.network/" - - -@patch("requests.get") -def test_get_success(mock_get: Mock, velar_api: VelarApi) -> None: - """Test successful GET request.""" - mock_response = Mock() - mock_response.json.return_value = {"data": "test"} - mock_get.return_value = mock_response - - result = velar_api._get("test-endpoint") - - assert result == {"data": "test"} - mock_get.assert_called_once_with( - "https://gateway.velar.network/test-endpoint", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_with_params(mock_get: Mock, velar_api: VelarApi) -> None: - """Test GET request with parameters.""" - mock_response = Mock() - mock_response.json.return_value = {"data": "test"} - mock_get.return_value = mock_response - - params = {"key": "value"} - result = velar_api._get("test-endpoint", params=params) - - assert result == {"data": "test"} - mock_get.assert_called_once_with( - "https://gateway.velar.network/test-endpoint", - headers={"Accept": "application/json"}, - params=params, - ) - - -@patch("requests.get") -def test_get_error(mock_get: Mock, velar_api: VelarApi) -> None: - """Test GET request error handling.""" - mock_get.side_effect = Exception("API Error") - - with pytest.raises(Exception, 
match="Velar API GET request error: API Error"): - velar_api._get("test-endpoint") - - -@patch("requests.get") -def test_get_tokens(mock_get: Mock, velar_api: VelarApi) -> None: - """Test tokens retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"message": ["token1", "token2"]} - mock_get.return_value = mock_response - - result = velar_api.get_tokens() - - assert result == ["token1", "token2"] - mock_get.assert_called_once_with( - "https://gateway.velar.network/swapapp/swap/tokens", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_pools( - mock_get: Mock, velar_api: VelarApi, mock_pools: List[Dict[str, str]] -) -> None: - """Test pools retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"message": mock_pools} - mock_get.return_value = mock_response - - result = velar_api.get_pools() - - assert result == mock_pools - mock_get.assert_called_once_with( - "https://gateway.velar.network/watcherapp/pool", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch.object(VelarApi, "get_pools") -def test_get_token_pools( - mock_get_pools: Mock, velar_api: VelarApi, mock_pools: List[Dict[str, str]] -) -> None: - """Test token pools retrieval.""" - mock_get_pools.return_value = mock_pools - - result = velar_api.get_token_pools("TEST") - - assert len(result) == 2 - assert all( - pool["token0Symbol"] == "TEST" or pool["token1Symbol"] == "TEST" - for pool in result - ) - - -@patch.object(VelarApi, "get_pools") -def test_get_token_stx_pools( - mock_get_pools: Mock, velar_api: VelarApi, mock_pools: List[Dict[str, str]] -) -> None: - """Test STX token pools retrieval.""" - mock_get_pools.return_value = mock_pools - - result = velar_api.get_token_stx_pools("TEST") - - assert len(result) == 1 - assert result[0]["poolId"] == "pool1" - assert "TEST" in [ - result[0]["token0Symbol"], - result[0]["token1Symbol"], - ] and "STX" in [result[0]["token0Symbol"], result[0]["token1Symbol"]] - - -@patch("requests.get") -def test_get_token_price_history( - mock_get: Mock, - velar_api: VelarApi, - mock_stats_data: Dict[str, List[Dict[str, float]]], -) -> None: - """Test token price history retrieval.""" - mock_response = Mock() - mock_response.json.return_value = mock_stats_data - mock_get.return_value = mock_response - - result = velar_api.get_token_price_history("TEST", "week") - - assert result == mock_stats_data - mock_get.assert_called_once_with( - "https://gateway.velar.network/watcherapp/stats/TEST/?type=price&interval=week", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_token_stats(mock_get: Mock, velar_api: VelarApi) -> None: - """Test token stats retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"stats": "data"} - mock_get.return_value = mock_response - - result = velar_api.get_token_stats("TEST") - - assert result == {"stats": "data"} - mock_get.assert_called_once_with( - "https://gateway.velar.network/watcherapp/pool/TEST", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch("requests.get") -def test_get_pool_stats_history(mock_get: Mock, velar_api: VelarApi) -> None: - """Test pool stats history retrieval.""" - mock_response = Mock() - mock_response.json.return_value = {"stats": "data"} - mock_get.return_value = mock_response - - result = velar_api.get_pool_stats_history("pool1", "tvl", "week") - - assert result == {"stats": "data"} - mock_get.assert_called_once_with( - 
"https://gateway.velar.network/watcherapp/stats/pool1?type=tvl&interval=week", - headers={"Accept": "application/json"}, - params=None, - ) - - -@patch.object(VelarApi, "_get") -def test_get_pool_stats_history_agg( - mock_get: Mock, - velar_api: VelarApi, - mock_stats_data: Dict[str, List[Dict[str, float]]], -) -> None: - """Test aggregated pool stats history retrieval.""" - mock_get.return_value = mock_stats_data - - result = velar_api.get_pool_stats_history_agg("pool1", "week") - - assert len(result) == 2 - assert all(key in result[0] for key in ["price", "tvl", "volume", "datetime"]) - assert mock_get.call_count == 3 # Called for price, tvl, and volume data - - -@patch.object(VelarApi, "_get") -def test_get_pool_stats_history_agg_error(mock_get: Mock, velar_api: VelarApi) -> None: - """Test aggregated pool stats history retrieval error.""" - mock_get.side_effect = Exception("API Error") - - with pytest.raises( - Exception, match="Token pool stats history retrieval error: API Error" - ): - velar_api.get_pool_stats_history_agg("pool1") diff --git a/tests/lib/test_websocket_manager.py b/tests/lib/test_websocket_manager.py deleted file mode 100644 index 4acba3a8..00000000 --- a/tests/lib/test_websocket_manager.py +++ /dev/null @@ -1,267 +0,0 @@ -import asyncio -from unittest.mock import AsyncMock, patch - -import pytest -from fastapi import WebSocket - -from lib.logger import configure_logger -from lib.websocket_manager import ConnectionManager - -logger = configure_logger(__name__) - - -@pytest.fixture -def manager() -> ConnectionManager: - """Fixture providing a ConnectionManager instance with a short TTL for testing.""" - return ConnectionManager(ttl_seconds=1) - - -@pytest.fixture -def mock_websocket() -> AsyncMock: - """Fixture providing a mock WebSocket.""" - websocket = AsyncMock(spec=WebSocket) - return websocket - - -@pytest.mark.asyncio -async def test_connect_job( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test job connection.""" - job_id = "test-job-1" - await manager.connect_job(mock_websocket, job_id) - - assert job_id in manager.job_connections - assert len(manager.job_connections[job_id]) == 1 - ws, ts = next(iter(manager.job_connections[job_id])) - assert ws == mock_websocket - assert isinstance(ts, float) - mock_websocket.accept.assert_called_once() - - -@pytest.mark.asyncio -async def test_connect_thread( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test thread connection.""" - thread_id = "test-thread-1" - await manager.connect_thread(mock_websocket, thread_id) - - assert thread_id in manager.thread_connections - assert len(manager.thread_connections[thread_id]) == 1 - ws, ts = next(iter(manager.thread_connections[thread_id])) - assert ws == mock_websocket - assert isinstance(ts, float) - mock_websocket.accept.assert_called_once() - - -@pytest.mark.asyncio -async def test_connect_session( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test session connection.""" - session_id = "test-session-1" - await manager.connect_session(mock_websocket, session_id) - - assert session_id in manager.session_connections - assert len(manager.session_connections[session_id]) == 1 - ws, ts = next(iter(manager.session_connections[session_id])) - assert ws == mock_websocket - assert isinstance(ts, float) - mock_websocket.accept.assert_called_once() - - -@pytest.mark.asyncio -async def test_disconnect_job( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test job disconnection.""" - 
job_id = "test-job-1" - await manager.connect_job(mock_websocket, job_id) - await manager.disconnect_job(mock_websocket, job_id) - - assert job_id not in manager.job_connections - - -@pytest.mark.asyncio -async def test_disconnect_thread( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test thread disconnection.""" - thread_id = "test-thread-1" - await manager.connect_thread(mock_websocket, thread_id) - await manager.disconnect_thread(mock_websocket, thread_id) - - assert thread_id not in manager.thread_connections - - -@pytest.mark.asyncio -async def test_disconnect_session( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test session disconnection.""" - session_id = "test-session-1" - await manager.connect_session(mock_websocket, session_id) - await manager.disconnect_session(mock_websocket, session_id) - - assert session_id not in manager.session_connections - - -@pytest.mark.asyncio -async def test_send_job_message( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test sending message to job connection.""" - job_id = "test-job-1" - message = {"type": "test", "data": "test-data"} - - await manager.connect_job(mock_websocket, job_id) - await manager.send_job_message(message, job_id) - - mock_websocket.send_json.assert_called_once_with(message) - - -@pytest.mark.asyncio -async def test_send_thread_message( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test sending message to thread connection.""" - thread_id = "test-thread-1" - message = {"type": "test", "data": "test-data"} - - await manager.connect_thread(mock_websocket, thread_id) - await manager.send_thread_message(message, thread_id) - - mock_websocket.send_json.assert_called_once_with(message) - - -@pytest.mark.asyncio -async def test_send_session_message( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test sending message to session connection.""" - session_id = "test-session-1" - message = {"type": "test", "data": "test-data"} - - await manager.connect_session(mock_websocket, session_id) - await manager.send_session_message(message, session_id) - - mock_websocket.send_json.assert_called_once_with(message) - - -@pytest.mark.asyncio -async def test_send_message_to_dead_connection( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test sending message to dead connection.""" - job_id = "test-job-1" - message = {"type": "test", "data": "test-data"} - - mock_websocket.send_json.side_effect = Exception("Connection closed") - - await manager.connect_job(mock_websocket, job_id) - await manager.send_job_message(message, job_id) - - assert job_id not in manager.job_connections - - -@pytest.mark.asyncio -async def test_cleanup_expired_connections( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test cleanup of expired connections.""" - job_id = "test-job-1" - thread_id = "test-thread-1" - session_id = "test-session-1" - - # Connect to all types - await manager.connect_job(mock_websocket, job_id) - await manager.connect_thread(mock_websocket, thread_id) - await manager.connect_session(mock_websocket, session_id) - - # Wait for TTL to expire - await asyncio.sleep(1.1) - - # Run cleanup - await manager.cleanup_expired_connections() - - assert job_id not in manager.job_connections - assert thread_id not in manager.thread_connections - assert session_id not in manager.session_connections - mock_websocket.close.assert_called() - - -@pytest.mark.asyncio -async def 
test_broadcast_errors( - manager: ConnectionManager, mock_websocket: AsyncMock -) -> None: - """Test broadcasting error messages.""" - job_id = "test-job-1" - thread_id = "test-thread-1" - session_id = "test-session-1" - error_message = "Test error" - - # Connect to all types - await manager.connect_job(mock_websocket, job_id) - await manager.connect_thread(mock_websocket, thread_id) - await manager.connect_session(mock_websocket, session_id) - - # Broadcast errors - await manager.broadcast_job_error(error_message, job_id) - await manager.broadcast_thread_error(error_message, thread_id) - await manager.broadcast_session_error(error_message, session_id) - - expected_message = {"type": "error", "message": error_message} - assert mock_websocket.send_json.call_count == 3 - mock_websocket.send_json.assert_called_with(expected_message) - - -@pytest.mark.asyncio -async def test_multiple_connections(manager: ConnectionManager) -> None: - """Test managing multiple connections.""" - job_id = "test-job-1" - mock_websocket1 = AsyncMock(spec=WebSocket) - mock_websocket2 = AsyncMock(spec=WebSocket) - - # Connect two websockets - await manager.connect_job(mock_websocket1, job_id) - await manager.connect_job(mock_websocket2, job_id) - - assert len(manager.job_connections[job_id]) == 2 - - # Send a message - message = {"type": "test", "data": "test-data"} - await manager.send_job_message(message, job_id) - - mock_websocket1.send_json.assert_called_once_with(message) - mock_websocket2.send_json.assert_called_once_with(message) - - # Disconnect one - await manager.disconnect_job(mock_websocket1, job_id) - assert len(manager.job_connections[job_id]) == 1 - - # Send another message - await manager.send_job_message(message, job_id) - mock_websocket1.send_json.assert_called_once() # Still only called once - assert mock_websocket2.send_json.call_count == 2 # Called twice - - -@pytest.mark.asyncio -async def test_cleanup_task(manager: ConnectionManager) -> None: - """Test the cleanup task.""" - with patch.object(manager, "cleanup_expired_connections") as mock_cleanup: - # Start the cleanup task - cleanup_task = asyncio.create_task(manager.start_cleanup_task()) - - # Wait a bit to allow the task to run - await asyncio.sleep(0.1) - - # Cancel the task - cleanup_task.cancel() - try: - await cleanup_task - except asyncio.CancelledError: - pass - - # Verify cleanup was called - mock_cleanup.assert_called() diff --git a/tests/services/test_bot.py b/tests/services/test_bot.py deleted file mode 100644 index 9b103986..00000000 --- a/tests/services/test_bot.py +++ /dev/null @@ -1,259 +0,0 @@ -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from telegram import Update, User -from telegram.ext import Application, ContextTypes - -from backend.models import TelegramUserBase -from lib.logger import configure_logger -from services.bot import TelegramBotConfig, TelegramBotService - -logger = configure_logger(__name__) - - -@pytest.fixture -def config(): - return TelegramBotConfig(token="test_token", admin_ids={12345}, is_enabled=True) - - -@pytest.fixture -def service(config): - return TelegramBotService(config) - - -@pytest.fixture -def mock_update(): - update = MagicMock(spec=Update) - update.effective_user = MagicMock(spec=User) - update.effective_user.id = 12345 - update.effective_user.username = "test_user" - update.effective_user.first_name = "Test" - update.effective_user.last_name = "User" - update.message = AsyncMock() - return update - - -@pytest.fixture -def mock_context(): - context = 
MagicMock(spec=ContextTypes.DEFAULT_TYPE) - context.args = [] - return context - - -@pytest.fixture -def mock_backend(): - with patch("services.bot.backend") as mock: - mock.get_telegram_user = MagicMock() - mock.update_telegram_user = MagicMock() - mock.list_telegram_users = MagicMock() - yield mock - - -class TestTelegramBotConfig: - def test_from_env(self): - with patch.dict( - "os.environ", - { - "AIBTC_TELEGRAM_BOT_TOKEN": "test_token", - "AIBTC_TELEGRAM_BOT_ENABLED": "true", - }, - ): - config = TelegramBotConfig.from_env() - assert config.token == "test_token" - assert config.is_enabled is True - assert isinstance(config.admin_ids, set) - - -class TestTelegramBotService: - def test_is_admin(self, service): - assert service.is_admin(12345) is True - assert service.is_admin(54321) is False - - @pytest.mark.asyncio - async def test_start_command_no_args( - self, service, mock_update, mock_context, mock_backend - ): - await service.start_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "Please use the registration link provided to start the bot." - ) - - @pytest.mark.asyncio - async def test_start_command_invalid_user( - self, service, mock_update, mock_context, mock_backend - ): - mock_context.args = ["invalid_id"] - mock_backend.get_telegram_user.return_value = None - - await service.start_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "Invalid registration link. Please use the correct link to register." - ) - - @pytest.mark.asyncio - async def test_start_command_success( - self, service, mock_update, mock_context, mock_backend - ): - mock_context.args = ["valid_id"] - mock_backend.get_telegram_user.return_value = True - mock_backend.update_telegram_user.return_value = True - - await service.start_command(mock_update, mock_context) - mock_backend.update_telegram_user.assert_called_once() - assert ( - "Your registration has been completed successfully!" - in mock_update.message.reply_text.call_args[0][0] - ) - - @pytest.mark.asyncio - async def test_help_command(self, service, mock_update, mock_context): - await service.help_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once() - assert "Available Commands" in mock_update.message.reply_text.call_args[0][0] - - @pytest.mark.asyncio - async def test_send_message_command_not_admin( - self, service, mock_update, mock_context - ): - service.config.admin_ids = { - 54321 - } # Different from mock_update.effective_user.id - await service.send_message_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "You are not authorized to send messages." - ) - - @pytest.mark.asyncio - async def test_send_message_command_no_args( - self, service, mock_update, mock_context - ): - await service.send_message_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "Please provide username and message. Usage: /send " - ) - - @pytest.mark.asyncio - async def test_send_message_command_user_not_found( - self, service, mock_update, mock_context, mock_backend - ): - mock_context.args = ["nonexistent_user", "test message"] - mock_backend.list_telegram_users.return_value = [] - - await service.send_message_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "Registered user with username nonexistent_user not found." 
- ) - - @pytest.mark.asyncio - async def test_list_users_command_not_admin( - self, service, mock_update, mock_context - ): - service.config.admin_ids = {54321} - await service.list_users_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "You are not authorized to list users." - ) - - @pytest.mark.asyncio - async def test_list_users_command_empty( - self, service, mock_update, mock_context, mock_backend - ): - mock_backend.list_telegram_users.return_value = [] - await service.list_users_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "No registered users found." - ) - - @pytest.mark.asyncio - async def test_list_users_command_success( - self, service, mock_update, mock_context, mock_backend - ): - mock_backend.list_telegram_users.return_value = [ - TelegramUserBase(telegram_user_id="123", username="user1"), - TelegramUserBase(telegram_user_id="456", username="user2"), - ] - await service.list_users_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once() - assert "user1: 123" in mock_update.message.reply_text.call_args[0][0] - assert "user2: 456" in mock_update.message.reply_text.call_args[0][0] - - @pytest.mark.asyncio - async def test_add_admin_command_not_admin( - self, service, mock_update, mock_context - ): - service.config.admin_ids = {54321} - await service.add_admin_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "You are not authorized to add admins." - ) - - @pytest.mark.asyncio - async def test_add_admin_command_no_args(self, service, mock_update, mock_context): - await service.add_admin_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "Please provide a user ID. Usage: /add_admin " - ) - - @pytest.mark.asyncio - async def test_add_admin_command_invalid_id( - self, service, mock_update, mock_context - ): - mock_context.args = ["not_a_number"] - await service.add_admin_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "Please provide a valid user ID (numbers only)." - ) - - @pytest.mark.asyncio - async def test_add_admin_command_success(self, service, mock_update, mock_context): - mock_context.args = ["54321"] - await service.add_admin_command(mock_update, mock_context) - mock_update.message.reply_text.assert_called_once_with( - "Successfully added user ID 54321 as admin." 
- ) - assert 54321 in service.config.admin_ids - - @pytest.mark.asyncio - async def test_send_message_to_user(self, service, mock_backend): - # Setup mock application - service._app = AsyncMock(spec=Application) - service._app.bot.send_message = AsyncMock() - - mock_backend.list_telegram_users.return_value = [ - TelegramUserBase(telegram_user_id="123", username="test_user") - ] - - result = await service.send_message_to_user("test_profile", "test message") - assert result is True - service._app.bot.send_message.assert_called_once_with( - chat_id="123", text="test message" - ) - - @pytest.mark.asyncio - async def test_send_message_to_user_disabled(self, service): - service.config.is_enabled = False - result = await service.send_message_to_user("test_profile", "test message") - assert result is False - - @pytest.mark.asyncio - async def test_initialize(self, service): - with patch("telegram.ext.Application.builder") as mock_builder: - mock_app = AsyncMock(spec=Application) - mock_builder.return_value.token.return_value.build.return_value = mock_app - - await service.initialize() - - assert service._app is not None - mock_app.initialize.assert_called_once() - mock_app.start.assert_called_once() - mock_app.updater.start_polling.assert_called_once_with( - allowed_updates=Update.ALL_TYPES - ) - - @pytest.mark.asyncio - async def test_shutdown(self, service): - service._app = AsyncMock(spec=Application) - await service.shutdown() - service._app.stop.assert_called_once() - service._app.shutdown.assert_called_once() - assert service._app is None diff --git a/tests/services/test_chat.py b/tests/services/test_chat.py deleted file mode 100644 index e2f80b92..00000000 --- a/tests/services/test_chat.py +++ /dev/null @@ -1,230 +0,0 @@ -import asyncio -import datetime -from unittest.mock import Mock, patch -from uuid import UUID - -import pytest - -pytest_plugins = ("pytest_asyncio",) - -from backend.models import Agent, Profile -from services.chat import ( - MessageHandler, - ToolExecutionHandler, - process_chat_message, -) - - -@pytest.fixture -def mock_profile(): - return Profile( - id=UUID("12345678-1234-5678-1234-567812345678"), - name="Test User", - email="test@example.com", - created_at=datetime.datetime.now(), - updated_at=datetime.datetime.now(), - is_active=True, - is_verified=True, - is_admin=False, - is_superuser=False, - ) - - -@pytest.fixture -def mock_queue(): - return asyncio.Queue() - - -@pytest.fixture -def mock_agent(): - return Agent( - id=UUID("11111111-2222-3333-4444-555555555555"), - name="Test Agent", - backstory="Test backstory", - role="Test role", - goal="Test goal", - created_at=datetime.datetime.now(), - updated_at=datetime.datetime.now(), - ) - - -@pytest.fixture -def mock_backend(mock_agent): - backend = Mock() - backend.get_agent = Mock(return_value=mock_agent) - backend.create_step = Mock() - backend.update_job = Mock() - return backend - - -class AsyncIterator: - def __init__(self, items): - self.items = items - - def __aiter__(self): - return self - - async def __anext__(self): - if not self.items: - raise StopAsyncIteration - return self.items.pop(0) - - -@pytest.fixture -def mock_tools(): - return { - "search": Mock(), - "calculator": Mock(), - } - - -@pytest.mark.asyncio -async def test_process_chat_message_basic_flow( - mock_profile, mock_queue, mock_backend, mock_tools -): - with patch("services.chat.backend", mock_backend): - with patch("services.chat.execute_langgraph_stream") as mock_execute: - # Setup mock response from langgraph - mock_execute.return_value = 
AsyncIterator( - [ - {"type": "token", "content": "Hello"}, - {"type": "result", "content": "Hello, how can I help?"}, - {"type": "end", "content": ""}, - ] - ) - - with patch("services.chat.initialize_tools", return_value=mock_tools): - job_id = UUID("12345678-1234-5678-1234-567812345678") - thread_id = UUID("87654321-4321-8765-4321-876543210987") - agent_id = UUID("11111111-2222-3333-4444-555555555555") - - await process_chat_message( - job_id=job_id, - thread_id=thread_id, - profile=mock_profile, - agent_id=agent_id, - input_str="Hi", - history=[], - output_queue=mock_queue, - ) - - # Verify backend calls - mock_backend.create_step.assert_called() - mock_backend.update_job.assert_called_once() - - # Verify queue output - messages = [] - while not mock_queue.empty(): - msg = await mock_queue.get() - if msg is not None: - messages.append(msg) - - assert len(messages) > 0 - assert any(msg["type"] == "token" for msg in messages) - - -@pytest.mark.asyncio -async def test_process_chat_message_with_tool_execution( - mock_profile, mock_queue, mock_backend, mock_tools -): - with patch("services.chat.backend", mock_backend): - with patch("services.chat.execute_langgraph_stream") as mock_execute: - # Setup mock response with tool execution - mock_execute.return_value = AsyncIterator( - [ - {"type": "token", "content": "Let me check that for you"}, - { - "type": "tool", - "status": "start", - "tool": "search", - "input": "query", - "output": None, - }, - { - "type": "tool", - "status": "end", - "tool": "search", - "input": "query", - "output": "result", - }, - {"type": "result", "content": "Here's what I found"}, - {"type": "end", "content": ""}, - ] - ) - - with patch("services.chat.initialize_tools", return_value=mock_tools): - job_id = UUID("12345678-1234-5678-1234-567812345678") - thread_id = UUID("87654321-4321-8765-4321-876543210987") - - await process_chat_message( - job_id=job_id, - thread_id=thread_id, - profile=mock_profile, - agent_id=None, - input_str="Search for something", - history=[], - output_queue=mock_queue, - ) - - # Verify tool execution was recorded - tool_step_calls = [ - call.kwargs["new_step"].tool - for call in mock_backend.create_step.call_args_list - if call.kwargs["new_step"].tool is not None - ] - assert "search" in tool_step_calls - - -@pytest.mark.asyncio -async def test_process_chat_message_error_handling(mock_profile, mock_queue): - with patch("services.chat.execute_langgraph_stream") as mock_execute: - mock_execute.side_effect = Exception("Test error") - - job_id = UUID("12345678-1234-5678-1234-567812345678") - thread_id = UUID("87654321-4321-8765-4321-876543210987") - - with pytest.raises(Exception): - await process_chat_message( - job_id=job_id, - thread_id=thread_id, - profile=mock_profile, - agent_id=None, - input_str="This should fail", - history=[], - output_queue=mock_queue, - ) - - -@pytest.mark.asyncio -async def test_message_handler_process_tokens(): - handler = MessageHandler() - message = { - "type": "token", - "content": "test content", - "thread_id": "test-thread", - "agent_id": "test-agent", - } - - processed = handler.process_token_message(message) - assert processed["type"] == "token" - assert processed["content"] == "test content" - assert "created_at" in processed - - -@pytest.mark.asyncio -async def test_tool_execution_handler(): - handler = ToolExecutionHandler() - tool_message = { - "type": "tool", - "status": "start", - "tool": "test_tool", - "input": "test_input", - "output": "test_output", - "thread_id": "test-thread", - "agent_id": 
"test-agent", - } - - processed = handler.process_tool_message(tool_message) - assert processed["type"] == "tool" - assert processed["tool"] == "test_tool" - assert "created_at" in processed diff --git a/tests/services/test_daos.py b/tests/services/test_daos.py deleted file mode 100644 index a063facf..00000000 --- a/tests/services/test_daos.py +++ /dev/null @@ -1,209 +0,0 @@ -import uuid -from unittest.mock import patch - -import pytest - -from backend.models import DAO, Token -from services.daos import ( - DAORequest, - DAOService, - TokenCreationError, - TokenRequest, - TokenService, - TokenServiceError, - TokenUpdateError, -) - - -@pytest.fixture -def mock_backend(): - with patch("services.daos.backend") as mock: - yield mock - - -@pytest.fixture -def dao_request(): - return DAORequest( - name="Test DAO", - mission="Test Mission", - description="Test Description", - wallet_id=uuid.uuid4(), - ) - - -@pytest.fixture -def token_request(): - return TokenRequest( - name="Test Token", - symbol="TEST", - description="Test Token Description", - decimals=6, - max_supply="1000000000", - ) - - -class TestDAORequest: - def test_to_dao_create(self, dao_request): - dao_create = dao_request.to_dao_create() - assert dao_create.name == dao_request.name - assert dao_create.mission == dao_request.mission - assert dao_create.description == dao_request.description - assert dao_create.wallet_id == dao_request.wallet_id - - -class TestTokenRequest: - def test_to_token_create(self, token_request): - token_create = token_request.to_token_create() - assert token_create.name == token_request.name - assert token_create.symbol == token_request.symbol - assert token_create.description == token_request.description - assert token_create.decimals == token_request.decimals - assert token_create.max_supply == token_request.max_supply - assert token_create.status == "DRAFT" - - def test_to_token_metadata(self, token_request): - metadata = token_request.to_token_metadata() - assert metadata.name == token_request.name - assert metadata.symbol == token_request.symbol - assert metadata.description == token_request.description - assert metadata.decimals == token_request.decimals - assert metadata.max_supply == token_request.max_supply - - -class TestDAOService: - def test_create_dao_success(self, mock_backend, dao_request): - expected_dao = DAO( - id=uuid.uuid4(), - name=dao_request.name, - mission=dao_request.mission, - description=dao_request.description, - wallet_id=dao_request.wallet_id, - ) - mock_backend.create_dao.return_value = expected_dao - - result = DAOService.create_dao(dao_request) - assert result == expected_dao - mock_backend.create_dao.assert_called_once_with(dao_request.to_dao_create()) - - def test_create_dao_failure(self, mock_backend, dao_request): - mock_backend.create_dao.side_effect = Exception("Database error") - - with pytest.raises(TokenServiceError) as exc_info: - DAOService.create_dao(dao_request) - - assert "Failed to create dao" in str(exc_info.value) - - -class TestTokenService: - @pytest.fixture - def token_service(self): - return TokenService() - - @pytest.fixture - def mock_asset_manager(self): - with patch("services.daos.TokenAssetManager") as mock: - instance = mock.return_value - instance.generate_all_assets.return_value = { - "metadata_url": "http://example.com/metadata", - "image_url": "http://example.com/image", - } - yield instance - - def test_create_token_success( - self, token_service, mock_backend, mock_asset_manager, token_request - ): - # Mock token creation - created_token = 
Token( - id=uuid.uuid4(), - name=token_request.name, - symbol=token_request.symbol, - description=token_request.description, - decimals=token_request.decimals, - max_supply=token_request.max_supply, - status="DRAFT", - ) - mock_backend.create_token.return_value = created_token - - # Mock token update - updated_token = Token( - id=created_token.id, - name=created_token.name, - symbol=created_token.symbol, - description=created_token.description, - decimals=created_token.decimals, - max_supply=created_token.max_supply, - status="DRAFT", - uri="http://example.com/metadata", - image_url="http://example.com/image", - ) - mock_backend.update_token.return_value = updated_token - - metadata_url, result = token_service.create_token(token_request) - - assert metadata_url == "http://example.com/metadata" - assert result == updated_token - mock_backend.create_token.assert_called_once_with( - token_request.to_token_create() - ) - mock_asset_manager.generate_all_assets.assert_called_once() - - def test_create_token_asset_generation_failure( - self, token_service, mock_backend, mock_asset_manager, token_request - ): - created_token = Token( - id=uuid.uuid4(), - name=token_request.name, - symbol=token_request.symbol, - description=token_request.description, - decimals=token_request.decimals, - max_supply=token_request.max_supply, - status="DRAFT", - ) - mock_backend.create_token.return_value = created_token - mock_asset_manager.generate_all_assets.side_effect = Exception( - "Asset generation failed" - ) - - with pytest.raises(TokenCreationError) as exc_info: - token_service.create_token(token_request) - - assert "Unexpected error during token creation" in str(exc_info.value) - - def test_create_token_update_failure( - self, token_service, mock_backend, mock_asset_manager, token_request - ): - created_token = Token( - id=uuid.uuid4(), - name=token_request.name, - symbol=token_request.symbol, - description=token_request.description, - decimals=token_request.decimals, - max_supply=token_request.max_supply, - status="DRAFT", - ) - mock_backend.create_token.return_value = created_token - mock_backend.update_token.return_value = None - - with pytest.raises(TokenUpdateError) as exc_info: - token_service.create_token(token_request) - - assert "Failed to update token record with asset URLs" in str(exc_info.value) - - def test_bind_token_to_dao_success(self, token_service, mock_backend): - token_id = uuid.uuid4() - dao_id = uuid.uuid4() - mock_backend.update_token.return_value = True - - result = token_service.bind_token_to_dao(token_id, dao_id) - - assert result is True - mock_backend.update_token.assert_called_once() - - def test_bind_token_to_dao_failure(self, token_service, mock_backend): - token_id = uuid.uuid4() - dao_id = uuid.uuid4() - mock_backend.update_token.side_effect = Exception("Update failed") - - result = token_service.bind_token_to_dao(token_id, dao_id) - - assert result is False diff --git a/tests/services/test_job_manager.py b/tests/services/test_job_manager.py deleted file mode 100644 index 759c4d74..00000000 --- a/tests/services/test_job_manager.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Tests for the job manager module.""" - -from unittest.mock import MagicMock, patch - -from apscheduler.schedulers.asyncio import AsyncIOScheduler - -from services.runner.job_manager import JobManager - - -class TestJobManager: - """Test cases for JobManager class.""" - - def test_get_all_jobs(self): - """Test that get_all_jobs returns a list of job configurations.""" - with 
patch("services.runner.job_manager.config") as mock_config: - # Set up mock config - mock_config.twitter.enabled = True - mock_config.twitter.interval_seconds = 60 - mock_config.scheduler.sync_enabled = True - mock_config.scheduler.sync_interval_seconds = 120 - mock_config.scheduler.dao_runner_enabled = True - mock_config.scheduler.dao_runner_interval_seconds = 30 - mock_config.scheduler.tweet_runner_enabled = False - - # Call the method - jobs = JobManager.get_all_jobs() - - # Verify results - assert len(jobs) >= 5 # At least 5 jobs should be returned - - # Verify some specific jobs - twitter_job = next((j for j in jobs if j.name == "Twitter Service"), None) - assert twitter_job is not None - assert twitter_job.enabled is True - assert twitter_job.seconds == 60 - - dao_job = next((j for j in jobs if j.name == "DAO Runner Service"), None) - assert dao_job is not None - assert dao_job.enabled is True - assert dao_job.seconds == 30 - - tweet_job = next( - (j for j in jobs if j.name == "Tweet Runner Service"), None - ) - assert tweet_job is not None - assert tweet_job.enabled is False - - def test_schedule_jobs(self): - """Test scheduling jobs.""" - # Create mock scheduler - mock_scheduler = MagicMock(spec=AsyncIOScheduler) - - with ( - patch( - "services.runner.job_manager.JobManager.get_all_jobs" - ) as mock_get_jobs, - patch( - "services.runner.job_manager.execute_twitter_job" - ) as mock_twitter_func, - ): - # Create mock jobs - mock_jobs = [ - MagicMock( - name="Twitter Service", - enabled=True, - func=mock_twitter_func, - seconds=60, - args=None, - job_id="twitter_service", - ), - MagicMock( - name="Disabled Service", - enabled=False, - func=MagicMock(), - seconds=30, - args=None, - job_id="disabled_service", - ), - ] - mock_get_jobs.return_value = mock_jobs - - # Call the method - result = JobManager.schedule_jobs(mock_scheduler) - - # Verify results - assert result is True # At least one job was enabled - mock_scheduler.add_job.assert_called_once() - - # Verify the job was added with the correct parameters - args, kwargs = mock_scheduler.add_job.call_args - assert args[0] == mock_twitter_func - assert kwargs["seconds"] == 60 - assert kwargs["id"] == "twitter_service" diff --git a/tests/services/test_langgraph.py b/tests/services/test_langgraph.py deleted file mode 100644 index 3f3b65a2..00000000 --- a/tests/services/test_langgraph.py +++ /dev/null @@ -1,209 +0,0 @@ -import asyncio -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage - -from services.workflows import ( - ExecutionError, - LangGraphService, - MessageContent, - MessageProcessor, - StreamingCallbackHandler, - StreamingError, - execute_langgraph_stream, -) - - -@pytest.fixture -def message_processor(): - return MessageProcessor() - - -@pytest.fixture -def sample_history(): - return [ - {"role": "user", "content": "Hello"}, - {"role": "assistant", "content": "Hi there"}, - {"role": "user", "content": "How are you?"}, - { - "role": "assistant", - "content": "I'm doing well", - "tool_calls": [{"type": "function", "function": {"name": "test_tool"}}], - }, - ] - - -class TestMessageContent: - def test_from_dict(self): - data = { - "role": "user", - "content": "test message", - "tool_calls": [{"type": "function"}], - } - content = MessageContent.from_dict(data) - assert content.role == "user" - assert content.content == "test message" - assert content.tool_calls == [{"type": "function"}] - - def test_from_dict_minimal(self): - data = 
{"role": "assistant", "content": "response"} - content = MessageContent.from_dict(data) - assert content.role == "assistant" - assert content.content == "response" - assert content.tool_calls is None - - -class TestMessageProcessor: - def test_extract_filtered_content(self, message_processor, sample_history): - filtered = message_processor.extract_filtered_content(sample_history) - assert len(filtered) == 4 - assert all(msg["role"] in ["user", "assistant"] for msg in filtered) - - def test_convert_to_langchain_messages(self, message_processor, sample_history): - filtered = message_processor.extract_filtered_content(sample_history) - messages = message_processor.convert_to_langchain_messages( - filtered, "current input", "test persona" - ) - - assert len(messages) == 6 # 4 history + 1 persona + 1 current input - assert isinstance(messages[0], SystemMessage) - assert messages[0].content == "test persona" - assert isinstance(messages[-1], HumanMessage) - assert messages[-1].content == "current input" - - def test_convert_without_persona(self, message_processor, sample_history): - filtered = message_processor.extract_filtered_content(sample_history) - messages = message_processor.convert_to_langchain_messages( - filtered, "current input" - ) - - assert len(messages) == 5 # 4 history + 1 current input - assert isinstance(messages[0], HumanMessage) - - -class TestStreamingCallbackHandler: - @pytest.fixture - def queue(self): - return asyncio.Queue() - - @pytest.fixture - def handler(self, queue): - return StreamingCallbackHandler(queue=queue) - - def test_initialization(self, handler): - assert handler.tokens == [] - assert handler.current_tool is None - assert handler._loop is None - - @pytest.mark.asyncio - async def test_queue_operations(self, handler): - test_item = {"type": "test", "content": "test_content"} - - with pytest.raises(StreamingError): - # Test with invalid queue operation - handler.queue = None - handler._put_to_queue(test_item) - - def test_tool_start(self, handler): - handler._put_to_queue = MagicMock() - handler.on_tool_start({"name": "test_tool"}, "test_input") - - assert handler.current_tool == "test_tool" - handler._put_to_queue.assert_called_once() - - def test_tool_end(self, handler): - handler._put_to_queue = MagicMock() - handler.current_tool = "test_tool" - handler.on_tool_end("test_output") - - assert handler.current_tool is None - handler._put_to_queue.assert_called_once() - - def test_llm_new_token(self, handler): - handler.on_llm_new_token("test_token") - assert "test_token" in handler.tokens - - def test_llm_error(self, handler): - with pytest.raises(ExecutionError): - handler.on_llm_error(Exception("test error")) - - def test_tool_error(self, handler): - handler._put_to_queue = MagicMock() - handler.current_tool = "test_tool" - handler.on_tool_error(Exception("test error")) - - assert handler.current_tool is None - handler._put_to_queue.assert_called_once() - - -class TestLangGraphService: - @pytest.fixture - def service(self): - return LangGraphService() - - @pytest.fixture - def mock_chat_model(self): - with patch("services.workflows.ChatOpenAI") as mock: - yield mock - - @pytest.fixture - def mock_tool_node(self): - with patch("services.workflows.ToolNode") as mock: - yield mock - - def test_create_chat_model(self, service, mock_chat_model): - callback_handler = MagicMock() - tools = [MagicMock()] - - service._create_chat_model(callback_handler, tools) - mock_chat_model.assert_called_once() - - def test_create_workflow(self, service): - chat = MagicMock() - 
tool_node = MagicMock() - - workflow = service._create_workflow(chat, tool_node) - assert workflow is not None - - @pytest.mark.asyncio - async def test_execute_chat_stream_success( - self, service, sample_history, mock_chat_model - ): - # Mock necessary components - mock_queue = asyncio.Queue() - await mock_queue.put({"type": "token", "content": "test"}) - await mock_queue.put({"type": "end"}) - - mock_chat = MagicMock() - mock_chat.invoke.return_value = AIMessage(content="test response") - mock_chat_model.return_value = mock_chat - - # Execute stream - tools_map = {"test_tool": MagicMock()} - chunks = [] - async for chunk in service.execute_chat_stream( - sample_history, "test input", "test persona", tools_map - ): - chunks.append(chunk) - - assert len(chunks) > 0 - - @pytest.mark.asyncio - async def test_execute_chat_stream_error(self, service, sample_history): - with pytest.raises(ExecutionError): - async for _ in service.execute_chat_stream( - sample_history, "test input", None, None - ): - pass - - -@pytest.mark.asyncio -async def test_facade_function(): - with patch("services.workflows.LangGraphService") as mock_service: - instance = mock_service.return_value - instance.execute_chat_stream = AsyncMock() - instance.execute_chat_stream.return_value = [{"type": "test"}] - - async for chunk in execute_langgraph_stream([], "test", None, None): - assert chunk["type"] == "test" diff --git a/tests/services/test_schedule.py b/tests/services/test_schedule.py deleted file mode 100644 index 7eccfcb7..00000000 --- a/tests/services/test_schedule.py +++ /dev/null @@ -1,189 +0,0 @@ -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from apscheduler.schedulers.asyncio import AsyncIOScheduler -from apscheduler.triggers.cron import CronTrigger - -from backend.models import Task -from services.schedule import SchedulerService, get_scheduler_service - - -@pytest.fixture -def mock_scheduler(): - scheduler = MagicMock(spec=AsyncIOScheduler) - scheduler.get_jobs.return_value = [] - return scheduler - - -@pytest.fixture -def scheduler_service(mock_scheduler): - return SchedulerService(mock_scheduler) - - -@pytest.fixture -def mock_task(): - return Task( - id=uuid.uuid4(), - name="Test Task", - prompt="Test Prompt", - agent_id=uuid.uuid4(), - profile_id=uuid.uuid4(), - cron="0 * * * *", - is_scheduled=True, - ) - - -@pytest.fixture -def mock_backend(): - with patch("services.schedule.backend") as mock: - mock.get_task = AsyncMock() - mock.get_agent = AsyncMock() - mock.get_profile = AsyncMock() - mock.create_job = AsyncMock() - mock.create_step = AsyncMock() - mock.update_job = AsyncMock() - mock.list_tasks = AsyncMock() - yield mock - - -@pytest.mark.asyncio -async def test_execute_job_success(scheduler_service, mock_backend, mock_task): - # Setup - agent_id = str(uuid.uuid4()) - task_id = str(uuid.uuid4()) - profile_id = str(uuid.uuid4()) - - mock_backend.get_task.return_value = mock_task - mock_backend.get_agent.return_value = {"id": agent_id} - mock_backend.get_profile.return_value = {"id": profile_id} - mock_backend.create_job.return_value = {"id": str(uuid.uuid4())} - - with patch("services.schedule.execute_langgraph_stream") as mock_stream: - mock_stream.return_value = [ - {"type": "tool", "tool": "test_tool", "input": "test_input"}, - {"type": "result", "content": "test_result"}, - ] - - # Execute - await scheduler_service.execute_job(agent_id, task_id, profile_id) - - # Assert - mock_backend.get_task.assert_called_once_with(task_id=uuid.UUID(task_id)) - 
mock_backend.get_agent.assert_called_once_with(agent_id=uuid.UUID(agent_id)) - mock_backend.get_profile.assert_called_once_with( - profile_id=uuid.UUID(profile_id) - ) - mock_backend.create_job.assert_called_once() - assert mock_backend.create_step.call_count == 2 - - -@pytest.mark.asyncio -async def test_execute_job_task_not_found(scheduler_service, mock_backend): - # Setup - mock_backend.get_task.return_value = None - - # Execute - await scheduler_service.execute_job("agent_id", "task_id", "profile_id") - - # Assert - mock_backend.get_agent.assert_not_called() - mock_backend.get_profile.assert_not_called() - mock_backend.create_job.assert_not_called() - - -@pytest.mark.asyncio -async def test_sync_schedules_add_new_job(scheduler_service, mock_backend, mock_task): - # Setup - mock_backend.list_tasks.return_value = [mock_task] - - # Execute - await scheduler_service.sync_schedules() - - # Assert - scheduler_service.scheduler.add_job.assert_called_once() - assert scheduler_service.scheduler.remove_job.call_count == 0 - - -@pytest.mark.asyncio -async def test_sync_schedules_update_job(scheduler_service, mock_backend, mock_task): - # Setup - job_id = f"schedule_{mock_task.id}" - mock_job = MagicMock() - mock_job.id = job_id - mock_job.trigger = CronTrigger.from_crontab( - "*/5 * * * *" - ) # Different from mock_task.cron - - scheduler_service.scheduler.get_jobs.return_value = [mock_job] - mock_backend.list_tasks.return_value = [mock_task] - - # Execute - await scheduler_service.sync_schedules() - - # Assert - assert scheduler_service.scheduler.remove_job.call_count == 1 - scheduler_service.scheduler.add_job.assert_called_once() - - -@pytest.mark.asyncio -async def test_sync_schedules_remove_job(scheduler_service, mock_backend): - # Setup - job_id = "schedule_old_job" - mock_job = MagicMock() - mock_job.id = job_id - - scheduler_service.scheduler.get_jobs.return_value = [mock_job] - mock_backend.list_tasks.return_value = [] # No tasks in backend - - # Execute - await scheduler_service.sync_schedules() - - # Assert - scheduler_service.scheduler.remove_job.assert_called_once_with(job_id) - assert scheduler_service.scheduler.add_job.call_count == 0 - - -def test_get_scheduler_service(): - # Setup - scheduler = MagicMock(spec=AsyncIOScheduler) - - # Execute - service1 = get_scheduler_service(scheduler) - service2 = get_scheduler_service() - - # Assert - assert service1 is service2 - assert isinstance(service1, SchedulerService) - - -def test_get_scheduler_service_no_scheduler(): - # Setup & Execute & Assert - with pytest.raises(ValueError): - get_scheduler_service() - - -@pytest.mark.asyncio -async def test_handle_stream_event(scheduler_service, mock_backend): - # Setup - job = {"id": str(uuid.uuid4())} - agent_id = str(uuid.uuid4()) - profile_id = str(uuid.uuid4()) - - # Test tool event - tool_event = { - "type": "tool", - "tool": "test_tool", - "input": "test_input", - "output": "test_output", - } - await scheduler_service._handle_stream_event(tool_event, job, agent_id, profile_id) - mock_backend.create_step.assert_called_once() - - # Test result event - result_event = {"type": "result", "content": "test_result"} - await scheduler_service._handle_stream_event( - result_event, job, agent_id, profile_id - ) - assert mock_backend.create_step.call_count == 2 - mock_backend.update_job.assert_called_once() diff --git a/tests/services/test_startup.py b/tests/services/test_startup.py deleted file mode 100644 index c98b2316..00000000 --- a/tests/services/test_startup.py +++ /dev/null @@ -1,148 +0,0 @@ 
-import asyncio -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -from apscheduler.schedulers.asyncio import AsyncIOScheduler - -from config import config -from services.startup import StartupService - - -@pytest.fixture -def mock_scheduler(): - scheduler = MagicMock(spec=AsyncIOScheduler) - scheduler.running = True - return scheduler - - -@pytest.fixture -def service(mock_scheduler): - return StartupService(scheduler=mock_scheduler) - - -@pytest.fixture -def mock_manager(): - with patch("services.startup.manager") as mock: - mock.start_cleanup_task = AsyncMock() - yield mock - - -@pytest.fixture -def mock_bot(): - with patch("services.startup.start_application") as mock: - mock.return_value = AsyncMock() - yield mock - - -@pytest.fixture -def mock_job_manager(): - with patch("services.startup.JobManager") as mock: - mock.schedule_jobs.return_value = True - yield mock - - -class TestStartupService: - @pytest.mark.asyncio - async def test_start_websocket_cleanup_success(self, service, mock_manager): - """Test successful websocket cleanup start.""" - await service.start_websocket_cleanup() - mock_manager.start_cleanup_task.assert_called_once() - - @pytest.mark.asyncio - async def test_start_websocket_cleanup_failure(self, service, mock_manager): - """Test websocket cleanup start failure.""" - mock_manager.start_cleanup_task.side_effect = Exception("Cleanup failed") - - with pytest.raises(Exception) as exc_info: - await service.start_websocket_cleanup() - assert str(exc_info.value) == "Cleanup failed" - - @pytest.mark.asyncio - async def test_start_bot_disabled(self, service, mock_bot): - """Test bot startup when disabled.""" - with patch.object(config.telegram, "enabled", False): - result = await service.start_bot() - assert result is None - mock_bot.assert_not_called() - - @pytest.mark.asyncio - async def test_start_bot_enabled(self, service, mock_bot): - """Test bot startup when enabled.""" - with patch.object(config.telegram, "enabled", True): - await service.start_bot() - mock_bot.assert_called_once() - - @pytest.mark.asyncio - async def test_start_bot_failure(self, service, mock_bot): - """Test bot startup failure.""" - with patch.object(config.telegram, "enabled", True): - mock_bot.side_effect = Exception("Bot startup failed") - - with pytest.raises(Exception) as exc_info: - await service.start_bot() - assert str(exc_info.value) == "Bot startup failed" - - def test_init_scheduler_jobs_enabled(self, service, mock_job_manager): - """Test scheduler initialization with jobs enabled.""" - mock_job_manager.schedule_jobs.return_value = True - - service.init_scheduler() - - mock_job_manager.schedule_jobs.assert_called_once_with(service.scheduler) - service.scheduler.start.assert_called_once() - - def test_init_scheduler_all_disabled(self, service, mock_job_manager): - """Test scheduler initialization with all jobs disabled.""" - mock_job_manager.schedule_jobs.return_value = False - - service.init_scheduler() - - mock_job_manager.schedule_jobs.assert_called_once_with(service.scheduler) - service.scheduler.start.assert_not_called() - - @pytest.mark.asyncio - async def test_init_background_tasks( - self, service, mock_manager, mock_bot, mock_job_manager - ): - """Test background tasks initialization.""" - with patch.object(config.telegram, "enabled", True): - cleanup_task = await service.init_background_tasks() - - assert isinstance(cleanup_task, asyncio.Task) - assert service.cleanup_task is cleanup_task - mock_manager.start_cleanup_task.assert_called_once() - 
mock_bot.assert_called_once() - - @pytest.mark.asyncio - async def test_shutdown(self, service): - """Test service shutdown.""" - # Create a mock cleanup task - mock_task = AsyncMock() - service.cleanup_task = mock_task - - await service.shutdown() - - service.scheduler.shutdown.assert_called_once() - mock_task.cancel.assert_called_once() - - -@pytest.mark.asyncio -async def test_global_init_background_tasks(): - """Test global init_background_tasks function.""" - with patch("services.startup.startup_service") as mock_service: - mock_service.init_background_tasks = AsyncMock() - from services.startup import init_background_tasks - - await asyncio.create_task(init_background_tasks()) - mock_service.init_background_tasks.assert_called_once() - - -@pytest.mark.asyncio -async def test_global_shutdown(): - """Test global shutdown function.""" - with patch("services.startup.startup_service") as mock_service: - mock_service.shutdown = AsyncMock() - from services.startup import shutdown - - await shutdown() - mock_service.shutdown.assert_called_once() diff --git a/tests/services/test_tweet_task.py b/tests/services/test_tweet_task.py deleted file mode 100644 index 80fc200e..00000000 --- a/tests/services/test_tweet_task.py +++ /dev/null @@ -1,207 +0,0 @@ -import pytest -from backend.models import QueueMessage -from services.runner.tasks.tweet_task import TweetTask -from unittest.mock import AsyncMock, MagicMock -from uuid import UUID - - -@pytest.fixture -def tweet_task(): - """Create a TweetTask instance for testing.""" - task = TweetTask() - task.twitter_service = MagicMock() - task.twitter_service._apost_tweet = AsyncMock() - return task - - -class TestTweetTask: - """Tests for the TweetTask class.""" - - @pytest.mark.asyncio - async def test_validate_message_with_valid_format(self, tweet_task): - """Test validating a message with the correct format.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"message": "This is a test tweet"}, - dao_id=UUID("00000000-0000-0000-0000-000000000001"), - created_at="2024-03-06T00:00:00Z", - ) - original_message = message.message.copy() - - # Act - result = await tweet_task._validate_message(message) - - # Assert - assert result is None - # Message structure should remain unchanged - assert message.message == original_message - - @pytest.mark.asyncio - async def test_validate_message_with_empty_message(self, tweet_task): - """Test validating a message with an empty message field.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message=None, - dao_id=UUID("00000000-0000-0000-0000-000000000001"), - created_at="2024-03-06T00:00:00Z", - ) - - # Act - result = await tweet_task._validate_message(message) - - # Assert - assert result is not None - assert result.success is False - assert "empty" in result.message.lower() - - @pytest.mark.asyncio - async def test_validate_message_with_empty_content(self, tweet_task): - """Test validating a message with empty content.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"message": ""}, - dao_id=UUID("00000000-0000-0000-0000-000000000001"), - created_at="2024-03-06T00:00:00Z", - ) - - # Act - result = await tweet_task._validate_message(message) - - # Assert - assert result is not None - assert result.success is False - assert "empty" in result.message.lower() - - @pytest.mark.asyncio - async def test_validate_message_with_invalid_format(self, tweet_task): - """Test validating 
a message with an invalid format.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"wrong_field": "This is a test tweet"}, - dao_id=UUID("00000000-0000-0000-0000-000000000001"), - created_at="2024-03-06T00:00:00Z", - ) - - # Act - result = await tweet_task._validate_message(message) - - # Assert - assert result is not None - assert result.success is False - assert "unsupported" in result.message.lower() - - @pytest.mark.asyncio - async def test_validate_message_with_no_dao_id(self, tweet_task): - """Test validating a message with no DAO ID.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"message": "This is a test tweet"}, - dao_id=None, - created_at="2024-03-06T00:00:00Z", - ) - - # Act - result = await tweet_task._validate_message(message) - - # Assert - assert result is not None - assert result.success is False - assert "dao_id" in result.message.lower() - - @pytest.mark.asyncio - async def test_validate_message_with_too_long_tweet(self, tweet_task): - """Test validating a message with a tweet that exceeds the character limit.""" - # Arrange - long_tweet = "x" * 281 # Twitter's character limit is 280 - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"message": long_tweet}, - dao_id=UUID("00000000-0000-0000-0000-000000000001"), - created_at="2024-03-06T00:00:00Z", - ) - - # Act - result = await tweet_task._validate_message(message) - - # Assert - assert result is not None - assert result.success is False - assert "character limit" in result.message.lower() - - @pytest.mark.asyncio - async def test_process_tweet_message_success_with_reply(self, tweet_task): - """Test processing a tweet message successfully with a reply.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"message": "This is a test tweet"}, - dao_id=UUID("00000000-0000-0000-0000-000000000001"), - tweet_id="123456789", - created_at="2024-03-06T00:00:00Z", - ) - tweet_task.twitter_service._apost_tweet.return_value = { - "id": "987654321", - "text": "This is a test tweet", - } - - # Act - result = await tweet_task._process_tweet_message(message) - - # Assert - assert result.success is True - assert result.tweet_id is not None - tweet_task.twitter_service._apost_tweet.assert_called_once_with( - text="This is a test tweet", reply_in_reply_to_tweet_id="123456789" - ) - - @pytest.mark.asyncio - async def test_process_tweet_message_success_without_reply(self, tweet_task): - """Test processing a tweet message successfully without a reply.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"message": "This is a test tweet"}, - dao_id=UUID("00000000-0000-0000-0000-000000000001"), - created_at="2024-03-06T00:00:00Z", - ) - tweet_task.twitter_service._apost_tweet.return_value = { - "id": "987654321", - "text": "This is a test tweet", - } - - # Act - result = await tweet_task._process_tweet_message(message) - - # Assert - assert result.success is True - assert result.tweet_id is not None - tweet_task.twitter_service._apost_tweet.assert_called_once_with( - text="This is a test tweet" - ) - - @pytest.mark.asyncio - async def test_process_tweet_message_failure(self, tweet_task): - """Test processing a tweet message with a failure from the Twitter service.""" - # Arrange - message = QueueMessage( - id=UUID("00000000-0000-0000-0000-000000000001"), - message={"message": "This is a test tweet"}, - 
dao_id=UUID("00000000-0000-0000-0000-000000000001"), - created_at="2024-03-06T00:00:00Z", - ) - tweet_task.twitter_service._apost_tweet.return_value = None - - # Act - result = await tweet_task._process_tweet_message(message) - - # Assert - assert result.success is False - assert "failed to send tweet" in result.message.lower() - tweet_task.twitter_service._apost_tweet.assert_called_once_with( - text="This is a test tweet" - ) diff --git a/tests/services/test_twitter.py b/tests/services/test_twitter.py deleted file mode 100644 index 5606dbfc..00000000 --- a/tests/services/test_twitter.py +++ /dev/null @@ -1,273 +0,0 @@ -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - -from services.twitter import ( - TweetAnalyzer, - TweetData, - TweetRepository, - TwitterConfig, - TwitterMentionHandler, - create_twitter_handler, -) - - -@pytest.fixture -def mock_backend(): - with patch("services.twitter.backend") as mock: - mock.list_x_tweets = AsyncMock() - mock.create_x_tweet = AsyncMock() - mock.update_x_tweet = AsyncMock() - mock.list_x_users = AsyncMock() - mock.create_x_user = AsyncMock() - mock.create_queue_message = AsyncMock() - yield mock - - -@pytest.fixture -def mock_twitter_service(): - with patch("services.twitter.TwitterService") as mock: - instance = mock.return_value - instance._ainitialize = AsyncMock() - instance.get_mentions_by_user_id = AsyncMock() - instance._apost_tweet = AsyncMock() - yield instance - - -@pytest.fixture -def mock_analyze_tweet(): - with patch("services.twitter.analyze_tweet") as mock: - mock.return_value = { - "is_worthy": True, - "tweet_type": "test_type", - "confidence_score": 0.9, - "reason": "test reason", - "tool_request": {"type": "test_tool"}, - } - yield mock - - -@pytest.fixture -def config(): - return TwitterConfig( - consumer_key="test_key", - consumer_secret="test_secret", - client_id="test_client_id", - client_secret="test_client_secret", - access_token="test_token", - access_secret="test_secret", - user_id="test_user_id", - whitelisted_authors=["whitelisted_author"], - whitelist_enabled=True, - ) - - -@pytest.fixture -def tweet_data(): - return TweetData( - tweet_id="test_tweet_id", - author_id="test_author_id", - text="test tweet text", - conversation_id="test_conversation_id", - ) - - -@pytest.fixture -def tweet_repository(mock_backend): - return TweetRepository() - - -@pytest.fixture -def tweet_analyzer(tweet_repository): - return TweetAnalyzer(tweet_repository) - - -@pytest.fixture -def twitter_handler(config, tweet_repository, tweet_analyzer, mock_twitter_service): - return TwitterMentionHandler(config, tweet_repository, tweet_analyzer) - - -class TestTweetRepository: - @pytest.mark.asyncio - async def test_store_tweet_new_author( - self, tweet_repository, tweet_data, mock_backend - ): - # Setup - mock_backend.list_x_users.return_value = [] - mock_backend.create_x_user.return_value = MagicMock(id="test_author_db_id") - - # Execute - await tweet_repository.store_tweet(tweet_data) - - # Assert - mock_backend.list_x_users.assert_called_once() - mock_backend.create_x_user.assert_called_once() - mock_backend.create_x_tweet.assert_called_once() - - @pytest.mark.asyncio - async def test_store_tweet_existing_author( - self, tweet_repository, tweet_data, mock_backend - ): - # Setup - mock_backend.list_x_users.return_value = [MagicMock(id="test_author_db_id")] - - # Execute - await tweet_repository.store_tweet(tweet_data) - - # Assert - mock_backend.list_x_users.assert_called_once() - mock_backend.create_x_user.assert_not_called() 
- mock_backend.create_x_tweet.assert_called_once() - - @pytest.mark.asyncio - async def test_update_tweet_analysis(self, tweet_repository, mock_backend): - # Setup - mock_backend.list_x_tweets.return_value = [MagicMock(id="test_tweet_db_id")] - - # Execute - await tweet_repository.update_tweet_analysis( - tweet_id="test_tweet_id", - is_worthy=True, - tweet_type="test_type", - confidence_score=0.9, - reason="test reason", - ) - - # Assert - mock_backend.list_x_tweets.assert_called_once() - mock_backend.update_x_tweet.assert_called_once() - - @pytest.mark.asyncio - async def test_get_conversation_history(self, tweet_repository, mock_backend): - # Setup - mock_backend.list_x_tweets.return_value = [ - MagicMock(author_id="user1", message="message1"), - MagicMock(author_id="test_user_id", message="message2"), - ] - - # Execute - history = await tweet_repository.get_conversation_history( - "test_conversation_id", "test_user_id" - ) - - # Assert - assert len(history) == 2 - assert history[0]["role"] == "user" - assert history[1]["role"] == "assistant" - - -class TestTweetAnalyzer: - @pytest.mark.asyncio - async def test_analyze_tweet_content( - self, tweet_analyzer, tweet_data, mock_analyze_tweet - ): - # Setup - history = [{"role": "user", "content": "previous message"}] - - # Execute - result = await tweet_analyzer.analyze_tweet_content(tweet_data, history) - - # Assert - assert result["is_worthy"] is True - assert result["tweet_type"] == "test_type" - assert result["confidence_score"] == 0.9 - mock_analyze_tweet.assert_called_once() - - -class TestTwitterMentionHandler: - @pytest.mark.asyncio - async def test_process_mentions_no_mentions(self, twitter_handler): - # Setup - twitter_handler.twitter_service.get_mentions_by_user_id.return_value = [] - - # Execute - await twitter_handler.process_mentions() - - # Assert - twitter_handler.twitter_service._ainitialize.assert_called_once() - twitter_handler.twitter_service.get_mentions_by_user_id.assert_called_once_with( - "test_user_id" - ) - - @pytest.mark.asyncio - async def test_handle_mention_existing_tweet(self, twitter_handler, mock_backend): - # Setup - mention = MagicMock( - id="test_tweet_id", - author_id="test_author_id", - text="test text", - conversation_id="test_conv_id", - ) - mock_backend.list_x_tweets.return_value = [MagicMock()] - - # Execute - await twitter_handler._handle_mention(mention) - - # Assert - mock_backend.list_x_tweets.assert_called_once() - mock_backend.create_x_tweet.assert_not_called() - - @pytest.mark.asyncio - async def test_handle_mention_whitelisted_author( - self, twitter_handler, mock_backend, mock_analyze_tweet - ): - # Setup - mention = MagicMock( - id="test_tweet_id", - author_id="whitelisted_author", - text="test text", - conversation_id="test_conv_id", - ) - mock_backend.list_x_tweets.return_value = [] - mock_backend.list_x_users.return_value = [MagicMock(id="test_author_db_id")] - - # Execute - await twitter_handler._handle_mention(mention) - - # Assert - mock_backend.create_x_tweet.assert_called_once() - mock_analyze_tweet.assert_called_once() - - @pytest.mark.asyncio - async def test_handle_mention_non_whitelisted_author( - self, twitter_handler, mock_backend, mock_analyze_tweet - ): - # Setup - mention = MagicMock( - id="test_tweet_id", - author_id="non_whitelisted_author", - text="test text", - conversation_id="test_conv_id", - ) - mock_backend.list_x_tweets.return_value = [] - - # Execute - await twitter_handler._handle_mention(mention) - - # Assert - mock_backend.create_x_tweet.assert_called_once() - 
mock_analyze_tweet.assert_not_called() - - -def test_create_twitter_handler(): - with ( - patch("services.twitter.load_dotenv"), - patch.dict( - "os.environ", - { - "AIBTC_TWITTER_CONSUMER_KEY": "test_key", - "AIBTC_TWITTER_CONSUMER_SECRET": "test_secret", - "AIBTC_TWITTER_CLIENT_ID": "test_client_id", - "AIBTC_TWITTER_CLIENT_SECRET": "test_client_secret", - "AIBTC_TWITTER_ACCESS_TOKEN": "test_token", - "AIBTC_TWITTER_ACCESS_SECRET": "test_secret", - "AIBTC_TWITTER_AUTOMATED_USER_ID": "test_user_id", - "AIBTC_TWITTER_WHITELISTED": "whitelisted_author", - }, - ), - ): - handler = create_twitter_handler() - assert isinstance(handler, TwitterMentionHandler) - assert handler.config.consumer_key == "test_key" - assert handler.config.user_id == "test_user_id" - assert handler.config.whitelisted_authors == ["whitelisted_author"] diff --git a/tests/services/webhooks/chainhook/test_buy_event_handler.py b/tests/services/webhooks/chainhook/test_buy_event_handler.py deleted file mode 100644 index 1c276023..00000000 --- a/tests/services/webhooks/chainhook/test_buy_event_handler.py +++ /dev/null @@ -1,172 +0,0 @@ -"""Tests for the BuyEventHandler.""" - -import unittest -from unittest.mock import MagicMock, patch - -from services.webhooks.chainhook.handlers.buy_event_handler import BuyEventHandler -from services.webhooks.chainhook.models import ( - Event, - Receipt, - TransactionIdentifier, - TransactionMetadata, - TransactionWithReceipt, -) - - -class TestBuyEventHandler(unittest.TestCase): - """Test cases for BuyEventHandler.""" - - def setUp(self): - """Set up the test environment.""" - self.handler = BuyEventHandler() - - # Create a mock logger - self.handler.logger = MagicMock() - - # Create a sample event - self.sample_event = Event( - data={"amount": "1000", "recipient": "ST123", "sender": "ST456"}, - position={"index": 0}, - type="STXTransferEvent", - ) - - # Create a sample receipt with events - self.sample_receipt = Receipt( - contract_calls_stack=[], - events=[self.sample_event], - mutated_assets_radius=[], - mutated_contracts_radius=[], - ) - - # Create sample transaction metadata - self.sample_metadata = TransactionMetadata( - description="Test buy transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "buy", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ) - - # Create a sample transaction - self.sample_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=self.sample_metadata, - operations=[], - ) - - def test_can_handle_buy_transaction(self): - """Test that the handler can handle buy transactions.""" - # Test with a buy transaction - result = self.handler.can_handle_transaction(self.sample_transaction) - self.assertTrue(result) - - # Test with a buy-tokens transaction - buy_tokens_metadata = TransactionMetadata( - description="Test buy-tokens transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "buy-tokens", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - 
sponsor=None, - success=True, - ) - - buy_tokens_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=buy_tokens_metadata, - operations=[], - ) - - result = self.handler.can_handle_transaction(buy_tokens_transaction) - self.assertTrue(result) - - def test_cannot_handle_non_buy_transaction(self): - """Test that the handler cannot handle non-buy transactions.""" - # Create a non-buy transaction - non_buy_metadata = TransactionMetadata( - description="Test non-buy transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "transfer", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ) - - non_buy_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=non_buy_metadata, - operations=[], - ) - - result = self.handler.can_handle_transaction(non_buy_transaction) - self.assertFalse(result) - - @patch("services.webhooks.chainhook.handlers.buy_event_handler.configure_logger") - async def test_handle_transaction(self, mock_configure_logger): - """Test that the handler correctly logs events.""" - # Set up the mock logger - mock_logger = MagicMock() - mock_configure_logger.return_value = mock_logger - - # Create a new handler with the mocked logger - handler = BuyEventHandler() - - # Handle the transaction - await handler.handle_transaction(self.sample_transaction) - - # Check that the logger was called with the expected messages - mock_logger.info.assert_any_call( - "Processing buy function call from ST456 to contract ST123.test-contract " - "with args: ['10'], tx_id: 0xabcdef1234567890" - ) - - mock_logger.info.assert_any_call( - "Found 1 events in transaction 0xabcdef1234567890" - ) - - mock_logger.info.assert_any_call( - "Event 1/1: Type=STXTransferEvent, Data={'amount': '1000', 'recipient': 'ST123', 'sender': 'ST456'}" - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/services/webhooks/chainhook/test_handlers.py b/tests/services/webhooks/chainhook/test_handlers.py deleted file mode 100644 index b2b51757..00000000 --- a/tests/services/webhooks/chainhook/test_handlers.py +++ /dev/null @@ -1,344 +0,0 @@ -"""Tests for the chainhook handlers.""" - -import unittest -from unittest.mock import MagicMock, patch - -from services.webhooks.chainhook.handlers import ( - BuyEventHandler, - ContractMessageHandler, - SellEventHandler, - TransactionStatusHandler, -) -from services.webhooks.chainhook.models import ( - Event, - Receipt, - TransactionIdentifier, - TransactionMetadata, - TransactionWithReceipt, -) - - -class TestContractMessageHandler(unittest.TestCase): - """Tests for the ContractMessageHandler.""" - - def setUp(self): - """Set up the test environment.""" - self.handler = ContractMessageHandler() - - # Sample transaction that should be handled - self.message_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata={ - "kind": { - "type": "ContractCall", - "data": { - "method": "send", - "args": ["test message"], - "contract_identifier": "ST1234567890ABCDEF.test-contract", - }, - }, - "success": False, - "sender": "ST1234567890ABCDEF", - }, - operations=[], - ) - - # Sample transaction 
that should not be handled - self.non_message_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata={ - "kind": { - "type": "ContractCall", - "data": { - "method": "transfer", - "args": ["100", "ST1234567890ABCDEF"], - "contract_identifier": "ST1234567890ABCDEF.test-contract", - }, - }, - "success": True, - "sender": "ST1234567890ABCDEF", - }, - operations=[], - ) - - def test_can_handle_transaction(self): - """Test the can_handle method.""" - # Should handle message transactions - self.assertTrue(self.handler.can_handle_transaction(self.message_transaction)) - - # Should not handle non-message transactions - self.assertFalse( - self.handler.can_handle_transaction(self.non_message_transaction) - ) - - @patch("backend.factory.backend") - async def test_handle_transaction(self, mock_backend): - """Test the handle_transaction method.""" - # Mock the backend methods - mock_extension = MagicMock() - mock_extension.dao_id = "test-dao-id" - mock_backend.list_extensions.return_value = [mock_extension] - mock_backend.create_queue_message.return_value = {"id": "test-message-id"} - - # Call the handler - await self.handler.handle_transaction(self.message_transaction) - - # Verify the backend methods were called correctly - mock_backend.list_extensions.assert_called_once() - mock_backend.create_queue_message.assert_called_once() - - # Check that the message was created with the correct parameters - call_args = mock_backend.create_queue_message.call_args[0][0] - self.assertEqual(call_args.type, "tweet") - self.assertEqual(call_args.message, {"message": "test message"}) - self.assertEqual(call_args.dao_id, "test-dao-id") - - -class TestTransactionStatusHandler(unittest.TestCase): - """Tests for the TransactionStatusHandler.""" - - def setUp(self): - """Set up the test environment.""" - self.handler = TransactionStatusHandler() - - # Sample transaction - self.transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata={ - "kind": { - "type": "ContractCall", - "data": { - "method": "deploy", - "contract_identifier": "ST1234567890ABCDEF.test-contract", - }, - }, - "success": True, - "sender": "ST1234567890ABCDEF", - }, - operations=[], - ) - - def test_can_handle_transaction(self): - """Test the can_handle method.""" - # Should handle any transaction - self.assertTrue(self.handler.can_handle_transaction(self.transaction)) - - @patch("backend.factory.backend") - async def test_handle_transaction(self, mock_backend): - """Test the handle_transaction method.""" - # Mock the backend methods - mock_extension = MagicMock() - mock_extension.id = "test-extension-id" - mock_extension.status = "PENDING" - mock_extension.tx_id = "0xabcdef1234567890" - - mock_token = MagicMock() - mock_token.id = "test-token-id" - mock_token.status = "PENDING" - mock_token.tx_id = "0xabcdef1234567890" - - mock_proposal = MagicMock() - mock_proposal.id = "test-proposal-id" - mock_proposal.status = "PENDING" - mock_proposal.tx_id = "other-tx-id" - - mock_backend.list_extensions.return_value = [mock_extension] - mock_backend.list_tokens.return_value = [mock_token] - mock_backend.list_proposals.return_value = [mock_proposal] - - # Call the handler - await self.handler.handle_transaction(self.transaction) - - # Verify the backend methods were called correctly - mock_backend.list_extensions.assert_called_once() - mock_backend.list_tokens.assert_called_once() - mock_backend.list_proposals.assert_called_once() 
- - # Check that the extension and token were updated but not the proposal - mock_backend.update_extension.assert_called_once() - mock_backend.update_token.assert_called_once() - mock_backend.update_proposal.assert_not_called() - - -class TestBuyEventHandler(unittest.TestCase): - """Tests for the BuyEventHandler.""" - - def setUp(self): - """Set up the test environment.""" - self.handler = BuyEventHandler() - - # Create a sample FT transfer event - self.ft_transfer_event = Event( - data={ - "asset_identifier": "ST123.test-token::TEST", - "amount": "1000", - "sender": "ST789", - "recipient": "ST456", - }, - position={"index": 0}, - type="FTTransferEvent", - ) - - # Create a sample receipt with events - self.sample_receipt = Receipt( - contract_calls_stack=[], - events=[self.ft_transfer_event], - mutated_assets_radius=[], - mutated_contracts_radius=[], - ) - - # Sample buy transaction - self.buy_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=TransactionMetadata( - description="Test buy transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "buy", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ), - operations=[], - ) - - # Sample non-buy transaction - self.non_buy_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=TransactionMetadata( - description="Test non-buy transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "transfer", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ), - operations=[], - ) - - def test_can_handle_transaction(self): - """Test the can_handle method.""" - # Should handle buy transactions - self.assertTrue(self.handler.can_handle_transaction(self.buy_transaction)) - - # Should not handle non-buy transactions - self.assertFalse(self.handler.can_handle_transaction(self.non_buy_transaction)) - - -class TestSellEventHandler(unittest.TestCase): - """Tests for the SellEventHandler.""" - - def setUp(self): - """Set up the test environment.""" - self.handler = SellEventHandler() - - # Create a sample FT transfer event - self.ft_transfer_event = Event( - data={ - "asset_identifier": "ST123.test-token::TEST", - "amount": "1000", - "sender": "ST456", - "recipient": "ST789", - }, - position={"index": 0}, - type="FTTransferEvent", - ) - - # Create a sample receipt with events - self.sample_receipt = Receipt( - contract_calls_stack=[], - events=[self.ft_transfer_event], - mutated_assets_radius=[], - mutated_contracts_radius=[], - ) - - # Sample sell transaction - self.sell_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=TransactionMetadata( - description="Test sell transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "sell", - "args": ["10"], - "contract_identifier": 
"ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ), - operations=[], - ) - - # Sample non-sell transaction - self.non_sell_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=TransactionMetadata( - description="Test non-sell transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "transfer", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ), - operations=[], - ) - - def test_can_handle_transaction(self): - """Test the can_handle method.""" - # Should handle sell transactions - self.assertTrue(self.handler.can_handle_transaction(self.sell_transaction)) - - # Should not handle non-sell transactions - self.assertFalse(self.handler.can_handle_transaction(self.non_sell_transaction)) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/services/webhooks/chainhook/test_models.py b/tests/services/webhooks/chainhook/test_models.py deleted file mode 100644 index 77b2930f..00000000 --- a/tests/services/webhooks/chainhook/test_models.py +++ /dev/null @@ -1,218 +0,0 @@ -"""Tests for the chainhook models.""" - -import unittest -from typing import Any, Dict - -from services.webhooks.chainhook.models import ( - Apply, - BlockIdentifier, - BlockMetadata, - ChainHookData, - ChainHookInfo, - Event, - Operation, - Predicate, - Receipt, - TransactionIdentifier, - TransactionMetadata, - TransactionWithReceipt, -) -from services.webhooks.chainhook.parser import ChainhookParser - - -class TestChainHookModels(unittest.TestCase): - """Test cases for ChainHook data models.""" - - def setUp(self): - """Set up the test environment.""" - # Initialize parser - self.parser = ChainhookParser() - - # Sample data for testing - self.sample_data: Dict[str, Any] = { - "apply": [ - { - "block_identifier": {"hash": "0x1234567890abcdef", "index": 123456}, - "parent_block_identifier": { - "hash": "0x0000000000000000", - "index": 123455, - }, - "timestamp": 1640995200, - "metadata": { - "bitcoin_anchor_block_identifier": { - "hash": "0xbtc0000000000000", - "index": 700000, - }, - "block_time": 1640995100, - "pox_cycle_index": 123, - "pox_cycle_length": 20, - "pox_cycle_position": 10, - "tenure_height": 12345, - }, - "transactions": [ - { - "transaction_identifier": {"hash": "0xabcdef1234567890"}, - "metadata": { - "description": "Test transaction", - "execution_cost": { - "read_count": 10, - "write_count": 5, - "runtime": 100, - }, - "fee": 1000, - "kind": { - "type": "ContractCall", - "data": { - "method": "transfer", - "args": ["123456"], - "contract_identifier": "ST1234567890ABCDEF.test-contract", - }, - }, - "nonce": 42, - "position": {"index": 0}, - "raw_tx": "0x0123456789abcdef", - "receipt": { - "contract_calls_stack": [], - "events": [ - { - "data": { - "amount": "123456", - "asset_identifier": "ST1234567890ABCDEF.test-token::token", - "sender": "ST1234567890ABCDEF", - "recipient": "ST0987654321FEDCBA", - }, - "position": {"index": 0}, - "type": "FTTransferEvent", - } - ], - "mutated_assets_radius": [ - "ST1234567890ABCDEF.test-token::token" - ], - "mutated_contracts_radius": [ - 
"ST1234567890ABCDEF.test-contract" - ], - }, - "result": "(ok true)", - "sender": "ST1234567890ABCDEF", - "sponsor": None, - "success": True, - }, - "operations": [ - { - "account": {"address": "ST1234567890ABCDEF"}, - "amount": { - "currency": {"decimals": 6, "symbol": "TOKEN"}, - "value": 123456, - }, - "operation_identifier": {"index": 0}, - "related_operations": [{"index": 1}], - "status": "SUCCESS", - "type": "DEBIT", - }, - { - "account": {"address": "ST0987654321FEDCBA"}, - "amount": { - "currency": {"decimals": 6, "symbol": "TOKEN"}, - "value": 123456, - }, - "operation_identifier": {"index": 1}, - "related_operations": [{"index": 0}], - "status": "SUCCESS", - "type": "CREDIT", - }, - ], - } - ], - } - ], - "chainhook": { - "is_streaming_blocks": False, - "predicate": {"scope": "block_height", "higher_than": 123450}, - "uuid": "test-uuid-12345", - }, - "events": [], - "rollback": [], - } - - def test_block_identifier(self): - """Test BlockIdentifier model.""" - block_id = BlockIdentifier(hash="0x1234", index=123) - self.assertEqual(block_id.hash, "0x1234") - self.assertEqual(block_id.index, 123) - - def test_transaction_identifier(self): - """Test TransactionIdentifier model.""" - tx_id = TransactionIdentifier(hash="0xabcd") - self.assertEqual(tx_id.hash, "0xabcd") - - def test_parse_chainhook_payload(self): - """Test the parse_chainhook_payload method of ChainhookParser.""" - result = self.parser.parse_chainhook_payload(self.sample_data) - - # Verify the result is of the correct type - self.assertIsInstance(result, ChainHookData) - - # Verify chainhook info - self.assertIsInstance(result.chainhook, ChainHookInfo) - self.assertFalse(result.chainhook.is_streaming_blocks) - self.assertEqual(result.chainhook.uuid, "test-uuid-12345") - self.assertIsInstance(result.chainhook.predicate, Predicate) - self.assertEqual(result.chainhook.predicate.scope, "block_height") - self.assertEqual(result.chainhook.predicate.higher_than, 123450) - - # Verify apply block structure - self.assertEqual(len(result.apply), 1) - apply_block = result.apply[0] - self.assertIsInstance(apply_block, Apply) - self.assertEqual(apply_block.block_identifier.hash, "0x1234567890abcdef") - self.assertEqual(apply_block.block_identifier.index, 123456) - self.assertEqual(apply_block.timestamp, 1640995200) - - # Verify parent block - self.assertIsNotNone(apply_block.parent_block_identifier) - self.assertEqual(apply_block.parent_block_identifier.hash, "0x0000000000000000") - self.assertEqual(apply_block.parent_block_identifier.index, 123455) - - # Verify block metadata - self.assertIsInstance(apply_block.metadata, BlockMetadata) - self.assertEqual(apply_block.metadata.tenure_height, 12345) - self.assertEqual(apply_block.metadata.pox_cycle_index, 123) - - # Verify transaction structure - self.assertEqual(len(apply_block.transactions), 1) - tx = apply_block.transactions[0] - self.assertIsInstance(tx, TransactionWithReceipt) - self.assertEqual(tx.transaction_identifier.hash, "0xabcdef1234567890") - - # Verify transaction metadata - self.assertIsInstance(tx.metadata, TransactionMetadata) - self.assertEqual(tx.metadata.description, "Test transaction") - self.assertEqual(tx.metadata.fee, 1000) - self.assertEqual(tx.metadata.nonce, 42) - self.assertEqual(tx.metadata.sender, "ST1234567890ABCDEF") - self.assertTrue(tx.metadata.success) - - # Verify transaction kind - self.assertEqual(tx.metadata.kind.get("type"), "ContractCall") - data = tx.metadata.kind.get("data", {}) - self.assertEqual(data.get("method"), "transfer") - - # Verify 
receipt - self.assertIsInstance(tx.metadata.receipt, Receipt) - self.assertEqual(len(tx.metadata.receipt.events), 1) - event = tx.metadata.receipt.events[0] - self.assertIsInstance(event, Event) - self.assertEqual(event.type, "FTTransferEvent") - self.assertEqual(event.data.get("amount"), "123456") - - # Verify operations - self.assertEqual(len(tx.operations), 2) - op = tx.operations[0] - self.assertIsInstance(op, Operation) - self.assertEqual(op.type, "DEBIT") - self.assertEqual(op.status, "SUCCESS") - self.assertEqual(op.account.get("address"), "ST1234567890ABCDEF") - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/services/webhooks/chainhook/test_parser.py b/tests/services/webhooks/chainhook/test_parser.py deleted file mode 100644 index bf8ea344..00000000 --- a/tests/services/webhooks/chainhook/test_parser.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Tests for the chainhook parser.""" - -import unittest -from typing import Any, Dict - -from services.webhooks.chainhook.models import ChainHookData -from services.webhooks.chainhook.parser import ChainhookParser - - -class TestChainhookParser(unittest.TestCase): - """Test cases for ChainhookParser.""" - - def setUp(self): - """Set up the test environment.""" - self.parser = ChainhookParser() - - # Sample data for testing - self.sample_data: Dict[str, Any] = { - "apply": [ - { - "block_identifier": {"hash": "0x1234567890abcdef", "index": 123456}, - "transactions": [ - { - "transaction_identifier": {"hash": "0xabcdef1234567890"}, - "metadata": { - "kind": { - "type": "ContractCall", - "data": { - "method": "send", - "args": ["test message"], - "contract_identifier": "ST1234567890ABCDEF.test-contract", - }, - }, - "success": False, - "sender": "ST1234567890ABCDEF", - }, - "operations": [], - } - ], - } - ] - } - - def test_parse(self): - """Test parsing chainhook webhook data.""" - result = self.parser.parse(self.sample_data) - - # Verify the result is of the correct type - self.assertIsInstance(result, ChainHookData) - - # Verify the parsed data structure - self.assertEqual(len(result.apply), 1) - self.assertEqual(result.apply[0].block_identifier.hash, "0x1234567890abcdef") - self.assertEqual(result.apply[0].block_identifier.index, 123456) - - # Verify transaction data - self.assertEqual(len(result.apply[0].transactions), 1) - tx = result.apply[0].transactions[0] - self.assertEqual(tx.transaction_identifier.hash, "0xabcdef1234567890") - self.assertEqual(tx.metadata["sender"], "ST1234567890ABCDEF") - - # Verify metadata structure - kind = tx.metadata.get("kind", {}) - self.assertEqual(kind.get("type"), "ContractCall") - - # Verify data structure - data = kind.get("data", {}) - self.assertEqual(data.get("method"), "send") - self.assertEqual(data.get("args"), ["test message"]) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/services/webhooks/chainhook/test_sell_event_handler.py b/tests/services/webhooks/chainhook/test_sell_event_handler.py deleted file mode 100644 index 2da95218..00000000 --- a/tests/services/webhooks/chainhook/test_sell_event_handler.py +++ /dev/null @@ -1,281 +0,0 @@ -"""Tests for the SellEventHandler.""" - -import unittest -from unittest.mock import MagicMock, patch -from uuid import UUID - -from backend.models import WalletTokenBase -from services.webhooks.chainhook.handlers.sell_event_handler import SellEventHandler -from services.webhooks.chainhook.models import ( - Event, - Receipt, - TransactionIdentifier, - TransactionMetadata, - TransactionWithReceipt, -) - - -class 
TestSellEventHandler(unittest.TestCase): - """Test cases for SellEventHandler.""" - - def setUp(self): - """Set up the test environment.""" - self.handler = SellEventHandler() - - # Create a mock logger - self.handler.logger = MagicMock() - - # Create a sample FT transfer event - self.ft_transfer_event = Event( - data={ - "asset_identifier": "ST123.test-token::TEST", - "amount": "1000", - "sender": "ST456", - "recipient": "ST789", - }, - position={"index": 0}, - type="FTTransferEvent", - ) - - # Create a sample receipt with events - self.sample_receipt = Receipt( - contract_calls_stack=[], - events=[self.ft_transfer_event], - mutated_assets_radius=[], - mutated_contracts_radius=[], - ) - - # Create sample transaction metadata - self.sample_metadata = TransactionMetadata( - description="Test sell transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "sell", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ) - - # Create a sample transaction - self.sample_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=self.sample_metadata, - operations=[], - ) - - def test_can_handle_sell_transaction(self): - """Test that the handler can handle sell transactions.""" - # Test with a sell transaction - result = self.handler.can_handle_transaction(self.sample_transaction) - self.assertTrue(result) - - # Test with a sell-tokens transaction - sell_tokens_metadata = TransactionMetadata( - description="Test sell-tokens transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "sell-tokens", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ) - - sell_tokens_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=sell_tokens_metadata, - operations=[], - ) - - result = self.handler.can_handle_transaction(sell_tokens_transaction) - self.assertTrue(result) - - def test_cannot_handle_non_sell_transaction(self): - """Test that the handler cannot handle non-sell transactions.""" - # Create a non-sell transaction - non_sell_metadata = TransactionMetadata( - description="Test non-sell transaction", - execution_cost={"read_count": 10, "write_count": 5, "runtime": 100}, - fee=1000, - kind={ - "type": "ContractCall", - "data": { - "method": "transfer", - "args": ["10"], - "contract_identifier": "ST123.test-contract", - }, - }, - nonce=42, - position={"index": 0}, - raw_tx="0x0123456789abcdef", - receipt=self.sample_receipt, - result="(ok true)", - sender="ST456", - sponsor=None, - success=True, - ) - - non_sell_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=non_sell_metadata, - operations=[], - ) - - result = self.handler.can_handle_transaction(non_sell_transaction) - self.assertFalse(result) - - @patch("backend.factory.backend") - @patch("services.webhooks.chainhook.handlers.sell_event_handler.configure_logger") - async 
def test_handle_transaction_with_wallet_token( - self, mock_configure_logger, mock_backend - ): - """Test that the handler correctly updates token balances when selling tokens.""" - # Set up the mock logger - mock_logger = MagicMock() - mock_configure_logger.return_value = mock_logger - - # Create a new handler with the mocked logger - handler = SellEventHandler() - - # Mock the wallet and token data - mock_wallet = MagicMock() - mock_wallet.id = UUID("00000000-0000-0000-0000-000000000001") - mock_token = MagicMock() - mock_token.id = UUID("00000000-0000-0000-0000-000000000002") - mock_token.dao_id = UUID("00000000-0000-0000-0000-000000000003") - - # Mock the wallet token record - mock_wallet_token = MagicMock() - mock_wallet_token.id = UUID("00000000-0000-0000-0000-000000000004") - mock_wallet_token.wallet_id = mock_wallet.id - mock_wallet_token.token_id = mock_token.id - mock_wallet_token.dao_id = mock_token.dao_id - mock_wallet_token.amount = "5000" # Current amount before selling - - # Set up the mock backend responses - mock_backend.list_wallets.return_value = [mock_wallet] - mock_backend.list_tokens.return_value = [mock_token] - mock_backend.list_wallet_tokens.return_value = [mock_wallet_token] - - # Handle the transaction - await handler.handle_transaction(self.sample_transaction) - - # Check that the backend methods were called correctly - mock_backend.list_wallets.assert_called_once() - mock_backend.list_tokens.assert_called_once() - mock_backend.list_wallet_tokens.assert_called_once() - - # Check that update_wallet_token was called with the correct parameters - mock_backend.update_wallet_token.assert_called_once() - call_args = mock_backend.update_wallet_token.call_args - self.assertEqual(call_args[0][0], mock_wallet_token.id) - - # Check that the amount was decreased correctly (5000 - 1000 = 4000) - update_data = call_args[0][1] - self.assertIsInstance(update_data, WalletTokenBase) - self.assertEqual(update_data.amount, "4000.0") - self.assertEqual(update_data.wallet_id, mock_wallet.id) - self.assertEqual(update_data.token_id, mock_token.id) - self.assertEqual(update_data.dao_id, mock_token.dao_id) - - @patch("backend.factory.backend") - @patch("services.webhooks.chainhook.handlers.sell_event_handler.configure_logger") - async def test_handle_transaction_with_insufficient_balance( - self, mock_configure_logger, mock_backend - ): - """Test that the handler correctly handles selling more tokens than available.""" - # Set up the mock logger - mock_logger = MagicMock() - mock_configure_logger.return_value = mock_logger - - # Create a new handler with the mocked logger - handler = SellEventHandler() - - # Create an event with a large amount to sell (more than available) - large_amount_event = Event( - data={ - "asset_identifier": "ST123.test-token::TEST", - "amount": "10000", # More than the 5000 available - "sender": "ST456", - "recipient": "ST789", - }, - position={"index": 0}, - type="FTTransferEvent", - ) - - # Update the receipt with the new event - large_amount_receipt = Receipt( - contract_calls_stack=[], - events=[large_amount_event], - mutated_assets_radius=[], - mutated_contracts_radius=[], - ) - - # Update the metadata with the new receipt - large_amount_metadata = self.sample_metadata - large_amount_metadata.receipt = large_amount_receipt - - # Create a new transaction with the updated metadata - large_amount_transaction = TransactionWithReceipt( - transaction_identifier=TransactionIdentifier(hash="0xabcdef1234567890"), - metadata=large_amount_metadata, - operations=[], - 
) - - # Mock the wallet and token data - mock_wallet = MagicMock() - mock_wallet.id = UUID("00000000-0000-0000-0000-000000000001") - mock_token = MagicMock() - mock_token.id = UUID("00000000-0000-0000-0000-000000000002") - mock_token.dao_id = UUID("00000000-0000-0000-0000-000000000003") - - # Mock the wallet token record with a smaller amount than being sold - mock_wallet_token = MagicMock() - mock_wallet_token.id = UUID("00000000-0000-0000-0000-000000000004") - mock_wallet_token.wallet_id = mock_wallet.id - mock_wallet_token.token_id = mock_token.id - mock_wallet_token.dao_id = mock_token.dao_id - mock_wallet_token.amount = "5000" # Less than the 10000 being sold - - # Set up the mock backend responses - mock_backend.list_wallets.return_value = [mock_wallet] - mock_backend.list_tokens.return_value = [mock_token] - mock_backend.list_wallet_tokens.return_value = [mock_wallet_token] - - # Handle the transaction - await handler.handle_transaction(large_amount_transaction) - - # Check that update_wallet_token was called with the correct parameters - mock_backend.update_wallet_token.assert_called_once() - call_args = mock_backend.update_wallet_token.call_args - - # Check that the amount was set to 0 (not negative) - update_data = call_args[0][1] - self.assertEqual(update_data.amount, "0.0") - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/services/webhooks/dao/test_dao_webhook.py b/tests/services/webhooks/dao/test_dao_webhook.py deleted file mode 100644 index 71cb1974..00000000 --- a/tests/services/webhooks/dao/test_dao_webhook.py +++ /dev/null @@ -1,142 +0,0 @@ -"""Tests for the DAO webhook service.""" - -from unittest.mock import AsyncMock, MagicMock, patch -from uuid import UUID - -import pytest - -from backend.models import ContractStatus -from services.webhooks.dao.handler import DAOHandler -from services.webhooks.dao.models import DAOWebhookPayload, ExtensionData, TokenData -from services.webhooks.dao.parser import DAOParser -from services.webhooks.dao.service import DAOService - - -@pytest.fixture -def sample_dao_payload(): - """Create a sample DAO webhook payload for testing.""" - return { - "name": "Test DAO", - "mission": "Testing mission", - "description": "A DAO for testing purposes", - "is_deployed": False, - "is_broadcasted": False, - "extensions": [{"type": "test_extension", "status": "DRAFT"}], - "token": { - "name": "Test Token", - "symbol": "TEST", - "decimals": 6, - "description": "A token for testing", - }, - } - - -def test_dao_parser(sample_dao_payload): - """Test that the DAO parser correctly parses a valid payload.""" - parser = DAOParser() - result = parser.parse(sample_dao_payload) - - assert isinstance(result, DAOWebhookPayload) - assert result.name == "Test DAO" - assert result.mission == "Testing mission" - assert result.description == "A DAO for testing purposes" - assert result.is_deployed is False - assert result.is_broadcasted is False - - assert len(result.extensions) == 1 - assert result.extensions[0].type == "test_extension" - assert result.extensions[0].status == ContractStatus.DRAFT - - assert result.token is not None - assert result.token.name == "Test Token" - assert result.token.symbol == "TEST" - assert result.token.decimals == 6 - assert result.token.description == "A token for testing" - - -@pytest.mark.asyncio -async def test_dao_handler(): - """Test that the DAO handler correctly processes a parsed payload.""" - # Create mock database - mock_db = MagicMock() - mock_db.create_dao.return_value = MagicMock( - 
id=UUID("00000000-0000-0000-0000-000000000001"), name="Test DAO" - ) - mock_db.create_extension.return_value = MagicMock( - id=UUID("00000000-0000-0000-0000-000000000002") - ) - mock_db.create_token.return_value = MagicMock( - id=UUID("00000000-0000-0000-0000-000000000003") - ) - - # Create parsed payload - parsed_data = DAOWebhookPayload( - name="Test DAO", - mission="Testing mission", - description="A DAO for testing purposes", - extensions=[ExtensionData(type="test_extension", status=ContractStatus.DRAFT)], - token=TokenData( - name="Test Token", - symbol="TEST", - decimals=6, - description="A token for testing", - ), - ) - - # Test handler with mocked database - with patch("backend.factory.backend", mock_db): - handler = DAOHandler() - result = await handler.handle(parsed_data) - - assert result["success"] is True - assert "Successfully created DAO 'Test DAO'" in result["message"] - assert result["data"]["dao_id"] == UUID("00000000-0000-0000-0000-000000000001") - assert result["data"]["extension_ids"] == [ - UUID("00000000-0000-0000-0000-000000000002") - ] - assert result["data"]["token_id"] == UUID( - "00000000-0000-0000-0000-000000000003" - ) - - # Verify database calls - mock_db.create_dao.assert_called_once() - mock_db.create_extension.assert_called_once() - mock_db.create_token.assert_called_once() - - -@pytest.mark.asyncio -async def test_dao_service(sample_dao_payload): - """Test that the DAO service correctly coordinates parsing and handling.""" - # Create mock parser and handler - mock_parser = MagicMock() - mock_handler = MagicMock() - mock_handler.handle = AsyncMock() - - # Configure mock returns - parsed_data = DAOWebhookPayload(**sample_dao_payload) - mock_parser.parse.return_value = parsed_data - mock_handler.handle.return_value = { - "success": True, - "message": "Successfully created DAO", - "data": { - "dao_id": UUID("00000000-0000-0000-0000-000000000001"), - "extension_ids": [UUID("00000000-0000-0000-0000-000000000002")], - "token_id": UUID("00000000-0000-0000-0000-000000000003"), - }, - } - - # Create service with mocked components - service = DAOService() - service.parser = mock_parser - service.handler = mock_handler - - # Test service - result = await service.process(sample_dao_payload) - - assert result["success"] is True - assert result["message"] == "Successfully created DAO" - assert result["data"]["dao_id"] == UUID("00000000-0000-0000-0000-000000000001") - - # Verify component calls - mock_parser.parse.assert_called_once_with(sample_dao_payload) - mock_handler.handle.assert_called_once_with(parsed_data) diff --git a/tests/services/workflows/test_vector_react.py b/tests/services/workflows/test_vector_react.py deleted file mode 100644 index ffd3ac9e..00000000 --- a/tests/services/workflows/test_vector_react.py +++ /dev/null @@ -1,207 +0,0 @@ -"""Tests for the Vector React workflow.""" - -import unittest -from unittest.mock import AsyncMock, MagicMock, patch - -from langchain_core.documents import Document - -from services.workflows.vector_react import ( - VectorLangGraphService, - VectorReactWorkflow, - add_documents_to_vectors, -) - - -class TestVectorOperations(unittest.TestCase): - """Tests for the vector store operations.""" - - def setUp(self): - """Set up test fixtures.""" - self.mock_backend = MagicMock() - self.mock_collection = MagicMock() - self.mock_backend.get_vector_collection.return_value = self.mock_collection - self.mock_backend.query_vectors = AsyncMock( - return_value=[ - { - "id": "1", - "page_content": "test content", - "metadata": 
{"source": "test"}, - } - ] - ) - self.mock_backend.add_vectors = AsyncMock(return_value=["1"]) - self.mock_backend.create_vector_collection.return_value = self.mock_collection - - # Patch backend - self.backend_patch = patch( - "services.workflows.vector_react.backend", self.mock_backend - ) - self.backend_patch.start() - - def tearDown(self): - """Tear down test fixtures.""" - self.backend_patch.stop() - - async def test_add_documents_to_vectors(self): - """Test adding documents to vector store.""" - # Setup - documents = [Document(page_content="test content", metadata={"source": "test"})] - - # Execute - result = await add_documents_to_vectors( - collection_name="test_collection", documents=documents - ) - - # Verify - self.mock_backend.get_vector_collection.assert_called_once_with( - "test_collection" - ) - self.mock_backend.add_vectors.assert_called_once() - self.assertEqual(result, ["1"]) - - async def test_add_documents_creates_collection_if_not_exists(self): - """Test that collection is created if it doesn't exist.""" - # Setup - documents = [Document(page_content="test content", metadata={"source": "test"})] - self.mock_backend.get_vector_collection.side_effect = [ - ValueError, - self.mock_collection, - ] - - # Execute - result = await add_documents_to_vectors( - collection_name="new_collection", documents=documents - ) - - # Verify - self.mock_backend.create_vector_collection.assert_called_once_with( - "new_collection", dimensions=1536 - ) - self.mock_backend.add_vectors.assert_called_once() - self.assertEqual(result, ["1"]) - - -class TestVectorReactWorkflow(unittest.TestCase): - """Tests for the VectorReactWorkflow class.""" - - def setUp(self): - """Set up test fixtures.""" - self.mock_callback_handler = MagicMock() - self.mock_tools = [] - self.mock_backend = MagicMock() - self.mock_backend.query_vectors = AsyncMock( - return_value=[ - { - "id": "1", - "page_content": "test content", - "metadata": {"source": "test"}, - } - ] - ) - self.backend_patch = patch( - "services.workflows.vector_react.backend", self.mock_backend - ) - self.backend_patch.start() - - self.mock_llm = MagicMock() - self.mock_llm.invoke = MagicMock() - - def tearDown(self): - """Tear down test fixtures.""" - self.backend_patch.stop() - - @patch("services.workflows.vector_react.ChatOpenAI") - def test_create_graph(self, mock_chat_openai): - """Test creating the workflow graph.""" - # Setup - mock_chat_openai.return_value.bind_tools.return_value = self.mock_llm - workflow = VectorReactWorkflow( - callback_handler=self.mock_callback_handler, - tools=self.mock_tools, - collection_name="test_collection", - llm=self.mock_llm, - ) - - # Execute - graph = workflow._create_graph() - - # Verify - self.assertIsNotNone(graph) - # Check that the graph has the expected nodes - self.assertIn("vector_retrieval", graph.nodes) - self.assertIn("agent", graph.nodes) - self.assertIn("tools", graph.nodes) - - -class TestVectorLangGraphService(unittest.IsolatedAsyncioTestCase): - """Tests for the VectorLangGraphService class.""" - - def setUp(self): - """Set up test fixtures.""" - self.mock_backend = MagicMock() - self.mock_backend.query_vectors = AsyncMock( - return_value=[ - { - "id": "1", - "page_content": "test content", - "metadata": {"source": "test"}, - } - ] - ) - self.backend_patch = patch( - "services.workflows.vector_react.backend", self.mock_backend - ) - self.backend_patch.start() - - self.service = VectorLangGraphService(collection_name="test_collection") - - def tearDown(self): - """Tear down test fixtures.""" - 
self.backend_patch.stop() - - @patch("services.workflows.vector_react.VectorReactWorkflow") - @patch("services.workflows.vector_react.StreamingCallbackHandler") - async def test_execute_vector_react_stream(self, mock_handler, mock_workflow): - """Test executing a vector react stream.""" - # Setup - history = [{"role": "user", "content": "test message"}] - input_str = "test input" - mock_queue = AsyncMock() - mock_queue.get = AsyncMock( - side_effect=[{"type": "token", "content": "test"}, {"type": "end"}] - ) - mock_handler.return_value = MagicMock() - - mock_graph = MagicMock() - mock_runnable = MagicMock() - mock_workflow.return_value._create_graph.return_value = mock_graph - mock_graph.compile.return_value = mock_runnable - - mock_task = MagicMock() - mock_task.done = MagicMock(side_effect=[False, False, True]) - mock_result = {"messages": [MagicMock(content="test result")]} - mock_task.__await__ = MagicMock(return_value=mock_result) - - # Execute - with ( - patch("asyncio.Queue", return_value=mock_queue), - patch("asyncio.get_running_loop"), - patch("asyncio.create_task", return_value=mock_task), - patch("asyncio.wait_for", side_effect=lambda *args, **kwargs: args[0]), - ): - results = [ - chunk - async for chunk in self.service.execute_vector_react_stream( - history, input_str - ) - ] - - # Verify - self.assertEqual(len(results), 3) # token, end, result - self.assertEqual(results[0], {"type": "token", "content": "test"}) - self.assertEqual(results[1], {"type": "end"}) - self.assertEqual(results[2]["type"], "result") - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_dao_proposal_voter.py b/tests/test_dao_proposal_voter.py deleted file mode 100644 index 568933b5..00000000 --- a/tests/test_dao_proposal_voter.py +++ /dev/null @@ -1,198 +0,0 @@ -"""Unit tests for the DAO proposal voter task.""" - -import datetime -import unittest -from unittest.mock import MagicMock, patch -from uuid import UUID - -from backend.models import QueueMessage -from services.runner.base import JobContext, JobType -from services.runner.tasks.dao_proposal_voter import DAOProposalVoterTask - - -class TestDAOProposalVoterTask(unittest.TestCase): - """Test cases for the DAO proposal voter task.""" - - def setUp(self): - """Set up the test case.""" - # Create a test task instance - self.task = DAOProposalVoterTask() - - # Mock the configuration - self.task.config = MagicMock() - - # Create a test job context - self.context = JobContext( - job_type=JobType.DAO_PROPOSAL_VOTE, - config=self.task.config, - parameters={}, - ) - - # Mock queue messages - self.test_queue_message = QueueMessage( - id=UUID("12345678-1234-5678-1234-567812345678"), - created_at=datetime.datetime.now(), - type="dao_proposal_vote", - message={ - "action_proposals_contract": "SP123.dao-action-proposals", - "proposal_id": 1, - "dao_name": "TestDAO", - "tx_id": "0x1234567890", - }, - wallet_id=UUID("98765432-9876-5432-9876-543298765432"), - is_processed=False, - ) - - @patch("services.runner.tasks.dao_proposal_voter.backend") - @patch("services.runner.tasks.dao_proposal_voter.evaluate_and_vote_on_proposal") - async def test_process_message_success(self, mock_evaluate, mock_backend): - """Test processing a message successfully.""" - # Mock the evaluate_and_vote_on_proposal function - mock_evaluate.return_value = { - "success": True, - "evaluation": { - "approve": True, - "confidence_score": 0.85, - "reasoning": "This is a good proposal", - }, - "auto_voted": True, - } - - # Process the test message - result = await 
self.task.process_message(self.test_queue_message) - - # Check that the result is correct - self.assertTrue(result["success"]) - self.assertTrue(result["auto_voted"]) - self.assertTrue(result["approve"]) - - # Check that evaluate_and_vote_on_proposal was called with the correct parameters - mock_evaluate.assert_called_once_with( - action_proposals_contract="SP123.dao-action-proposals", - proposal_id=1, - dao_name="TestDAO", - wallet_id=UUID("98765432-9876-5432-9876-543298765432"), - auto_vote=True, - confidence_threshold=0.7, - ) - - # Check that the message was marked as processed - mock_backend.update_queue_message.assert_called_once_with( - UUID("12345678-1234-5678-1234-567812345678"), - {"is_processed": True}, - ) - - @patch("services.runner.tasks.dao_proposal_voter.backend") - @patch("services.runner.tasks.dao_proposal_voter.evaluate_and_vote_on_proposal") - async def test_process_message_missing_parameters( - self, mock_evaluate, mock_backend - ): - """Test processing a message with missing parameters.""" - # Create a message with missing parameters - message = QueueMessage( - id=UUID("12345678-1234-5678-1234-567812345678"), - created_at=datetime.datetime.now(), - type="dao_proposal_vote", - message={ - # Missing action_proposals_contract - "proposal_id": 1, - "dao_name": "TestDAO", - }, - wallet_id=UUID("98765432-9876-5432-9876-543298765432"), - is_processed=False, - ) - - # Process the message - result = await self.task.process_message(message) - - # Check that the result indicates failure - self.assertFalse(result["success"]) - self.assertIn("Missing required parameters", result["error"]) - - # Check that evaluate_and_vote_on_proposal was not called - mock_evaluate.assert_not_called() - - # Check that the message was not marked as processed - mock_backend.update_queue_message.assert_not_called() - - @patch("services.runner.tasks.dao_proposal_voter.backend") - async def test_get_pending_messages(self, mock_backend): - """Test retrieving pending messages.""" - # Mock the list_queue_messages function - mock_backend.list_queue_messages.return_value = [self.test_queue_message] - - # Get pending messages - messages = await self.task.get_pending_messages() - - # Check that the correct messages were returned - self.assertEqual(len(messages), 1) - self.assertEqual(messages[0].id, self.test_queue_message.id) - - # Check that list_queue_messages was called with the correct parameters - mock_backend.list_queue_messages.assert_called_once() - filters = mock_backend.list_queue_messages.call_args[1]["filters"] - self.assertEqual(filters.type, "dao_proposal_vote") - self.assertFalse(filters.is_processed) - - @patch( - "services.runner.tasks.dao_proposal_voter.DAOProposalVoterTask.get_pending_messages" - ) - @patch( - "services.runner.tasks.dao_proposal_voter.DAOProposalVoterTask.process_message" - ) - async def test_execute_no_messages(self, mock_process, mock_get_messages): - """Test executing the task when there are no messages.""" - # Mock get_pending_messages to return an empty list - mock_get_messages.return_value = [] - - # Execute the task - results = await self.task.execute(self.context) - - # Check that results are correct - self.assertEqual(len(results), 1) - self.assertTrue(results[0]["success"]) - self.assertEqual(results[0]["proposals_processed"], 0) - self.assertEqual(results[0]["proposals_voted"], 0) - self.assertEqual(len(results[0]["errors"]), 0) - - # Check that process_message was not called - mock_process.assert_not_called() - - @patch( - 
"services.runner.tasks.dao_proposal_voter.DAOProposalVoterTask.get_pending_messages" - ) - @patch( - "services.runner.tasks.dao_proposal_voter.DAOProposalVoterTask.process_message" - ) - async def test_execute_with_messages(self, mock_process, mock_get_messages): - """Test executing the task with pending messages.""" - # Mock get_pending_messages to return test messages - mock_get_messages.return_value = [ - self.test_queue_message, - self.test_queue_message, - ] - - # Mock process_message to return success for the first message and failure for the second - mock_process.side_effect = [ - {"success": True, "auto_voted": True, "approve": True}, - {"success": False, "error": "Test error"}, - ] - - # Execute the task - results = await self.task.execute(self.context) - - # Check that results are correct - self.assertEqual(len(results), 1) - self.assertTrue(results[0]["success"]) - self.assertEqual(results[0]["proposals_processed"], 2) - self.assertEqual(results[0]["proposals_voted"], 1) - self.assertEqual(len(results[0]["errors"]), 1) - self.assertEqual(results[0]["errors"][0], "Test error") - - # Check that process_message was called twice - self.assertEqual(mock_process.call_count, 2) - mock_process.assert_any_call(self.test_queue_message) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_proposal_evaluation.py b/tests/test_proposal_evaluation.py deleted file mode 100644 index 27487149..00000000 --- a/tests/test_proposal_evaluation.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Test script for the proposal evaluation workflow.""" - -import asyncio -import os -import sys -from typing import Dict, Optional - -# Add the parent directory to the path so we can import the modules -sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -from backend.models import UUID -from services.workflows.proposal_evaluation import ( - evaluate_and_vote_on_proposal, - evaluate_proposal_only, -) - - -async def test_proposal_evaluation( - action_proposals_contract: str, - proposal_id: int, - dao_name: Optional[str] = None, - wallet_id: Optional[UUID] = None, - auto_vote: bool = False, -) -> Dict: - """Test the proposal evaluation workflow. 
- - Args: - action_proposals_contract: The contract ID of the DAO action proposals - proposal_id: The ID of the proposal to evaluate - dao_name: Optional name of the DAO for additional context - wallet_id: Optional wallet ID to use for retrieving proposal data - auto_vote: Whether to automatically vote based on the evaluation - - Returns: - Dictionary containing the evaluation results - """ - print(f"Evaluating proposal {proposal_id} for contract {action_proposals_contract}") - - if auto_vote: - print("Auto-voting is enabled") - result = await evaluate_and_vote_on_proposal( - action_proposals_contract=action_proposals_contract, - proposal_id=proposal_id, - dao_name=dao_name, - wallet_id=wallet_id, - auto_vote=True, - confidence_threshold=0.7, - ) - else: - print("Evaluation only mode (no voting)") - result = await evaluate_proposal_only( - action_proposals_contract=action_proposals_contract, - proposal_id=proposal_id, - dao_name=dao_name, - wallet_id=wallet_id, - ) - - # Print the results - print("\nEvaluation Results:") - print(f"Approve: {result['evaluation']['approve']}") - print(f"Confidence: {result['evaluation']['confidence_score']}") - print(f"Reasoning: {result['evaluation']['reasoning']}") - - if auto_vote and result.get("auto_voted"): - print("\nVoting Results:") - print(f"Auto-voted: {result.get('auto_voted', False)}") - print(f"Vote Result: {result.get('vote_result', {})}") - - return result - - -if __name__ == "__main__": - # Example usage - # Replace these values with actual contract and proposal IDs - contract_id = "SP000000000000000000002Q6VF78.dao-action-proposals" - proposal_id = 1 - dao_name = "Example DAO" - - # Run the test - asyncio.run( - test_proposal_evaluation( - action_proposals_contract=contract_id, - proposal_id=proposal_id, - dao_name=dao_name, - auto_vote=False, # Set to True to enable auto-voting - ) - ) diff --git a/tools/agent_account.py b/tools/agent_account.py new file mode 100644 index 00000000..fafd7665 --- /dev/null +++ b/tools/agent_account.py @@ -0,0 +1,137 @@ +from typing import Any, Dict, Optional, Type +from uuid import UUID + +from langchain.tools import BaseTool +from pydantic import BaseModel, Field + +from tools.bun import BunScriptRunner + + +class AgentAccountDeployInput(BaseModel): + """Input schema for deploying an agent account contract.""" + + owner_address: str = Field( + ..., + description="Stacks address of the wallet owner", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) + agent_address: str = Field( + ..., + description="Stacks address of the agent", + example="ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG", + ) + dao_token_contract: str = Field( + ..., + description="Contract principal of the DAO token", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + dao_token_dex_contract: str = Field( + ..., + description="Contract principal of the DAO token DEX", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", + ) + save_to_file: bool = Field( + False, + description="Whether to save the contract to a file", + ) + + +class AgentAccountDeployTool(BaseTool): + name: str = "agent_account_deploy" + description: str = ( + "Deploy a new agent account contract with specified owner and agent addresses. " + "Returns the deployed contract address and transaction ID." 
+ ) + args_schema: Type[BaseModel] = AgentAccountDeployInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + seed_phrase: Optional[str] = None + + def __init__( + self, + wallet_id: Optional[UUID] = None, + seed_phrase: Optional[str] = None, + **kwargs, + ): + super().__init__(**kwargs) + self.wallet_id = wallet_id + self.seed_phrase = seed_phrase + + def _deploy( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + save_to_file: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy agent account.""" + if self.seed_phrase is None and self.wallet_id is None: + return { + "success": False, + "message": "Either seed phrase or wallet ID is required", + "data": None, + } + + args = [ + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + str(save_to_file).lower(), + ] + + # Use seed phrase if available, otherwise fall back to wallet_id + if self.seed_phrase: + return BunScriptRunner.bun_run_with_seed_phrase( + self.seed_phrase, + "aibtc-cohort-0/contract-tools", + "deploy-agent-account.ts", + *args, + ) + else: + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-cohort-0/contract-tools", + "deploy-agent-account.ts", + *args, + ) + + def _run( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + save_to_file: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy agent account.""" + return self._deploy( + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + save_to_file, + **kwargs, + ) + + async def _arun( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + save_to_file: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + save_to_file, + **kwargs, + ) diff --git a/tools/alex.py b/tools/alex.py deleted file mode 100644 index c1a26767..00000000 --- a/tools/alex.py +++ /dev/null @@ -1,93 +0,0 @@ -from typing import Any, Dict, List, Type - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from lib.alex import AlexApi - - -class AlexBaseInput(BaseModel): - """Base input schema for Alex tools.""" - - -class AlexPriceHistoryInput(AlexBaseInput): - """Input schema for AlexGetPriceHistory.""" - - token_address: str = Field( - ..., description="The address of the token to get price history for." - ) - - -class AlexTokenPoolVolumeInput(AlexBaseInput): - """Input schema for AlexGetTokenPoolVolume.""" - - token_pool_id: str = Field( - ..., description="The token pool ID to get volume data for." 
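A minimal usage sketch for the AgentAccountDeployTool added in tools/agent_account.py above, assuming it is imported from that module; the wallet UUID and Stacks addresses are placeholders, and calling _run directly is for illustration only.

from uuid import UUID

from tools.agent_account import AgentAccountDeployTool

# Placeholder identifiers, not a real deployment.
deploy_tool = AgentAccountDeployTool(
    wallet_id=UUID("00000000-0000-0000-0000-000000000001")
)
result = deploy_tool._run(
    owner_address="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM",
    agent_address="ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG",
    dao_token_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    dao_token_dex_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex",
    save_to_file=False,
)
# result is the BunScriptRunner dict: {"output": ..., "error": ..., "success": ...}

If a seed_phrase is passed to the constructor instead, it takes precedence over wallet_id, mirroring the fallback in _deploy.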
- ) - - -class AlexGetPriceHistory(BaseTool): - name: str = "alex_get_price_history" - description: str = ( - "Retrieve historical price data for a specified cryptocurrency token address" - ) - args_schema: Type[BaseModel] = AlexPriceHistoryInput - return_direct: bool = False - - def _deploy(self, token_address: str, **kwargs) -> List[Any]: - """Execute the tool to get price history.""" - obj = AlexApi() - return obj.get_price_history(token_address) - - def _run(self, token_address: str, **kwargs) -> List[Any]: - """Execute the tool to get price history.""" - return self._deploy(token_address) - - async def _arun(self, token_address: str, **kwargs) -> List[Any]: - """Async version of the tool.""" - return self._deploy(token_address) - - -class AlexGetSwapInfo(BaseTool): - name: str = "alex_get_swap_info" - description: str = "Retrieve all available token pair data from the Alex DEX" - return_direct: bool = False - args_schema: Type[BaseModel] = AlexBaseInput - - def _deploy(self, **kwargs) -> List[Dict[str, str]]: - """Execute the tool to get swap info.""" - obj = AlexApi() - pairs = obj.get_pairs() - return [ - {"token": pair.get("wrapped_token_y"), "token_pool_id": pair.get("pool_id")} - for pair in pairs - if pair.get("wrapped_token_x") == "STX" - ] - - def _run(self, **kwargs) -> List[Dict[str, str]]: - """Execute the tool to get swap info.""" - return self._deploy() - - async def _arun(self, **kwargs) -> List[Dict[str, str]]: - """Async version of the tool.""" - return self._deploy() - - -class AlexGetTokenPoolVolume(BaseTool): - name: str = "alex_get_token_pool_volume" - description: str = "Retrieve pool volume data for a specified token pool ID" - args_schema: Type[BaseModel] = AlexTokenPoolVolumeInput - return_direct: bool = False - - def _deploy(self, token_pool_id: str, **kwargs) -> str: - """Execute the tool to get token pool volume.""" - obj = AlexApi() - return obj.get_token_pool_price(token_pool_id) - - def _run(self, token_pool_id: str, **kwargs) -> str: - """Execute the tool to get token pool volume.""" - return self._deploy(token_pool_id) - - async def _arun(self, token_pool_id: str, **kwargs) -> str: - """Async version of the tool.""" - return self._deploy(token_pool_id) diff --git a/tools/bun.py b/tools/bun.py index 9c3dc20b..eced1455 100644 --- a/tools/bun.py +++ b/tools/bun.py @@ -40,6 +40,49 @@ def bun_run( secret = backend.get_secret(wallet.secret_id) mnemonic = secret.decrypted_secret + return BunScriptRunner._execute_script( + mnemonic, script_path, script_name, *args + ) + + @staticmethod + def bun_run_with_seed_phrase( + seed_phrase: str, script_path: str, script_name: str, *args: str + ) -> Dict[str, Union[str, bool, None]]: + """ + Run a TypeScript script using Bun with specified parameters using seed phrase directly. 
+ + Args: + seed_phrase: The mnemonic seed phrase to use for script execution + script_path: Path of the directory containing the script + script_name: Name of the TypeScript script to run + *args: Additional arguments to pass to the script + + Returns: + Dict containing: + - output: Script execution stdout if successful + - error: Error message if execution failed, None otherwise + - success: Boolean indicating if execution was successful + """ + return BunScriptRunner._execute_script( + seed_phrase, script_path, script_name, *args + ) + + @staticmethod + def _execute_script( + mnemonic: str, script_path: str, script_name: str, *args: str + ) -> Dict[str, Union[str, bool, None]]: + """ + Internal method to execute the script with the given mnemonic. + + Args: + mnemonic: The mnemonic phrase to use + script_path: Path of the directory containing the script + script_name: Name of the TypeScript script to run + *args: Additional arguments to pass to the script + + Returns: + Dict containing script execution results + """ env = os.environ.copy() env["ACCOUNT_INDEX"] = "0" env["MNEMONIC"] = mnemonic @@ -60,7 +103,7 @@ def bun_run( ) try: - logger.info(f"Running script: {script_name} for wallet: {wallet_id}") + logger.info(f"Running script: {script_name}") result = subprocess.run( command, check=True, diff --git a/tools/coinmarketcap.py b/tools/coinmarketcap.py deleted file mode 100644 index 8e478725..00000000 --- a/tools/coinmarketcap.py +++ /dev/null @@ -1,76 +0,0 @@ -from typing import Type - -import requests -from langchain.tools import BaseTool -from pydantic import BaseModel - -from config import config - - -class GetBitcoinDataInput(BaseModel): - """Input schema for GetBitcoinData tool. - This tool doesn't require any input parameters but we still define the schema for consistency. - """ - - pass - - -class GetBitcoinData(BaseTool): - name: str = "get_bitcoin_data" - description: str = "Fetch current Bitcoin market data including price, market cap, 24h trading volume, and percentage changes from CoinMarketCap" - args_schema: Type[BaseModel] = GetBitcoinDataInput - return_direct: bool = False - - def _deploy(self, **kwargs) -> str: - """Execute the tool to fetch Bitcoin market data.""" - # Get the API key from the config - api_key = config.api.cmc_api_key - - if not api_key: - return "Error: API key not found. Please set the 'AIBTC_CMC_API_KEY' environment variable." 
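To make the tools/bun.py refactor above concrete: both entry points now delegate to _execute_script, so a caller holding a raw mnemonic can skip the wallet/secret lookup. A sketch with a placeholder seed phrase and arguments:

from tools.bun import BunScriptRunner

# New path added in this diff: supply the mnemonic directly.
result = BunScriptRunner.bun_run_with_seed_phrase(
    "abandon abandon ... art",  # placeholder mnemonic for illustration
    "aibtc-cohort-0/contract-tools",
    "deploy-agent-account.ts",
    "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM",  # remaining *args pass through unchanged
)
if not result["success"]:
    print(result["error"])

# The existing path is unchanged: BunScriptRunner.bun_run(wallet_id, script_path,
# script_name, *args) resolves the wallet's secret to a mnemonic and then calls
# the same _execute_script helper.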
- - # CoinMarketCap API URL and parameters - url = "https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest" - parameters = {"symbol": "BTC", "convert": "USD"} - - # Request headers including API key - headers = { - "Accepts": "application/json", - "X-CMC_PRO_API_KEY": api_key, - } - - try: - # Make the API request - response = requests.get(url, headers=headers, params=parameters) - response.raise_for_status() # Raise an exception for HTTP errors - - # Parse the JSON response - data = response.json() - bitcoin_data = data["data"]["BTC"] - - # Extract relevant Bitcoin data - price = bitcoin_data["quote"]["USD"]["price"] - market_cap = bitcoin_data["quote"]["USD"]["market_cap"] - volume_24h = bitcoin_data["quote"]["USD"]["volume_24h"] - percent_change_24h = bitcoin_data["quote"]["USD"]["percent_change_24h"] - percent_change_7d = bitcoin_data["quote"]["USD"]["percent_change_7d"] - - # Format the result as a string - return ( - f"Bitcoin Price: ${price:.2f}\n" - f"Market Cap: ${market_cap:.2f}\n" - f"24h Trading Volume: ${volume_24h:.2f}\n" - f"24h Change: {percent_change_24h:.2f}%\n" - f"7d Change: {percent_change_7d:.2f}%" - ) - - except requests.RequestException as e: - return f"Error fetching Bitcoin data: {e}" - - def _run(self, **kwargs) -> str: - """Execute the tool to fetch Bitcoin market data.""" - return self._deploy(**kwargs) - - async def _arun(self, **kwargs) -> str: - """Async version of the tool.""" - return self._deploy(**kwargs) diff --git a/tools/contracts.py b/tools/contracts.py index 69e5605f..b35aae66 100644 --- a/tools/contracts.py +++ b/tools/contracts.py @@ -4,8 +4,8 @@ from pydantic import BaseModel, Field from backend.models import UUID -from lib.hiro import HiroApi -from services.daos import TokenServiceError, generate_token_dependencies +from services.integrations.hiro.hiro_api import HiroApi +from services.core.dao_service import TokenServiceError, generate_token_dependencies from .bun import BunScriptRunner diff --git a/tools/dao_deployments.py b/tools/dao_deployments.py index f78278a1..73ac2e39 100644 --- a/tools/dao_deployments.py +++ b/tools/dao_deployments.py @@ -14,7 +14,7 @@ TokenBase, ) from lib.logger import configure_logger -from services.daos import ( +from services.core.dao_service import ( TokenServiceError, bind_token_to_dao, generate_dao_dependencies, diff --git a/tools/dao_ext_action_proposals.py b/tools/dao_ext_action_proposals.py index 47976187..dcba8a68 100644 --- a/tools/dao_ext_action_proposals.py +++ b/tools/dao_ext_action_proposals.py @@ -13,278 +13,6 @@ class DaoBaseInput(BaseModel): pass -class ProposeActionAddResourceInput(BaseModel): - """Input schema for proposing to add a resource action.""" - - action_proposals_voting_extension: str = Field( - ..., - description="Contract principal where the DAO creates action proposals for voting by DAO members.", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-proposals-v2" - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2" - ], - ) - action_proposal_contract_to_execute: str = Field( - ..., - description="Contract principal of the action proposal that executes adding a resource to the DAO.", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-add-resource" - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-add-resource" - ], - ) - dao_token_contract_address: str = Field( - ..., - description="Contract principal of the token used by the DAO for voting", - examples=[ - 
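The tools/contracts.py and tools/dao_deployments.py hunks above relocate the Hiro client and the DAO service helpers into domain-specific packages; any other module still importing from lib.hiro or services.daos would need the same one-line updates. The post-move imports, taken from those hunks:

from services.integrations.hiro.hiro_api import HiroApi
from services.core.dao_service import (
    TokenServiceError,
    bind_token_to_dao,
    generate_dao_dependencies,
    generate_token_dependencies,
)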
"ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", - ], - ) - resource_name: str = Field(..., description="Name of the resource to add") - resource_description: str = Field(..., description="Description of the resource") - resource_price: int = Field(..., description="Price of the resource in microstacks") - resource_url: Optional[str] = Field( - None, - description="Optional URL associated with the resource", - examples=["https://www.example.com/resource"], - ) - memo: Optional[str] = Field( - None, - description="Optional memo to include with the proposal", - examples=["Adding a new consultation resource for the DAO"], - ) - - -class ProposeActionAddResourceTool(BaseTool): - name: str = "dao_propose_action_add_resource" - description: str = ( - "This creates a proposal that DAO members can vote on to add the new resource to the " - " DAO resource contract with specified name, description, price, and optional URL." - ) - args_schema: Type[BaseModel] = ProposeActionAddResourceInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - resource_name: str, - resource_description: str, - resource_price: int, - resource_url: Optional[str] = None, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose adding a resource.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - resource_name, - resource_description, - str(resource_price), - ] - - if resource_url: - args.append(resource_url) - - if memo: - if not resource_url: - args.append("") # Add empty URL if not provided but memo is - args.append(memo) - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "propose-action-add-resource.ts", - *args, - ) - - def _run( - self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - resource_name: str, - resource_description: str, - resource_price: int, - resource_url: Optional[str] = None, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose adding a resource.""" - return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - resource_name, - resource_description, - resource_price, - resource_url, - memo, - **kwargs, - ) - - async def _arun( - self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - resource_name: str, - resource_description: str, - resource_price: int, - resource_url: Optional[str] = None, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - resource_name, - resource_description, - resource_price, - resource_url, - memo, - **kwargs, - ) - - -class ProposeActionAllowAssetInput(BaseModel): - """Input schema for proposing to allow an asset action.""" 
- - action_proposals_voting_extension: str = Field( - ..., - description="Contract principal where the DAO creates action proposals for voting by DAO members.", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-proposals-v2", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", - ], - ) - action_proposal_contract_to_execute: str = Field( - ..., - description="Contract principal of the action proposal that executes allowing an asset in the DAO treasury.", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-allow-asset", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", - ], - ) - dao_token_contract_address: str = Field( - ..., - description="Contract principal of the token used by the DAO for voting", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", - ], - ) - dao_token_contract_address_to_allow: str = Field( - ..., - description="Contract principal of the token to allow", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", - ], - ) - memo: Optional[str] = Field( - None, - description="Optional memo to include with the proposal", - examples=["Allow new token for DAO treasury operations"], - ) - - -class ProposeActionAllowAssetTool(BaseTool): - name: str = "dao_propose_action_allow_asset" - description: str = ( - "This creates a proposal that DAO members can vote on to allow a specific " - " token contract to be used within the DAO treasury contract." - ) - args_schema: Type[BaseModel] = ProposeActionAllowAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - dao_token_contract_address_to_allow: str, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose allowing an asset.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - dao_token_contract_address_to_allow, - ] - - if memo: - args.append(memo) - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "propose-action-allow-asset.ts", - *args, - ) - - def _run( - self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - dao_token_contract_address_to_allow: str, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose allowing an asset.""" - return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - dao_token_contract_address_to_allow, - memo, - **kwargs, - ) - - async def _arun( - self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - dao_token_contract_address_to_allow: str, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - 
dao_token_contract_address, - dao_token_contract_address_to_allow, - memo, - **kwargs, - ) - - class ProposeActionSendMessageInput(BaseModel): """Input schema for proposing to send a message action.""" @@ -354,8 +82,8 @@ def _deploy( args = [ action_proposals_voting_extension, action_proposal_contract_to_execute, - dao_token_contract_address, message, + dao_token_contract_address, ] if memo: @@ -363,8 +91,8 @@ def _deploy( return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "propose-action-send-message.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", + "create-action-proposal.ts", *args, ) @@ -407,10 +135,10 @@ async def _arun( ) -class ProposeActionSetAccountHolderInput(BaseModel): - """Input schema for proposing to set account holder action.""" +class VoteOnActionProposalInput(BaseModel): + """Input schema for voting on an action proposal.""" - action_proposals_voting_extension: str = Field( + dao_action_proposal_voting_contract: str = Field( ..., description="Contract principal where the DAO creates action proposals for voting by DAO members.", examples=[ @@ -418,45 +146,18 @@ class ProposeActionSetAccountHolderInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) - action_proposal_contract_to_execute: str = Field( - ..., - description="Contract principal of the action proposal that executes setting the account holder in a DAO timed vault.", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-set-account-holder", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-set-account-holder", - ], - ) - dao_token_contract_address: str = Field( - ..., - description="Contract principal of the token used by the DAO for voting", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", - ], - ) - account_holder: str = Field( - ..., - description="Address of the new account holder", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18", - "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.contract", - ], - ) - memo: Optional[str] = Field( - None, - description="Optional memo to include with the proposal", - examples=["Setting new account holder for DAO vault"], - ) + proposal_id: int = Field(..., description="ID of the proposal to vote on") + vote_for: bool = Field(..., description="True for yes/for, False for no/against") -class ProposeActionSetAccountHolderTool(BaseTool): - name: str = "dao_propose_action_set_account_holder" +class VoteOnActionProposalTool(BaseTool): + name: str = "dao_action_vote_on_proposal" description: str = ( - "This creates a proposal that DAO members can vote on to change the account holder " - "in a DAO timed vault to a specified standard or contract address." + "Vote on an existing action proposal in the DAO. " + "Allows casting a vote (true/false) on a specific proposal ID " + "in the provided action proposals contract." 
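As the send-message hunk above shows, the proposal tool now places the message ahead of the DAO token contract in its script arguments and routes to create-action-proposal.ts under the cohort-0 path. A sketch of the resulting argv; the principals and message are placeholders:

# Argument order after this hunk; the message moves ahead of the token contract.
args = [
    "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-proposals-v2",  # voting extension
    "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-send-message",  # action contract to execute
    "gm from the DAO",                                                      # message (now third)
    "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory",              # DAO token contract (now last)
]
# BunScriptRunner.bun_run(
#     wallet_id,
#     "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public",
#     "create-action-proposal.ts",
#     *args,
# )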
) - args_schema: Type[BaseModel] = ProposeActionSetAccountHolderInput + args_schema: Type[BaseModel] = VoteOnActionProposalInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -466,75 +167,55 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - account_holder: str, - memo: Optional[str] = None, + dao_action_proposal_voting_contract: str, + proposal_id: int, + vote_for: bool, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to propose setting a new account holder.""" + """Execute the tool to vote on an action proposal.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - account_holder, + dao_action_proposal_voting_contract, + str(proposal_id), + str(vote_for).lower(), ] - if memo: - args.append(memo) - return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "propose-action-set-account-holder.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", + "vote-on-action-proposal.ts", *args, ) def _run( self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - account_holder: str, - memo: Optional[str] = None, + dao_action_proposal_voting_contract: str, + proposal_id: int, + vote_for: bool, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to propose setting a new account holder.""" + """Execute the tool to vote on an action proposal.""" return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - account_holder, - memo, - **kwargs, + dao_action_proposal_voting_contract, proposal_id, vote_for, **kwargs ) async def _arun( self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - account_holder: str, - memo: Optional[str] = None, + dao_action_proposal_voting_contract: str, + proposal_id: int, + vote_for: bool, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - account_holder, - memo, - **kwargs, + dao_action_proposal_voting_contract, proposal_id, vote_for, **kwargs ) -class ProposeActionSetWithdrawalAmountInput(BaseModel): - """Input schema for proposing to set withdrawal amount action.""" +class ConcludeActionProposalInput(BaseModel): + """Input schema for concluding an action proposal.""" action_proposals_voting_extension: str = Field( ..., @@ -544,12 +225,14 @@ class ProposeActionSetWithdrawalAmountInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) + proposal_id: int = Field(..., description="ID of the proposal to conclude") action_proposal_contract_to_execute: str = Field( ..., - description="Contract principal of the action proposal that executes setting the withdrawal amount in a DAO timed vault.", + description="Contract principal of the original action proposal submitted for execution as part of the proposal", examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-set-withdrawal-amount", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-set-withdrawal-amount", + 
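A usage sketch for the reworked VoteOnActionProposalTool above; the contract principal, proposal ID, and wallet UUID are placeholders. The boolean vote_for is serialized to "true"/"false" before being handed to vote-on-action-proposal.ts.

from uuid import UUID

from tools.dao_ext_action_proposals import VoteOnActionProposalTool

vote_tool = VoteOnActionProposalTool(
    wallet_id=UUID("00000000-0000-0000-0000-000000000001")
)
result = vote_tool._run(
    dao_action_proposal_voting_contract=(
        "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-proposals-v2"
    ),
    proposal_id=1,
    vote_for=True,  # becomes the string "true" in the script args
)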
"ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-send-message", + "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-set-account-holder", + "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-toggle-resource", ], ) dao_token_contract_address: str = Field( @@ -560,103 +243,105 @@ class ProposeActionSetWithdrawalAmountInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", ], ) - withdrawal_amount: int = Field( - ..., - description="New withdrawal amount to set in microSTX", - examples=["50000000"], # 50 STX - ) - memo: Optional[str] = Field( - None, - description="Optional memo to include with the proposal", - examples=["Updating withdrawal amount to 50 STX"], - ) -class ProposeActionSetWithdrawalAmountTool(BaseTool): - name: str = "dao_propose_action_set_withdrawal_amount" +class ConcludeActionProposalTool(BaseTool): + name: str = "dao_action_conclude_proposal" description: str = ( - "This creates a proposal that DAO members can vote on to change the withdrawal amount " - " to a specified number of microSTX in a DAO timed vault." + "Conclude an existing action proposal in the DAO. " + "This finalizes the proposal and executes the action if the vote passed." ) - args_schema: Type[BaseModel] = ProposeActionSetWithdrawalAmountInput + args_schema: Type[BaseModel] = ConcludeActionProposalInput return_direct: bool = False wallet_id: Optional[UUID] = None + seed_phrase: Optional[str] = None - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + def __init__( + self, + wallet_id: Optional[UUID] = None, + seed_phrase: Optional[str] = None, + **kwargs, + ): super().__init__(**kwargs) self.wallet_id = wallet_id + self.seed_phrase = seed_phrase def _deploy( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, dao_token_contract_address: str, - withdrawal_amount: int, - memo: Optional[str] = None, + proposal_id: int, + action_proposal_contract_to_execute: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to propose setting a new withdrawal amount.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} + """Execute the tool to conclude an action proposal.""" + if self.seed_phrase is None and self.wallet_id is None: + return { + "success": False, + "message": "Either seed phrase or wallet ID is required", + "data": None, + } args = [ action_proposals_voting_extension, + str(proposal_id), action_proposal_contract_to_execute, dao_token_contract_address, - str(withdrawal_amount), ] - if memo: - args.append(memo) - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "propose-action-set-withdrawal-amount.ts", - *args, - ) + # Use seed phrase if available, otherwise fall back to wallet_id + if self.seed_phrase: + return BunScriptRunner.bun_run_with_seed_phrase( + self.seed_phrase, + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", + "conclude-action-proposal.ts", + *args, + ) + else: + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", + "conclude-action-proposal.ts", + *args, + ) def _run( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, dao_token_contract_address: str, - withdrawal_amount: int, - memo: Optional[str] = None, + proposal_id: int, + action_proposal_contract_to_execute: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to propose setting a new withdrawal 
amount.""" + """Execute the tool to conclude an action proposal.""" return self._deploy( action_proposals_voting_extension, - action_proposal_contract_to_execute, dao_token_contract_address, - withdrawal_amount, - memo, + proposal_id, + action_proposal_contract_to_execute, **kwargs, ) async def _arun( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, dao_token_contract_address: str, - withdrawal_amount: int, - memo: Optional[str] = None, + proposal_id: int, + action_proposal_contract_to_execute: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" return self._deploy( action_proposals_voting_extension, - action_proposal_contract_to_execute, dao_token_contract_address, - withdrawal_amount, - memo, + proposal_id, + action_proposal_contract_to_execute, **kwargs, ) -class ProposeActionSetWithdrawalPeriodInput(BaseModel): - """Input schema for proposing to set withdrawal period action.""" +class GetLiquidSupplyInput(BaseModel): + """Input schema for getting the liquid supply.""" action_proposals_voting_extension: str = Field( ..., @@ -666,41 +351,18 @@ class ProposeActionSetWithdrawalPeriodInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) - action_proposal_contract_to_execute: str = Field( - ..., - description="Contract principal of the action proposal that executes setting the withdrawal period in a DAO timed vault.", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-set-withdrawal-period", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-set-withdrawal-period", - ], - ) - dao_token_contract_address: str = Field( - ..., - description="Contract principal of the token used by the DAO for voting", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", - ], - ) - withdrawal_period: int = Field( - ..., - description="New withdrawal period to set in Bitcoin blocks", - examples=["144"], # 1 day in BTC blocks - ) - memo: Optional[str] = Field( - None, - description="Optional memo to include with the proposal", - examples=["Updating withdrawal period to 1 day (144 blocks)"], + stacks_block_height: int = Field( + ..., description="Stacks block height to query the liquid supply at" ) -class ProposeActionSetWithdrawalPeriodTool(BaseTool): - name: str = "dao_propose_action_set_withdrawal_period" +class GetLiquidSupplyTool(BaseTool): + name: str = "dao_action_get_liquid_supply" description: str = ( - "This creates a proposal that DAO members can vote on to change the withdrawal period " - " to a specified number of Bitcoin blocks in a DAO timed vault." + "Get the liquid supply of the DAO token at a specific Stacks block height. " + "Returns the total amount of tokens that are liquid at that block." 
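A sketch of concluding a proposal with the updated ConcludeActionProposalTool above. It now accepts either a seed phrase or a wallet ID, with the seed phrase taking precedence when both are set; every value below is a placeholder.

from tools.dao_ext_action_proposals import ConcludeActionProposalTool

conclude_tool = ConcludeActionProposalTool(
    seed_phrase="abandon abandon ... art"  # placeholder mnemonic
)
result = conclude_tool._run(
    action_proposals_voting_extension=(
        "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-proposals-v2"
    ),
    dao_token_contract_address=(
        "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory"
    ),
    proposal_id=1,
    action_proposal_contract_to_execute=(
        "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-send-message"
    ),
)
# Internally the args are ordered as
# [extension, str(proposal_id), action_contract, dao_token_contract] and routed to
# conclude-action-proposal.ts via bun_run_with_seed_phrase when a phrase is set.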
) - args_schema: Type[BaseModel] = ProposeActionSetWithdrawalPeriodInput + args_schema: Type[BaseModel] = GetLiquidSupplyInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -711,74 +373,50 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - withdrawal_period: int, - memo: Optional[str] = None, + stacks_block_height: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to propose setting a new withdrawal period.""" + """Execute the tool to get the liquid supply.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - str(withdrawal_period), + str(stacks_block_height), ] - if memo: - args.append(memo) - return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "propose-action-set-withdrawal-period.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-liquid-supply.ts", *args, ) def _run( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - withdrawal_period: int, - memo: Optional[str] = None, + stacks_block_height: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to propose setting a new withdrawal period.""" + """Execute the tool to get the liquid supply.""" return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - withdrawal_period, - memo, - **kwargs, + action_proposals_voting_extension, stacks_block_height, **kwargs ) async def _arun( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - withdrawal_period: int, - memo: Optional[str] = None, + stacks_block_height: int, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - withdrawal_period, - memo, - **kwargs, + action_proposals_voting_extension, stacks_block_height, **kwargs ) -class VoteOnActionProposalInput(BaseModel): - """Input schema for voting on an action proposal.""" +class GetProposalInput(BaseModel): + """Input schema for getting proposal data.""" action_proposals_voting_extension: str = Field( ..., @@ -788,18 +426,16 @@ class VoteOnActionProposalInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) - proposal_id: int = Field(..., description="ID of the proposal to vote on") - vote: bool = Field(..., description="True for yes/for, False for no/against") + proposal_id: int = Field(..., description="ID of the proposal to retrieve") -class VoteOnActionProposalTool(BaseTool): - name: str = "dao_action_vote_on_proposal" +class GetProposalTool(BaseTool): + name: str = "dao_action_get_proposal" description: str = ( - "Vote on an existing action proposal in the DAO. " - "Allows casting a vote (true/false) on a specific proposal ID " - "in the provided action proposals contract." + "Get the data for a specific proposal from the DAO action proposals contract. " + "Returns all stored information about the proposal if it exists." 
) - args_schema: Type[BaseModel] = VoteOnActionProposalInput + args_schema: Type[BaseModel] = GetProposalInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -811,23 +447,21 @@ def _deploy( self, action_proposals_voting_extension: str, proposal_id: int, - vote: bool, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to vote on an action proposal.""" + """Execute the tool to get proposal data.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ action_proposals_voting_extension, str(proposal_id), - str(vote).lower(), ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "vote-on-proposal.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-proposal.ts", *args, ) @@ -835,29 +469,23 @@ def _run( self, action_proposals_voting_extension: str, proposal_id: int, - vote: bool, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to vote on an action proposal.""" - return self._deploy( - action_proposals_voting_extension, proposal_id, vote, **kwargs - ) + """Execute the tool to get proposal data.""" + return self._deploy(action_proposals_voting_extension, proposal_id, **kwargs) async def _arun( self, action_proposals_voting_extension: str, proposal_id: int, - vote: bool, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy( - action_proposals_voting_extension, proposal_id, vote, **kwargs - ) + return self._deploy(action_proposals_voting_extension, proposal_id, **kwargs) -class ConcludeActionProposalInput(BaseModel): - """Input schema for concluding an action proposal.""" +class GetVotingConfigurationInput(BaseModel): + """Input schema for getting voting configuration.""" action_proposals_voting_extension: str = Field( ..., @@ -867,33 +495,15 @@ class ConcludeActionProposalInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) - proposal_id: int = Field(..., description="ID of the proposal to conclude") - action_proposal_contract_to_execute: str = Field( - ..., - description="Contract principal of the original action proposal submitted for execution as part of the proposal", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-send-message", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-set-account-holder", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-toggle-resource", - ], - ) - dao_token_contract_address: str = Field( - ..., - description="Contract principal of the token used by the DAO for voting", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", - ], - ) -class ConcludeActionProposalTool(BaseTool): - name: str = "dao_action_conclude_proposal" +class GetVotingConfigurationTool(BaseTool): + name: str = "dao_action_get_voting_configuration" description: str = ( - "Conclude an existing action proposal in the DAO. " - "This finalizes the proposal and executes the action if the vote passed." + "Get the voting configuration from the DAO action proposals contract. " + "Returns the current voting parameters and settings used for proposals." 
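The read-only tools in this section (liquid supply, proposal data, voting configuration) all follow the same pattern: build a short args list and run a script under the .../action-proposal-voting/read-only directory. GetProposalTool as a representative sketch, with placeholder values:

from uuid import UUID

from tools.dao_ext_action_proposals import GetProposalTool

get_proposal = GetProposalTool(
    wallet_id=UUID("00000000-0000-0000-0000-000000000001")
)
result = get_proposal._run(
    action_proposals_voting_extension=(
        "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-proposals-v2"
    ),
    proposal_id=1,
)
# Routes to get-proposal.ts in
# aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only.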
) - args_schema: Type[BaseModel] = ConcludeActionProposalInput + args_schema: Type[BaseModel] = GetVotingConfigurationInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -904,66 +514,42 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, action_proposals_voting_extension: str, - dao_token_contract_address: str, - proposal_id: int, - action_proposal_contract_to_execute: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to conclude an action proposal.""" + """Execute the tool to get voting configuration.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ action_proposals_voting_extension, - str(proposal_id), - action_proposal_contract_to_execute, - dao_token_contract_address, ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "conclude-proposal.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-voting-configuration.ts", *args, ) def _run( self, action_proposals_voting_extension: str, - dao_token_contract_address: str, - proposal_id: int, - action_proposal_contract_to_execute: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to conclude an action proposal.""" - return self._deploy( - action_proposals_voting_extension, - dao_token_contract_address, - proposal_id, - action_proposal_contract_to_execute, - **kwargs, - ) + """Execute the tool to get voting configuration.""" + return self._deploy(action_proposals_voting_extension, **kwargs) async def _arun( self, action_proposals_voting_extension: str, - dao_token_contract_address: str, - proposal_id: int, - action_proposal_contract_to_execute: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy( - action_proposals_voting_extension, - dao_token_contract_address, - proposal_id, - action_proposal_contract_to_execute, - **kwargs, - ) + return self._deploy(action_proposals_voting_extension, **kwargs) -class ProposeActionToggleResourceInput(BaseModel): - """Input schema for proposing to toggle a resource action.""" +class GetVotingPowerInput(BaseModel): + """Input schema for getting voting power.""" action_proposals_voting_extension: str = Field( ..., @@ -973,41 +559,21 @@ class ProposeActionToggleResourceInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) - action_proposal_contract_to_execute: str = Field( - ..., - description="Contract principal of the action proposal that executes toggling a resource in the DAO.", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-action-toggle-resource", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-toggle-resource", - ], - ) - dao_token_contract_address: str = Field( - ..., - description="Contract principal of the token used by the DAO for voting", - examples=[ - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-faktory", - "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", - ], - ) - resource_name: str = Field( + proposal_id: int = Field(..., description="ID of the proposal to check") + voter_address: str = Field( ..., - description="Name of the resource to toggle", - examples=["apiv1", "protected-content", "1hr consulting"], - ) - memo: Optional[str] = Field( - None, - description="Optional memo to include with the proposal", - examples=["Toggling availability of consulting resource"], + description="Address of the voter to check voting power for", + 
examples=["ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18"], ) -class ProposeActionToggleResourceTool(BaseTool): - name: str = "dao_propose_action_toggle_resource" +class GetVotingPowerTool(BaseTool): + name: str = "dao_action_get_voting_power" description: str = ( - "This creates a proposal that DAO members can vote on to enable or disable " - "whether a specific resource can be paid for in the DAO resource contract." + "Get the voting power of a specific address for a proposal. " + "Returns the number of votes the address can cast on the given proposal." ) - args_schema: Type[BaseModel] = ProposeActionToggleResourceInput + args_schema: Type[BaseModel] = GetVotingPowerInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -1018,76 +584,44 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - resource_name: str, - memo: Optional[str] = None, + proposal_id: int, + voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to propose toggling a resource.""" + """Execute the tool to get voting power.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - resource_name, + str(proposal_id), + voter_address, ] - if memo: - args.append(memo) - return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/public", - "propose-action-toggle-resource-by-name.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-voting-power.ts", *args, ) def _run( self, action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - resource_name: str, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose toggling a resource.""" - return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - resource_name, - memo, - **kwargs, - ) - - async def _arun( - self, - action_proposals_voting_extension: str, - action_proposal_contract_to_execute: str, - dao_token_contract_address: str, - resource_name: str, - memo: Optional[str] = None, + proposal_id: int, + voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Async version of the tool.""" + """Execute the tool to get voting power.""" return self._deploy( - action_proposals_voting_extension, - action_proposal_contract_to_execute, - dao_token_contract_address, - resource_name, - memo, - **kwargs, + action_proposals_voting_extension, proposal_id, voter_address, **kwargs ) -class GetLiquidSupplyInput(BaseModel): - """Input schema for getting the liquid supply.""" +class VetoActionProposalInput(BaseModel): + """Input schema for vetoing an action proposal.""" - action_proposals_voting_extension: str = Field( + dao_action_proposal_voting_contract: str = Field( ..., description="Contract principal where the DAO creates action proposals for voting by DAO members.", examples=[ @@ -1095,18 +629,17 @@ class GetLiquidSupplyInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) - stacks_block_height: int = Field( - ..., description="Stacks block height to query the liquid supply at" - ) + proposal_id: int = Field(..., description="ID of the proposal to veto") -class 
GetLiquidSupplyTool(BaseTool): - name: str = "dao_action_get_liquid_supply" +class VetoActionProposalTool(BaseTool): + name: str = "dao_action_veto_proposal" description: str = ( - "Get the liquid supply of the DAO token at a specific Stacks block height. " - "Returns the total amount of tokens that are liquid at that block." + "Veto an existing action proposal in the DAO. " + "Allows casting a veto vote on a specific proposal ID " + "in the provided action proposals contract." ) - args_schema: Type[BaseModel] = GetLiquidSupplyInput + args_schema: Type[BaseModel] = VetoActionProposalInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -1116,53 +649,49 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, - action_proposals_voting_extension: str, - stacks_block_height: int, + dao_action_proposal_voting_contract: str, + proposal_id: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get the liquid supply.""" + """Execute the tool to veto an action proposal.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ - action_proposals_voting_extension, - str(stacks_block_height), + dao_action_proposal_voting_contract, + str(proposal_id), ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/read-only", - "get-liquid-supply.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", + "veto-action-proposal.ts", *args, ) def _run( self, - action_proposals_voting_extension: str, - stacks_block_height: int, + dao_action_proposal_voting_contract: str, + proposal_id: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get the liquid supply.""" - return self._deploy( - action_proposals_voting_extension, stacks_block_height, **kwargs - ) + """Execute the tool to veto an action proposal.""" + return self._deploy(dao_action_proposal_voting_contract, proposal_id, **kwargs) async def _arun( self, - action_proposals_voting_extension: str, - stacks_block_height: int, + dao_action_proposal_voting_contract: str, + proposal_id: int, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy( - action_proposals_voting_extension, stacks_block_height, **kwargs - ) + return self._deploy(dao_action_proposal_voting_contract, proposal_id, **kwargs) -class GetProposalInput(BaseModel): - """Input schema for getting proposal data.""" +class GetTotalProposalsInput(BaseModel): + """Input schema for getting total proposals data.""" - action_proposals_voting_extension: str = Field( + dao_action_proposal_voting_contract: str = Field( ..., description="Contract principal where the DAO creates action proposals for voting by DAO members.", examples=[ @@ -1170,16 +699,15 @@ class GetProposalInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) - proposal_id: int = Field(..., description="ID of the proposal to retrieve") -class GetProposalTool(BaseTool): - name: str = "dao_action_get_proposal" +class GetTotalProposalsTool(BaseTool): + name: str = "dao_action_get_total_proposals" description: str = ( - "Get the data for a specific proposal from the DAO action proposals contract. " - "Returns all stored information about the proposal if it exists." + "Get the total proposals data from the DAO action proposals contract. " + "Returns counts of proposals and last proposal block information." 
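A minimal usage sketch for the new veto tool, assuming this hunk belongs to the existing action-proposal tools module; the import path and wallet UUID below are placeholders, while the contract principal is taken from the schema examples above.

from uuid import UUID

# Hypothetical import path; adjust to wherever this module actually lives.
from tools.dao_ext_action_proposals import VetoActionProposalTool

contract = "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2"
wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder wallet

# _run forwards to _deploy, which returns the wallet-ID-required error dict when
# no wallet is set, and otherwise runs veto-action-proposal.ts via BunScriptRunner.
result = VetoActionProposalTool(wallet_id=wallet_id)._run(
    dao_action_proposal_voting_contract=contract,
    proposal_id=1,
)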
) - args_schema: Type[BaseModel] = GetProposalInput + args_schema: Type[BaseModel] = GetTotalProposalsInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -1189,49 +717,45 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, - action_proposals_voting_extension: str, - proposal_id: int, + dao_action_proposal_voting_contract: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get proposal data.""" + """Execute the tool to get total proposals data.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ - action_proposals_voting_extension, - str(proposal_id), + dao_action_proposal_voting_contract, ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/read-only", - "get-proposal.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-total-proposals.ts", *args, ) def _run( self, - action_proposals_voting_extension: str, - proposal_id: int, + dao_action_proposal_voting_contract: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get proposal data.""" - return self._deploy(action_proposals_voting_extension, proposal_id, **kwargs) + """Execute the tool to get total proposals data.""" + return self._deploy(dao_action_proposal_voting_contract, **kwargs) async def _arun( self, - action_proposals_voting_extension: str, - proposal_id: int, + dao_action_proposal_voting_contract: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy(action_proposals_voting_extension, proposal_id, **kwargs) + return self._deploy(dao_action_proposal_voting_contract, **kwargs) -class GetTotalVotesInput(BaseModel): - """Input schema for getting total votes for a voter.""" +class GetVetoVoteRecordInput(BaseModel): + """Input schema for getting a veto vote record.""" - action_proposals_voting_extension: str = Field( + dao_action_proposal_voting_contract: str = Field( ..., description="Contract principal where the DAO creates action proposals for voting by DAO members.", examples=[ @@ -1240,16 +764,20 @@ class GetTotalVotesInput(BaseModel): ], ) proposal_id: int = Field(..., description="ID of the proposal to check") - voter_address: str = Field(..., description="Address of the voter to check") + voter_address: str = Field( + ..., + description="Address of the voter to check the veto vote record for", + examples=["ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18"], + ) -class GetTotalVotesTool(BaseTool): - name: str = "dao_action_get_total_votes" +class GetVetoVoteRecordTool(BaseTool): + name: str = "dao_action_get_veto_vote_record" description: str = ( - "Get the total votes cast by a specific voter on a proposal. " - "Returns the number of votes the voter has cast on the given proposal." + "Get the veto vote record for a specific voter on a proposal. " + "Returns the amount of veto votes if a record exists, otherwise null." 
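A similar sketch for the total-proposals lookup, which only needs the voting contract; the import path and wallet UUID are again placeholders.

from uuid import UUID

from tools.dao_ext_action_proposals import GetTotalProposalsTool  # hypothetical path

contract = "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2"
totals_tool = GetTotalProposalsTool(
    wallet_id=UUID("00000000-0000-0000-0000-000000000000")  # placeholder wallet
)

# Runs get-total-proposals.ts from the read-only script directory and returns the
# proposal counts and last-proposal block information described above.
totals = totals_tool._run(dao_action_proposal_voting_contract=contract)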
) - args_schema: Type[BaseModel] = GetTotalVotesInput + args_schema: Type[BaseModel] = GetVetoVoteRecordInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -1259,57 +787,63 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, - action_proposals_voting_extension: str, + dao_action_proposal_voting_contract: str, proposal_id: int, voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get total votes.""" + """Execute the tool to get a veto vote record.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ - action_proposals_voting_extension, + dao_action_proposal_voting_contract, str(proposal_id), voter_address, ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/read-only", - "get-total-votes.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-veto-vote-record.ts", *args, ) def _run( self, - action_proposals_voting_extension: str, + dao_action_proposal_voting_contract: str, proposal_id: int, voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get total votes.""" + """Execute the tool to get a veto vote record.""" return self._deploy( - action_proposals_voting_extension, proposal_id, voter_address, **kwargs + dao_action_proposal_voting_contract, + proposal_id, + voter_address, + **kwargs, ) async def _arun( self, - action_proposals_voting_extension: str, + dao_action_proposal_voting_contract: str, proposal_id: int, voter_address: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" return self._deploy( - action_proposals_voting_extension, proposal_id, voter_address, **kwargs + dao_action_proposal_voting_contract, + proposal_id, + voter_address, + **kwargs, ) -class GetVotingConfigurationInput(BaseModel): - """Input schema for getting voting configuration.""" +class GetVoteRecordInput(BaseModel): + """Input schema for getting a vote record.""" - action_proposals_voting_extension: str = Field( + dao_action_proposal_voting_contract: str = Field( ..., description="Contract principal where the DAO creates action proposals for voting by DAO members.", examples=[ @@ -1317,15 +851,21 @@ class GetVotingConfigurationInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2", ], ) + proposal_id: int = Field(..., description="ID of the proposal to check") + voter_address: str = Field( + ..., + description="Address of the voter to check the vote record for", + examples=["ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18"], + ) -class GetVotingConfigurationTool(BaseTool): - name: str = "dao_action_get_voting_configuration" +class GetVoteRecordTool(BaseTool): + name: str = "dao_action_get_vote_record" description: str = ( - "Get the voting configuration from the DAO action proposals contract. " - "Returns the current voting parameters and settings used for proposals." + "Get the vote record for a specific voter on a proposal. " + "Returns the vote (true/false) and amount if a record exists, otherwise null." 
) - args_schema: Type[BaseModel] = GetVotingConfigurationInput + args_schema: Type[BaseModel] = GetVoteRecordInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -1335,46 +875,63 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, - action_proposals_voting_extension: str, + dao_action_proposal_voting_contract: str, + proposal_id: int, + voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get voting configuration.""" + """Execute the tool to get a vote record.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ - action_proposals_voting_extension, + dao_action_proposal_voting_contract, + str(proposal_id), + voter_address, ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/read-only", - "get-voting-configuration.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-vote-record.ts", *args, ) def _run( self, - action_proposals_voting_extension: str, + dao_action_proposal_voting_contract: str, + proposal_id: int, + voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get voting configuration.""" - return self._deploy(action_proposals_voting_extension, **kwargs) + """Execute the tool to get a vote record.""" + return self._deploy( + dao_action_proposal_voting_contract, + proposal_id, + voter_address, + **kwargs, + ) async def _arun( self, - action_proposals_voting_extension: str, - dao_token_contract_address: str, + dao_action_proposal_voting_contract: str, + proposal_id: int, + voter_address: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy(action_proposals_voting_extension, **kwargs) + return self._deploy( + dao_action_proposal_voting_contract, + proposal_id, + voter_address, + **kwargs, + ) -class GetVotingPowerInput(BaseModel): - """Input schema for getting voting power.""" +class GetVoteRecordsInput(BaseModel): + """Input schema for getting vote records (vote and veto vote).""" - action_proposals_voting_extension: str = Field( + dao_action_proposal_voting_contract: str = Field( ..., description="Contract principal where the DAO creates action proposals for voting by DAO members.", examples=[ @@ -1385,18 +942,19 @@ class GetVotingPowerInput(BaseModel): proposal_id: int = Field(..., description="ID of the proposal to check") voter_address: str = Field( ..., - description="Address of the voter to check voting power for", + description="Address of the voter to check vote records for", examples=["ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18"], ) -class GetVotingPowerTool(BaseTool): - name: str = "dao_action_get_voting_power" +class GetVoteRecordsTool(BaseTool): + name: str = "dao_action_get_vote_records" description: str = ( - "Get the voting power of a specific address for a proposal. " - "Returns the number of votes the address can cast on the given proposal." + "Get both the regular vote record and veto vote record for a specific voter on a proposal. " + "Returns an object containing 'voteRecord' (vote and amount, or null) and " + "'vetoVoteRecord' (amount, or null)." 
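A sketch for the per-voter lookups: both record tools take the contract, proposal ID, and voter address (the voter address below comes from the schema examples); the import path and wallet UUID are placeholders.

from uuid import UUID

from tools.dao_ext_action_proposals import GetVoteRecordTool  # hypothetical path

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder wallet
contract = "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-action-proposals-v2"
voter = "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18"

# Returns the vote (true/false) and amount if a record exists, otherwise null,
# mirroring get-vote-record.ts; GetVoteRecordsTool takes the same three arguments
# and returns both the vote record and the veto vote record in one call.
record = GetVoteRecordTool(wallet_id=wallet_id)._run(
    dao_action_proposal_voting_contract=contract,
    proposal_id=1,
    voter_address=voter,
)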
) - args_schema: Type[BaseModel] = GetVotingPowerInput + args_schema: Type[BaseModel] = GetVoteRecordsInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -1406,38 +964,56 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, - action_proposals_voting_extension: str, + dao_action_proposal_voting_contract: str, proposal_id: int, voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get voting power.""" + """Execute the tool to get vote records.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} args = [ - action_proposals_voting_extension, + dao_action_proposal_voting_contract, str(proposal_id), voter_address, ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/extensions/action-proposals/read-only", - "get-voting-power.ts", + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/read-only", + "get-vote-records.ts", *args, ) def _run( self, - action_proposals_voting_extension: str, + dao_action_proposal_voting_contract: str, proposal_id: int, voter_address: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get voting power.""" + """Execute the tool to get vote records.""" return self._deploy( - action_proposals_voting_extension, proposal_id, voter_address, **kwargs + dao_action_proposal_voting_contract, + proposal_id, + voter_address, + **kwargs, + ) + + async def _arun( + self, + dao_action_proposal_voting_contract: str, + proposal_id: int, + voter_address: str, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + dao_action_proposal_voting_contract, + proposal_id, + voter_address, + **kwargs, ) async def _arun( diff --git a/tools/dao_ext_core_proposals.py b/tools/dao_ext_core_proposals.py deleted file mode 100644 index 7fc11821..00000000 --- a/tools/dao_ext_core_proposals.py +++ /dev/null @@ -1,236 +0,0 @@ -from typing import Any, Dict, Optional, Type -from uuid import UUID - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from tools.bun import BunScriptRunner - - -class GenerateCoreProposalInput(BaseModel): - """Input schema for generating a core proposal.""" - - dao_deployer_address: str = Field( - ..., - description="The address of the DAO deployer", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_symbol: str = Field( - ..., - description="The token symbol for the DAO", - example="aibtc", - ) - proposal_contract_name: str = Field( - ..., - description="The name of the proposal contract", - example="aibtc-treasury-withdraw-stx", - ) - proposal_args: Dict[str, str] = Field( - ..., - description="Arguments for the proposal in key-value format", - example={ - "stx_amount": "1000000", - "recipient_address": "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - }, - ) - generate_files: bool = Field( - False, - description="Whether to generate and save proposal files", - ) - - -class GenerateCoreProposalTool(BaseTool): - name: str = "dao_generate_core_proposal" - description: str = ( - "Generate a core proposal for the DAO. " - "This will create the proposal contract but not deploy it. " - "Returns the generated proposal details if successful." 
- ) - args_schema: Type[BaseModel] = GenerateCoreProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - dao_deployer_address: str, - dao_token_symbol: str, - proposal_contract_name: str, - proposal_args: Dict[str, str], - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - dao_deployer_address, - dao_token_symbol, - proposal_contract_name, - str(proposal_args).replace("'", '"'), # Convert Python dict to JSON string - str(generate_files).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/core-proposals", - "generate-core-proposal.ts", - *args, - ) - - def _run( - self, - dao_deployer_address: str, - dao_token_symbol: str, - proposal_contract_name: str, - proposal_args: Dict[str, str], - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate a core proposal.""" - return self._deploy( - dao_deployer_address, - dao_token_symbol, - proposal_contract_name, - proposal_args, - generate_files, - **kwargs, - ) - - async def _arun( - self, - dao_deployer_address: str, - dao_token_symbol: str, - proposal_contract_name: str, - proposal_args: Dict[str, str], - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - dao_deployer_address, - dao_token_symbol, - proposal_contract_name, - proposal_args, - generate_files, - **kwargs, - ) - - -class DeployCoreProposalInput(BaseModel): - """Input schema for deploying a core proposal.""" - - dao_deployer_address: str = Field( - ..., - description="The address of the DAO deployer", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_symbol: str = Field( - ..., - description="The token symbol for the DAO", - example="aibtc", - ) - proposal_contract_name: str = Field( - ..., - description="The name of the proposal contract", - example="aibtc-treasury-withdraw-stx", - ) - proposal_args: Dict[str, str] = Field( - ..., - description="Arguments for the proposal in key-value format", - example={ - "stx_amount": "1000000", - "recipient_address": "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - }, - ) - generate_files: bool = Field( - False, - description="Whether to generate and save proposal files", - ) - - -class DeployCoreProposalTool(BaseTool): - name: str = "dao_deploy_core_proposal" - description: str = ( - "Deploy a core proposal for the DAO. " - "This will generate and deploy the proposal contract. " - "This is a required step before proposing. " - "Returns the deployment details if successful." 
- ) - args_schema: Type[BaseModel] = DeployCoreProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - dao_deployer_address: str, - dao_token_symbol: str, - proposal_contract_name: str, - proposal_args: Dict[str, str], - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - dao_deployer_address, - dao_token_symbol, - proposal_contract_name, - str(proposal_args).replace("'", '"'), # Convert Python dict to JSON string - str(generate_files).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/core-proposals", - "deploy-core-proposal.ts", - *args, - ) - - def _run( - self, - dao_deployer_address: str, - dao_token_symbol: str, - proposal_contract_name: str, - proposal_args: Dict[str, str], - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy a core proposal.""" - return self._deploy( - dao_deployer_address, - dao_token_symbol, - proposal_contract_name, - proposal_args, - generate_files, - **kwargs, - ) - - async def _arun( - self, - dao_deployer_address: str, - dao_token_symbol: str, - proposal_contract_name: str, - proposal_args: Dict[str, str], - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - dao_deployer_address, - dao_token_symbol, - proposal_contract_name, - proposal_args, - generate_files, - **kwargs, - ) diff --git a/tools/dao_ext_onchain_messaging.py b/tools/dao_ext_onchain_messaging.py deleted file mode 100644 index 7bbb7219..00000000 --- a/tools/dao_ext_onchain_messaging.py +++ /dev/null @@ -1,73 +0,0 @@ -from typing import Any, Dict, Optional, Type -from uuid import UUID - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from tools.bun import BunScriptRunner - - -class SendMessageInput(BaseModel): - """Input schema for sending an onchain message.""" - - messaging_contract: str = Field( - ..., - description="Contract principal of the messaging contract for the DAO", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.dao-onchain-messaging", - ) - message: str = Field(..., description="Message to send") - - -class SendMessageTool(BaseTool): - name: str = "dao_messaging_send" - description: str = ( - "Send a message through the DAO's onchain messaging system. " - "Messages are stored permanently on the blockchain and can be viewed by anyone." 
- ) - args_schema: Type[BaseModel] = SendMessageInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - messaging_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to send a message.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - messaging_contract, - message, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/onchain-messaging/public", - "send-message.ts", - *args, - ) - - def _run( - self, - messaging_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to send a message.""" - return self._deploy(messaging_contract, message, **kwargs) - - async def _arun( - self, - messaging_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(messaging_contract, message, **kwargs) diff --git a/tools/dao_ext_payments_invoices.py b/tools/dao_ext_payments_invoices.py deleted file mode 100644 index 919ecae3..00000000 --- a/tools/dao_ext_payments_invoices.py +++ /dev/null @@ -1,338 +0,0 @@ -from typing import Any, Dict, Optional, Type -from uuid import UUID - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from tools.bun import BunScriptRunner - - -class GetInvoiceInput(BaseModel): - """Input schema for getting invoice details.""" - - payments_invoices_contract: str = Field( - ..., - description="Contract principal of the payments and invoices contract", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-payments-invoices", - ) - invoice_index: int = Field(..., description="Index of the invoice to retrieve") - - -class GetInvoiceTool(BaseTool): - name: str = "dao_get_invoice" - description: str = ( - "Get details of a specific invoice from the DAO's payments and invoices system. " - "Returns the full invoice data if it exists." 
- ) - args_schema: Type[BaseModel] = GetInvoiceInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - payments_invoices_contract: str, - invoice_index: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get invoice details.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [payments_invoices_contract, str(invoice_index)] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/payments-invoices/read-only", - "get-invoice.ts", - *args, - ) - - def _run( - self, - payments_invoices_contract: str, - invoice_index: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get invoice details.""" - return self._deploy(payments_invoices_contract, invoice_index, **kwargs) - - async def _arun( - self, - payments_invoices_contract: str, - invoice_index: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(payments_invoices_contract, invoice_index, **kwargs) - - -class GetResourceInput(BaseModel): - """Input schema for getting resource details.""" - - payments_invoices_contract: str = Field( - ..., - description="Contract principal of the payments and invoices contract", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-payments-invoices", - ) - resource_index: int = Field(..., description="Index of the resource to retrieve") - - -class GetResourceTool(BaseTool): - name: str = "dao_get_resource" - description: str = ( - "Get details of a specific resource from the DAO's payments and invoices system. " - "Returns the full resource data if it exists." 
- ) - args_schema: Type[BaseModel] = GetResourceInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - payments_invoices_contract: str, - resource_index: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get resource details.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [payments_invoices_contract, str(resource_index)] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/payments-invoices/read-only", - "get-resource.ts", - *args, - ) - - def _run( - self, - payments_invoices_contract: str, - resource_index: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get resource details.""" - return self._deploy(payments_invoices_contract, resource_index, **kwargs) - - async def _arun( - self, - payments_invoices_contract: str, - resource_index: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(payments_invoices_contract, resource_index, **kwargs) - - -class GetResourceByNameInput(BaseModel): - """Input schema for getting resource details by name.""" - - payments_invoices_contract: str = Field( - ..., - description="Contract principal of the payments and invoices contract", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-payments-invoices", - ) - resource_name: str = Field(..., description="Name of the resource to retrieve") - - -class GetResourceByNameTool(BaseTool): - name: str = "dao_get_resource_by_name" - description: str = ( - "Get details of a specific resource by its name from the DAO's payments and invoices system. " - "Returns the full resource data if it exists." 
- ) - args_schema: Type[BaseModel] = GetResourceByNameInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - payments_invoices_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get resource details by name.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [payments_invoices_contract, resource_name] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/payments-invoices/read-only", - "get-resource-by-name.ts", - *args, - ) - - def _run( - self, - payments_invoices_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get resource details by name.""" - return self._deploy(payments_invoices_contract, resource_name, **kwargs) - - async def _arun( - self, - payments_invoices_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(payments_invoices_contract, resource_name, **kwargs) - - -class PayInvoiceInput(BaseModel): - """Input schema for paying an invoice.""" - - payments_invoices_contract: str = Field( - ..., - description="Contract principal of the payments and invoices contract", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-payments-invoices", - ) - resource_index: int = Field(..., description="Index of the resource to pay for") - memo: Optional[str] = Field( - None, description="Optional memo to include with the payment" - ) - - -class PayInvoiceTool(BaseTool): - name: str = "dao_pay_invoice" - description: str = ( - "Pay an invoice for a specific resource in the DAO's payments and invoices system. " - "Optionally includes a memo with the payment." 
- ) - args_schema: Type[BaseModel] = PayInvoiceInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - payments_invoices_contract: str, - resource_index: int, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to pay an invoice.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [payments_invoices_contract, str(resource_index)] - if memo: - args.append(memo) - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/payments-invoices/public", - "pay-invoice.ts", - *args, - ) - - def _run( - self, - payments_invoices_contract: str, - resource_index: int, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to pay an invoice.""" - return self._deploy(payments_invoices_contract, resource_index, memo, **kwargs) - - async def _arun( - self, - payments_invoices_contract: str, - resource_index: int, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(payments_invoices_contract, resource_index, memo, **kwargs) - - -class PayInvoiceByResourceNameInput(BaseModel): - """Input schema for paying an invoice by resource name.""" - - payments_invoices_contract: str = Field( - ..., - description="Contract principal of the payments and invoices contract", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-payments-invoices", - ) - resource_name: str = Field(..., description="Name of the resource to pay for") - memo: Optional[str] = Field( - None, description="Optional memo to include with the payment" - ) - - -class PayInvoiceByResourceNameTool(BaseTool): - name: str = "dao_pay_invoice_by_resource_name" - description: str = ( - "Pay an invoice for a specific resource by its name in the DAO's payments and invoices system. " - "Optionally includes a memo with the payment." 
- ) - args_schema: Type[BaseModel] = PayInvoiceByResourceNameInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - payments_invoices_contract: str, - resource_name: str, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to pay an invoice by resource name.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [payments_invoices_contract, resource_name] - if memo: - args.append(memo) - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/payments-invoices/public", - "pay-invoice-by-resource-name.ts", - *args, - ) - - def _run( - self, - payments_invoices_contract: str, - resource_name: str, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to pay an invoice by resource name.""" - return self._deploy(payments_invoices_contract, resource_name, memo, **kwargs) - - async def _arun( - self, - payments_invoices_contract: str, - resource_name: str, - memo: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(payments_invoices_contract, resource_name, memo, **kwargs) diff --git a/tools/dao_ext_timed_vault.py b/tools/dao_ext_timed_vault.py deleted file mode 100644 index 60c44025..00000000 --- a/tools/dao_ext_timed_vault.py +++ /dev/null @@ -1,188 +0,0 @@ -from typing import Any, Dict, Optional, Type -from uuid import UUID - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from tools.bun import BunScriptRunner - - -class GetAccountTermsInput(BaseModel): - """Input schema for getting timed vault terms.""" - - timed_vault_contract: str = Field( - ..., - description="Contract principal of the timed vault", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-timed-vault", - ) - - -class GetAccountTermsTool(BaseTool): - name: str = "dao_timedvault_get_account_terms" - description: str = ( - "Get the current terms of the DAO's timed vault. " - "Returns information about withdrawal limits, periods, and account holder." 
- ) - args_schema: Type[BaseModel] = GetAccountTermsInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - timed_vault_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get account terms.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [timed_vault_contract] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/timed-vault/read-only", - "get-account-terms.ts", - *args, - ) - - def _run( - self, - timed_vault_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get account terms.""" - return self._deploy(timed_vault_contract, **kwargs) - - async def _arun( - self, - timed_vault_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(timed_vault_contract, **kwargs) - - -class DepositSTXInput(BaseModel): - """Input schema for depositing STX.""" - - timed_vault_contract: str = Field( - ..., - description="Contract principal of the timed vault", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-timed-vault", - ) - amount: int = Field(..., description="Amount of STX to deposit in microstacks") - - -class DepositSTXTool(BaseTool): - name: str = "dao_timedvault_deposit_stx" - description: str = ( - "Deposit STX into the DAO's timed vault. " - "The amount should be specified in microstacks (1 STX = 1,000,000 microstacks)." - ) - args_schema: Type[BaseModel] = DepositSTXInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - timed_vault_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [timed_vault_contract, str(amount)] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/timed-vault/public", - "deposit-stx.ts", - *args, - ) - - def _run( - self, - timed_vault_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX.""" - return self._deploy(timed_vault_contract, amount, **kwargs) - - async def _arun( - self, - timed_vault_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(timed_vault_contract, amount, **kwargs) - - -class WithdrawSTXInput(BaseModel): - """Input schema for withdrawing STX.""" - - timed_vault_contract: str = Field( - ..., - description="Contract principal of the timed vault", - example="ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.faces-timed-vault", - ) - - -class WithdrawSTXTool(BaseTool): - name: str = "dao_timedvault_withdraw_stx" - description: str = ( - "Withdraw STX from the DAO's timed vault. " - "This will withdraw the maximum allowed amount based on the account terms." 
- ) - args_schema: Type[BaseModel] = WithdrawSTXInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - timed_vault_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw STX.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [timed_vault_contract] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/extensions/timed-vault/public", - "withdraw-stx.ts", - *args, - ) - - def _run( - self, - timed_vault_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw STX.""" - return self._deploy(timed_vault_contract, **kwargs) - - async def _arun( - self, - timed_vault_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(timed_vault_contract, **kwargs) diff --git a/tools/hiro.py b/tools/hiro.py index 1e99aec1..c2162670 100644 --- a/tools/hiro.py +++ b/tools/hiro.py @@ -24,7 +24,7 @@ def _deploy(self, *args, **kwargs) -> float: Returns: float: The current STX price """ - from lib.hiro import HiroApi + from services.integrations.hiro.hiro_api import HiroApi api = HiroApi() return str(api.get_stx_price()) @@ -69,7 +69,7 @@ def _deploy(self, address: str) -> str: Returns: str: The balance and holdings of the address """ - from lib.hiro import HiroApi + from services.integrations.hiro.hiro_api import HiroApi api = HiroApi() return str(api.get_address_balance(address)) @@ -112,7 +112,7 @@ def _deploy(self, contract_id: str) -> str: Returns: str: The contract information """ - from lib.hiro import HiroApi + from services.integrations.hiro.hiro_api import HiroApi api = HiroApi() return str(api.get_contract_by_id(contract_id)) diff --git a/tools/jing.py b/tools/jing.py deleted file mode 100644 index 889107ac..00000000 --- a/tools/jing.py +++ /dev/null @@ -1,580 +0,0 @@ -from typing import Any, Dict, Optional, Type - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from backend.models import UUID - -from .bun import BunScriptRunner - - -# Schema definitions -class JingGetOrderBookInput(BaseModel): - """Input schema for getting orderbook data.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - - -class JingCreateBidInput(BaseModel): - """Input schema for creating bid offers.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - stx_amount: float = Field(..., description="Amount of STX to bid") - token_amount: float = Field(..., description="Amount of tokens requested") - recipient: Optional[str] = Field( - None, description="Optional: recipient address for private offers" - ) - expiry: Optional[int] = Field(None, description="Optional: blocks until expiration") - - -class JingSubmitOrderInput(BaseModel): - """Input schema for submitting (accepting) existing orders.""" - - swap_id: int = Field(..., description="ID of the order to submit") - - -class JingCreateAskInput(BaseModel): - """Input schema for creating ask offers.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - token_amount: float = Field(..., description="Amount of tokens to sell") - stx_amount: float = Field(..., description="Amount of STX requested") - recipient: Optional[str] = Field( - None, description="Optional: recipient address for private offers" - ) - expiry: Optional[int] 
= Field(None, description="Optional: blocks until expiration") - - -class JingGetPrivateOffersInput(BaseModel): - """Input schema for getting private offers.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - user_address: str = Field(..., description="Address to check private offers for") - - -class JingRepriceOrderInput(BaseModel): - """Input schema for repricing orders.""" - - swap_id: int = Field(..., description="ID of the order to reprice") - new_amount: float = Field( - ..., description="New amount (STX for asks, token for bids)" - ) - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - recipient: Optional[str] = Field( - None, description="Optional: recipient address for private offers" - ) - expiry: Optional[int] = Field(None, description="Optional: blocks until expiration") - - -class JingGetOrderInput(BaseModel): - """Input schema for getting order details.""" - - swap_id: int = Field(..., description="ID of the order to get details for") - - -class JingGetMarketsInput(BaseModel): - """Input schema for getting available markets.""" - - pass - - -# Base Tool with common initialization -class JingBaseTool(BaseTool): - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - -# Tool implementations -class JingGetOrderBookTool(JingBaseTool): - name: str = "jing_get_order_book" - description: str = "Get the current order book for a trading pair on JingCash" - args_schema: Type[BaseModel] = JingGetOrderBookInput - return_direct: bool = False - - def _deploy(self, pair: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get order book data.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run(self.wallet_id, "jing", "get-orderbook.ts", pair) - - def _run(self, pair: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get order book data.""" - return self._deploy(pair, **kwargs) - - async def _arun(self, pair: str, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(pair, **kwargs) - - -class JingCreateBidTool(JingBaseTool): - name: str = "jing_create_bid" - description: str = "Create a new bid offer to buy tokens with STX on JingCash" - args_schema: Type[BaseModel] = JingCreateBidInput - return_direct: bool = False - - def _deploy( - self, - pair: str, - stx_amount: float, - token_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a bid offer.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [pair, str(stx_amount), str(token_amount)] - if recipient: - args.append(recipient) - if expiry: - args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "bid.ts", *args) - - def _run( - self, - pair: str, - stx_amount: float, - token_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a bid offer.""" - return self._deploy(pair, stx_amount, token_amount, recipient, expiry, **kwargs) - - async def _arun( - self, - pair: str, - stx_amount: float, - token_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - 
"""Async version of the tool.""" - return self._deploy(pair, stx_amount, token_amount, recipient, expiry) - - -class JingSubmitBidTool(JingBaseTool): - name: str = "jing_submit_bid" - description: str = ( - "Submit (accept) an existing bid offer to sell tokens on JingCash" - ) - args_schema: Type[BaseModel] = JingSubmitOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit a bid.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "submit-bid.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit a bid.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingCreateAskTool(JingBaseTool): - name: str = "jing_create_ask" - description: str = "Create a new ask offer to sell tokens for STX on JingCash" - args_schema: Type[BaseModel] = JingCreateAskInput - return_direct: bool = False - - def _deploy( - self, - pair: str, - token_amount: float, - stx_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create an ask offer.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [pair, str(token_amount), str(stx_amount)] - if recipient: - args.append(recipient) - if expiry: - args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "ask.ts", *args) - - def _run( - self, - pair: str, - token_amount: float, - stx_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create an ask offer.""" - return self._deploy(pair, token_amount, stx_amount, recipient, expiry, **kwargs) - - async def _arun( - self, - pair: str, - token_amount: float, - stx_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(pair, token_amount, stx_amount, recipient, expiry) - - -class JingSubmitAskTool(JingBaseTool): - name: str = "jing_submit_ask" - description: str = "Submit (accept) an existing ask offer to buy tokens on JingCash" - args_schema: Type[BaseModel] = JingSubmitOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit an ask.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "submit-ask.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit an ask.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetPrivateOffersTool(JingBaseTool): - name: str = "jing_get_private_offers" - description: str = "Get private offers for a specific address on JingCash" - args_schema: Type[BaseModel] = JingGetPrivateOffersInput - return_direct: bool = False - - def _deploy(self, pair: 
str, user_address: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get private offers.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "get-private-offers.ts", pair, user_address - ) - - def _run(self, pair: str, user_address: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get private offers.""" - return self._deploy(pair, user_address, **kwargs) - - async def _arun(self, pair: str, user_address: str, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(pair, user_address, **kwargs) - - -class JingGetPendingOrdersTool(JingBaseTool): - name: str = "jing_get_pending_orders" - description: str = "Get all pending orders for the current user on JingCash" - args_schema: Type[BaseModel] = JingGetMarketsInput - return_direct: bool = False - - def _deploy(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get pending orders.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run(self.wallet_id, "jing", "get-pending-orders.ts") - - def _run(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get pending orders.""" - return self._deploy(**kwargs) - - async def _arun(self, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(**kwargs) - - -class JingRepriceBidTool(JingBaseTool): - name: str = "jing_reprice_bid" - description: str = "Reprice an existing bid order on JingCash" - args_schema: Type[BaseModel] = JingRepriceOrderInput - return_direct: bool = False - - def _deploy( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice a bid.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [str(swap_id), str(new_amount), pair] - if recipient: - args.append(recipient) - if expiry: - args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "reprice-bid.ts", *args) - - def _run( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice a bid.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - async def _arun( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - -class JingRepriceAskTool(JingBaseTool): - name: str = "jing_reprice_ask" - description: str = "Reprice an existing ask order on JingCash" - args_schema: Type[BaseModel] = JingRepriceOrderInput - return_direct: bool = False - - def _deploy( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice an ask.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [str(swap_id), str(new_amount), pair] - if recipient: - args.append(recipient) - if expiry: - 
args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "reprice-ask.ts", *args) - - def _run( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice an ask.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - async def _arun( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - -class JingCancelBidTool(JingBaseTool): - name: str = "jing_cancel_bid" - description: str = "Cancel an existing bid order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to cancel a bid.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "cancel-bid.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to cancel a bid.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingCancelAskTool(JingBaseTool): - name: str = "jing_cancel_ask" - description: str = "Cancel an existing ask order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to cancel an ask.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "cancel-ask.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to cancel an ask.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetBidTool(JingBaseTool): - name: str = "jing_get_bid" - description: str = "Get details of a specific bid order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get bid details.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "get-bid.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get bid details.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetAskTool(JingBaseTool): - name: str = "jing_get_ask" - description: str = "Get details of a specific ask order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get ask 
details.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "get-ask.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get ask details.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetMarketsTool(JingBaseTool): - name: str = "jing_get_markets" - description: str = ( - "Get all available trading pairs and their contract details on JingCash" - ) - args_schema: Type[BaseModel] = JingGetMarketsInput - return_direct: bool = False - - def _deploy(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get available markets.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run(self.wallet_id, "jing", "list-markets.ts") - - def _run(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get available markets.""" - return self._deploy(**kwargs) - - async def _arun(self, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(**kwargs) diff --git a/tools/smartwallet.py b/tools/smartwallet.py deleted file mode 100644 index 94f73b85..00000000 --- a/tools/smartwallet.py +++ /dev/null @@ -1,2568 +0,0 @@ -from typing import Any, Dict, Optional, Type -from uuid import UUID - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from tools.bun import BunScriptRunner - - -class SmartWalletGenerateSmartWalletInput(BaseModel): - """Input schema for generating a smart wallet contract.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - agent_address: str = Field( - ..., - description="Stacks address of the agent", - example="ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - generate_files: bool = Field( - False, - description="Whether to generate contract files", - ) - - -class SmartWalletGenerateSmartWalletTool(BaseTool): - name: str = "smartwallet_generate_smart_wallet" - description: str = ( - "Generate a new smart wallet contract with specified owner and agent addresses. " - "Returns the contract name, hash, and source code." 
- ) - args_schema: Type[BaseModel] = SmartWalletGenerateSmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - str(generate_files).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "generate-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - -class SmartWalletGenerateMySmartWalletInput(BaseModel): - """Input schema for generating a smart wallet contract using the current agent as the agent address.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - generate_files: bool = Field( - False, - description="Whether to generate contract files", - ) - - -class SmartWalletGenerateMySmartWalletTool(BaseTool): - name: str = "smartwallet_generate_my_smart_wallet" - description: str = ( - "Generate a new smart wallet contract using the current agent as the agent address. " - "Returns the contract name, hash, and source code." 
- ) - args_schema: Type[BaseModel] = SmartWalletGenerateMySmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - dao_token_contract, - dao_token_dex_contract, - str(generate_files).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "generate-my-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - -class SmartWalletDeploySmartWalletInput(BaseModel): - """Input schema for deploying a smart wallet contract.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - - -class SmartWalletDeploySmartWalletTool(BaseTool): - name: str = "smartwallet_deploy_smart_wallet" - description: str = ( - "Deploy a new smart wallet contract with specified owner and agent addresses. " - "Returns the deployed contract address and transaction ID." 
- ) - args_schema: Type[BaseModel] = SmartWalletDeploySmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "deploy-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - -class SmartWalletDeployMySmartWalletInput(BaseModel): - """Input schema for deploying a smart wallet contract using the current agent as the agent address.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - - -class SmartWalletDeployMySmartWalletTool(BaseTool): - name: str = "smartwallet_deploy_my_smart_wallet" - description: str = ( - "Deploy a new smart wallet contract using the current agent as the agent address. " - "Returns the deployed contract address and transaction ID." 
- ) - args_schema: Type[BaseModel] = SmartWalletDeployMySmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - dao_token_contract, - dao_token_dex_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "deploy-my-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - -class SmartWalletIsApprovedAssetInput(BaseModel): - """Input schema for checking if an asset is approved in a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletIsApprovedAssetTool(BaseTool): - name: str = "smartwallet_is_approved_asset" - description: str = ( - "Check if an asset is approved for use with a smart wallet. " - "Returns true if the asset is approved, false otherwise." 
- ) - args_schema: Type[BaseModel] = SmartWalletIsApprovedAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to check asset approval.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "is-approved-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to check asset approval.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletGetBalanceStxInput(BaseModel): - """Input schema for getting STX balance from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - - -class SmartWalletGetBalanceStxTool(BaseTool): - name: str = "smartwallet_get_balance_stx" - description: str = ( - "Get the STX balance from a smart wallet. " "Returns the balance in microSTX." - ) - args_schema: Type[BaseModel] = SmartWalletGetBalanceStxInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [smart_wallet_contract] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "get-balance-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" - return self._deploy(smart_wallet_contract, **kwargs) - - async def _arun( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(smart_wallet_contract, **kwargs) - - -class SmartWalletGetConfigurationInput(BaseModel): - """Input schema for getting smart wallet configuration.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - - -class SmartWalletGetConfigurationTool(BaseTool): - name: str = "smartwallet_get_configuration" - description: str = ( - "Get the configuration of a smart wallet. " - "Returns owner, agent, and other configuration details." 
- ) - args_schema: Type[BaseModel] = SmartWalletGetConfigurationInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get wallet configuration.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [smart_wallet_contract] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "get-configuration.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get wallet configuration.""" - return self._deploy(smart_wallet_contract, **kwargs) - - async def _arun( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(smart_wallet_contract, **kwargs) - - -class SmartWalletApproveAssetInput(BaseModel): - """Input schema for approving an asset in a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset to approve", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletApproveAssetTool(BaseTool): - name: str = "smartwallet_approve_asset" - description: str = ( - "Approve an asset for use with a smart wallet. " - "Returns the transaction ID of the approval transaction." - ) - args_schema: Type[BaseModel] = SmartWalletApproveAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to approve asset.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "approve-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to approve asset.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletRevokeAssetInput(BaseModel): - """Input schema for revoking an asset from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset to revoke", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletRevokeAssetTool(BaseTool): - name: str = "smartwallet_revoke_asset" - description: str = ( - "Revoke an asset from a smart 
wallet. " - "Returns the transaction ID of the revocation transaction." - ) - args_schema: Type[BaseModel] = SmartWalletRevokeAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to revoke asset.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "revoke-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to revoke asset.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletDepositStxInput(BaseModel): - """Input schema for depositing STX to a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - amount: int = Field( - ..., - description="Amount of STX to deposit in microSTX", - example=1000000, - gt=0, - ) - - -class SmartWalletDepositStxTool(BaseTool): - name: str = "smartwallet_deposit_stx" - description: str = ( - "Deposit STX to a smart wallet. " - "Returns the transaction ID of the deposit transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletDepositStxInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "deposit-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - -class SmartWalletWithdrawStxInput(BaseModel): - """Input schema for withdrawing STX from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - amount: int = Field( - ..., - description="Amount of STX to withdraw in microSTX", - example=1000000, - gt=0, - ) - - -class SmartWalletWithdrawSTXTool(BaseTool): - name: str = "smartwallet_withdraw_stx" - description: str = ( - "Withdraw STX from a smart wallet. " - "Returns the transaction ID of the withdrawal transaction." - ) - args_schema: Type[BaseModel] = SmartWalletWithdrawStxInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw STX.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "withdraw-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw STX.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - -class SmartWalletDepositFtInput(BaseModel): - """Input schema for depositing fungible tokens to a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - ft_contract: str = Field( - ..., - description="Contract principal of the fungible token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - amount: int = Field( - ..., - description="Amount of tokens to deposit", - example=1000, - gt=0, - ) - - -class SmartWalletDepositFtTool(BaseTool): - name: str = "smartwallet_deposit_ft" - 
description: str = ( - "Deposit fungible tokens to a smart wallet. " - "Returns the transaction ID of the deposit transaction." - ) - args_schema: Type[BaseModel] = SmartWalletDepositFtInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit fungible tokens.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - ft_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "deposit-ft.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit fungible tokens.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - -class SmartWalletWithdrawFtInput(BaseModel): - """Input schema for withdrawing fungible tokens from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - ft_contract: str = Field( - ..., - description="Contract principal of the fungible token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - amount: int = Field( - ..., - description="Amount of tokens to withdraw", - example=1000, - gt=0, - ) - - -class SmartWalletWithdrawFTTool(BaseTool): - name: str = "smartwallet_withdraw_ft" - description: str = ( - "Withdraw fungible tokens from a smart wallet. " - "Returns the transaction ID of the withdrawal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletWithdrawFtInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw fungible tokens.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - ft_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "withdraw-ft.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw fungible tokens.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - -class SmartWalletProxyCreateProposalInput(BaseModel): - """Input schema for creating a core proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - dao_core_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO core proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2", - ) - dao_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.proposal-add-extension", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletProxyCreateProposalTool(BaseTool): - name: str = "smartwallet_proxy_create_proposal" - description: str = ( - "Create a core proposal through a smart wallet. " - "Returns the transaction ID of the proposal creation transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyCreateProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - dao_token_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - dao_token_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-create-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - dao_token_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a core proposal.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - dao_token_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - dao_token_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - dao_token_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSendMessageInput(BaseModel): - """Input schema for proposing a send message action through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-send-message", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - message: str = Field( - ..., - description="Message to send", - example="hello world", - ) - - -class SmartWalletProxyProposeActionSendMessageTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_send_message" - description: str = ( - "Propose a send message action through a smart wallet. " - "Returns the transaction ID of the action proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSendMessageInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose a send message action.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - message, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-send-message.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose a send message action.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - message, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - message, - **kwargs, - ) - - -class SmartWalletVoteOnActionProposalInput(BaseModel): - """Input schema for voting on an action proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - proposal_id: int = Field( - ..., - description="ID of the action proposal", - example=1, - gt=0, - ) - vote: bool = Field( - ..., - description="True to vote in favor, False to vote against", - example=True, - ) - - -class SmartWalletVoteOnActionProposalTool(BaseTool): - name: str = "smartwallet_vote_on_action_proposal" - description: str = ( - "Vote on an action proposal through a smart wallet. " - "Returns the transaction ID of the vote transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletVoteOnActionProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on an action proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - str(proposal_id), - str(vote).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "vote-on-action-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on an action proposal.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - vote, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - vote, - **kwargs, - ) - - -class SmartWalletVoteOnCoreProposalInput(BaseModel): - """Input schema for voting on a core proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_core_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO core proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2", - ) - dao_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - vote: bool = Field( - ..., - description="True to vote in favor, False to vote against", - example=True, - ) - - -class SmartWalletVoteOnCoreProposalTool(BaseTool): - name: str = "smartwallet_vote_on_core_proposal" - description: str = ( - "Vote on a core proposal through a smart wallet. " - "Returns the transaction ID of the vote transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletVoteOnCoreProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - str(vote).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "vote-on-core-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on a core proposal.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - vote, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - vote, - **kwargs, - ) - - -class SmartWalletConcludeActionProposalInput(BaseModel): - """Input schema for concluding an action proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - proposal_id: int = Field( - ..., - description="ID of the action proposal", - example=1, - gt=0, - ) - - -class SmartWalletConcludeActionProposalTool(BaseTool): - name: str = "smartwallet_conclude_action_proposal" - description: str = ( - "Conclude an action proposal through a smart wallet. " - "Returns the transaction ID of the conclusion transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletConcludeActionProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude an action proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - str(proposal_id), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "conclude-action-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude an action proposal.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - **kwargs, - ) - - -class SmartWalletConcludeCoreProposalInput(BaseModel): - """Input schema for concluding a core proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_core_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO core proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2", - ) - dao_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - - -class SmartWalletConcludeCoreProposalTool(BaseTool): - name: str = "smartwallet_conclude_core_proposal" - description: str = ( - "Conclude a core proposal through a smart wallet. " - "Returns the transaction ID of the conclusion transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletConcludeCoreProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "conclude-core-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude a core proposal.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionAddResourceInput(BaseModel): - """Input schema for proposing an action to add a resource through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - resource_name: str = Field( - ..., - description="Name of the resource to add", - example="my-resource", - ) - resource_contract: str = Field( - ..., - description="Contract principal of the resource", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.my-resource", - ) - - -class SmartWalletProxyProposeActionAddResourceTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_add_resource" - description: str = ( - "Propose an action to add a resource through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionAddResourceInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - resource_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to add a resource.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - resource_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-add-resource.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - resource_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to add a resource.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - resource_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - resource_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - resource_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionAllowAssetInput(BaseModel): - """Input schema for proposing an action to allow an asset through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.my-asset", - ) - - -class SmartWalletProxyProposeActionAllowAssetTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_allow_asset" - description: str = ( - "Propose an action to allow an asset through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionAllowAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to allow an asset.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-allow-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to allow an asset.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionToggleResourceByNameInput(BaseModel): - """Input schema for proposing an action to toggle a resource by name through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - resource_name: str = Field( - ..., - description="Name of the resource to toggle", - example="my-resource", - ) - - -class SmartWalletProxyProposeActionToggleResourceByNameTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_toggle_resource_by_name" - description: str = ( - "Propose an action to toggle a resource by name through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = ( - SmartWalletProxyProposeActionToggleResourceByNameInput - ) - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to toggle a resource by name.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-toggle-resource-by-name.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to toggle a resource by name.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSetAccountHolderInput(BaseModel): - """Input schema for proposing an action to set the account holder through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - account_holder: str = Field( - ..., - description="Principal of the new account holder", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - - -class SmartWalletProxyProposeActionSetAccountHolderTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_set_account_holder" - description: str = ( - "Propose an action to set the account holder through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetAccountHolderInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - account_holder: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the account holder.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - account_holder, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-set-account-holder.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - account_holder: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the account holder.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - account_holder, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - account_holder: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - account_holder, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalAmountInput(BaseModel): - """Input schema for proposing an action to set the withdrawal amount through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - withdrawal_amount: int = Field( - ..., - description="New withdrawal amount in micro-STX", - example=1000000, - gt=0, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalAmountTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_set_withdrawal_amount" - description: str = ( - "Propose an action to set the withdrawal amount through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetWithdrawalAmountInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the withdrawal amount.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - str(withdrawal_amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-set-withdrawal-amount.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the withdrawal amount.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - withdrawal_amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - withdrawal_amount, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalPeriodInput(BaseModel): - """Input schema for proposing an action to set the withdrawal period through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - withdrawal_period: int = Field( - ..., - description="New withdrawal period in blocks", - example=144, - gt=0, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalPeriodTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_set_withdrawal_period" - description: str = ( - "Propose an action to set the withdrawal period through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetWithdrawalPeriodInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_period: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the withdrawal period.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - str(withdrawal_period), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-set-withdrawal-period.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_period: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the withdrawal period.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - withdrawal_period, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_period: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - withdrawal_period, - **kwargs, - ) - - -class SmartWalletDepositSTXInput(BaseModel): - """Input schema for depositing STX to a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", - ) - amount: int = Field(..., description="Amount of STX to deposit in microstacks") - - -class SmartWalletDepositSTXTool(BaseTool): - name: str = "smartwallet_deposit_stx" - description: str = ( - "Deposit STX into a smart wallet. " - "The amount should be specified in microstacks (1 STX = 1,000,000 microstacks)." 
- ) - args_schema: Type[BaseModel] = SmartWalletDepositSTXInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX to a smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [smart_wallet_contract, str(amount)] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "deposit-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX to a smart wallet.""" - return self._deploy(smart_wallet_contract, amount, **kwargs) - - async def _arun( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool to deposit STX to a smart wallet.""" - return self._deploy(smart_wallet_contract, amount, **kwargs) - - -class SmartWalletDepositFTInput(BaseModel): - """Input schema for depositing fungible tokens to a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", - ) - ft_contract: str = Field( - ..., - description="Contract principal of the fungible token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - amount: int = Field(..., description="Amount of tokens to deposit") - - -class SmartWalletDepositFTTool(BaseTool): - name: str = "smartwallet_deposit_ft" - description: str = ( - "Deposit fungible tokens into a smart wallet. " - "Requires the token contract principal and amount to deposit." 
- ) - args_schema: Type[BaseModel] = SmartWalletDepositFTInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit fungible tokens.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [smart_wallet_contract, ft_contract, str(amount)] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "deposit-ft.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit fungible tokens.""" - return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs) - - async def _arun( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs) - - -class SmartWalletGetBalanceSTXInput(BaseModel): - """Input schema for getting STX balance from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", - ) - - -class SmartWalletGetBalanceSTXTool(BaseTool): - name: str = "smartwallet_get_balance_stx" - description: str = ( - "Get the STX balance from a smart wallet. Returns the current STX balance as a number." - ) - args_schema: Type[BaseModel] = SmartWalletGetBalanceSTXInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [smart_wallet_contract] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "get-balance-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" - return self._deploy(smart_wallet_contract, **kwargs) - - async def _arun( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(smart_wallet_contract, **kwargs) - - -class SmartWalletIsApprovedAssetInput(BaseModel): - """Input schema for checking if an asset is approved in a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset to check", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-faktory", - ) diff --git a/tools/stxcity.py b/tools/stxcity.py deleted file mode 100644 index aea10db9..00000000 --- a/tools/stxcity.py +++ /dev/null @@ -1,276 +0,0 @@ -from decimal import Decimal -from typing import Any, Dict, Optional, Type - -from langchain.tools import BaseTool -from pydantic import 
BaseModel, Field - -from backend.models import UUID -from tools.bun import BunScriptRunner - - -class StxCityBaseInput(BaseModel): - """Base input schema for STXCity tools that don't require parameters.""" - - pass - - -class StxCityExecuteBuyInput(BaseModel): - """Input schema for STXCity buy order execution.""" - - stx_amount: str = Field(..., description="Amount of STX to spend on the purchase") - dex_contract_id: str = Field(..., description="Contract ID of the DEX") - token_contract_id: str = Field( - ..., description="Contract ID of the token to purchase" - ) - token_symbol: str = Field(..., description="Symbol of the token to purchase") - slippage: Optional[str] = Field( - default="50", - description="Slippage tolerance in basis points (default: 50, which is 0.5%)", - ) - - -class StxCityExecuteBuyTool(BaseTool): - name: str = "stxcity_execute_buy" - description: str = ( - "Execute a buy order on STXCity DEX with specified STX amount and token details" - ) - args_schema: Type[BaseModel] = StxCityExecuteBuyInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - stx_amount: str, - dex_contract_id: str, - token_contract_id: str, - token_symbol: str, - slippage: Optional[str] = "50", - **kwargs, - ) -> str: - """Execute the tool to place a buy order.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, - "stacks-stxcity", - "exec-buy.ts", - stx_amount, - dex_contract_id, - token_contract_id, - token_symbol, - slippage, - ) - - def _run( - self, - stx_amount: str, - dex_contract_id: str, - token_contract_id: str, - token_symbol: str, - slippage: Optional[str] = "50", - **kwargs, - ) -> str: - """Execute the tool to place a buy order.""" - return self._deploy( - stx_amount, dex_contract_id, token_contract_id, token_symbol, slippage - ) - - async def _arun( - self, - stx_amount: str, - dex_contract_id: str, - token_contract_id: str, - token_symbol: str, - slippage: Optional[str] = "50", - **kwargs, - ) -> str: - """Async version of the tool.""" - return self._deploy( - stx_amount, dex_contract_id, token_contract_id, token_symbol, slippage - ) - - -class StxCityListBondingTokensTool(BaseTool): - name: str = "stxcity_list_bonding_tokens" - description: str = "Get a list of all available tokens for bonding on STXCity" - args_schema: Type[BaseModel] = StxCityBaseInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to list available bonding tokens.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run(self.wallet_id, "stacks-stxcity", "exec-list.ts") - - def _run(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to list available bonding tokens.""" - return self._deploy() - - async def _arun(self, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy() - - -class StxCitySearchInput(BaseModel): - """Input schema for searching bonding opportunities.""" - - keyword: Optional[str] = Field( - default=None, description="Search keyword to filter results" - ) - 
token_contract: Optional[str] = Field( - default=None, description="Token contract to filter results" - ) - - -class StxCitySearchTool(BaseTool): - name: str = "stxcity_search" - description: str = ( - "Search for bonding opportunities on STXCity with optional keyword and token " - "contract filters" - ) - args_schema: Type[BaseModel] = StxCitySearchInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - keyword: Optional[str] = None, - token_contract: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to search for bonding opportunities.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [] - if keyword: - args.append(keyword) - if token_contract: - args.append(token_contract) - return BunScriptRunner.bun_run( - self.wallet_id, "stacks-stxcity", "exec-search.ts", *args - ) - - def _run( - self, - keyword: Optional[str] = None, - token_contract: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to search for bonding opportunities.""" - return self._deploy(keyword, token_contract) - - async def _arun( - self, - keyword: Optional[str] = None, - token_contract: Optional[str] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(keyword, token_contract) - - -class StxCityExecuteSellInput(BaseModel): - """Input schema for STXCity sell order execution.""" - - token_amount: Decimal = Field(..., description="Amount of tokens to sell") - dex_contract_id: str = Field(..., description="Contract ID of the DEX") - token_contract_id: str = Field(..., description="Contract ID of the token to sell") - token_symbol: str = Field(..., description="Symbol of the token to sell") - slippage: Optional[int] = Field( - default=50, - description="Slippage tolerance in basis points (default: 50, which is 0.5%)", - ) - - -class StxCityExecuteSellTool(BaseTool): - name: str = "stxcity_execute_sell" - description: str = ( - "Execute a sell order on STXCity DEX with specified token amount and details" - ) - args_schema: Type[BaseModel] = StxCityExecuteSellInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - token_amount: Decimal, - dex_contract_id: str, - token_contract_id: str, - token_symbol: str, - slippage: Optional[int] = 50, - **kwargs, - ) -> str: - """Execute the tool to place a sell order.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, - "stacks-stxcity", - "exec-sell.ts", - token_amount, - dex_contract_id, - token_contract_id, - token_symbol, - slippage, - ) - - def _run( - self, - token_amount: Decimal, - dex_contract_id: str, - token_contract_id: str, - token_symbol: str, - slippage: Optional[int] = 50, - **kwargs, - ) -> str: - """Execute the tool to place a sell order.""" - return self._deploy( - token_amount, dex_contract_id, token_contract_id, token_symbol, slippage - ) - - async def _arun( - self, - token_amount: Decimal, - dex_contract_id: str, - token_contract_id: str, - token_symbol: str, - slippage: Optional[int] = 50, - **kwargs, - ) -> str: - """Async version of the 
tool.""" - return self._deploy( - token_amount, dex_contract_id, token_contract_id, token_symbol, slippage - ) diff --git a/tools/telegram.py b/tools/telegram.py index 973fb5fa..2f1c1ebd 100644 --- a/tools/telegram.py +++ b/tools/telegram.py @@ -4,7 +4,7 @@ from pydantic import BaseModel, Field from backend.models import UUID -from services.bot import _bot_service +from services.communication.telegram_bot_service import _bot_service class SendTelegramNotificationInput(BaseModel): diff --git a/tools/tools_factory.py b/tools/tools_factory.py index b98f3fb4..20e7e4bd 100644 --- a/tools/tools_factory.py +++ b/tools/tools_factory.py @@ -7,45 +7,26 @@ from backend.models import UUID, Profile, WalletFilter from lib.logger import configure_logger +from .agent_account import AgentAccountDeployTool from .bitflow import BitflowExecuteTradeTool -from .coinmarketcap import GetBitcoinData from .contracts import ContractSIP10InfoTool, FetchContractSourceTool -from .dao_deployments import ContractDAODeployTool from .dao_ext_action_proposals import ( ConcludeActionProposalTool, GetLiquidSupplyTool, GetProposalTool, - GetTotalVotesTool, + GetTotalProposalsTool, + GetVoteRecordTool, + GetVoteRecordsTool, + GetVetoVoteRecordTool, GetVotingConfigurationTool, GetVotingPowerTool, - ProposeActionAddResourceTool, - ProposeActionAllowAssetTool, ProposeActionSendMessageTool, - ProposeActionSetAccountHolderTool, - ProposeActionSetWithdrawalAmountTool, - ProposeActionSetWithdrawalPeriodTool, - ProposeActionToggleResourceTool, + VetoActionProposalTool, VoteOnActionProposalTool, ) from .dao_ext_charter import ( GetCurrentDaoCharterTool, - GetCurrentDaoCharterVersionTool, - GetDaoCharterTool, ) -from .dao_ext_core_proposals import ( - DeployCoreProposalTool, - GenerateCoreProposalTool, -) -from .dao_ext_payments_invoices import ( - GetInvoiceTool, - GetResourceByNameTool, - GetResourceTool, - PayInvoiceByResourceNameTool, - PayInvoiceTool, -) -from .dao_ext_timed_vault import DepositSTXTool as TimedVaultDepositSTXTool -from .dao_ext_timed_vault import GetAccountTermsTool, WithdrawSTXTool -from .dao_ext_treasury import GetAllowedAssetTool, IsAllowedAssetTool from .database import ( AddScheduledTaskTool, DeleteScheduledTaskTool, @@ -66,37 +47,9 @@ LunarCrushTokenMetricsTool, SearchLunarCrushTool, ) -from .smartwallet import ( - SmartWalletApproveAssetTool, - SmartWalletConcludeActionProposalTool, - SmartWalletConcludeCoreProposalTool, - SmartWalletDeployMySmartWalletTool, - SmartWalletDeploySmartWalletTool, - SmartWalletDepositFTTool, - SmartWalletDepositSTXTool, - SmartWalletGenerateMySmartWalletTool, - SmartWalletGenerateSmartWalletTool, - SmartWalletGetBalanceSTXTool, - SmartWalletGetConfigurationTool, - SmartWalletIsApprovedAssetTool, - SmartWalletProxyCreateProposalTool, - SmartWalletProxyProposeActionAddResourceTool, - SmartWalletProxyProposeActionAllowAssetTool, - SmartWalletProxyProposeActionSendMessageTool, - SmartWalletProxyProposeActionSetAccountHolderTool, - SmartWalletProxyProposeActionSetWithdrawalAmountTool, - SmartWalletProxyProposeActionSetWithdrawalPeriodTool, - SmartWalletProxyProposeActionToggleResourceByNameTool, - SmartWalletRevokeAssetTool, - SmartWalletVoteOnActionProposalTool, - SmartWalletVoteOnCoreProposalTool, - SmartWalletWithdrawFTTool, - SmartWalletWithdrawSTXTool, -) from .telegram import SendTelegramNotificationTool from .transactions import ( StacksTransactionByAddressTool, - StacksTransactionStatusTool, StacksTransactionTool, ) from .twitter import TwitterPostTweetTool @@ -149,56 +102,22 
@@ def initialize_tools( logger.warning(f"Failed to get wallet for agent {agent_id}: {e}") tools = { - "coinmarketcap_get_market_data": GetBitcoinData(), "bitflow_execute_trade": BitflowExecuteTradeTool(wallet_id), - "contracts_get_sip10_info": ContractSIP10InfoTool(wallet_id), - "contracts_deploy_dao": ContractDAODeployTool(wallet_id), + "contracts_fetch_sip10_info": ContractSIP10InfoTool(wallet_id), "contracts_fetch_source_code": FetchContractSourceTool(wallet_id), - "dao_coreproposals_generate_proposal": GenerateCoreProposalTool(wallet_id), - "dao_coreproposals_deploy_proposal": DeployCoreProposalTool(wallet_id), - "dao_actionproposals_conclude_proposal": ConcludeActionProposalTool(wallet_id), - "dao_actionproposals_get_liquid_supply": GetLiquidSupplyTool(wallet_id), - "dao_actionproposals_get_proposal": GetProposalTool(wallet_id), - "dao_actionproposals_get_total_votes": GetTotalVotesTool(wallet_id), - "dao_actionproposals_get_voting_configuration": GetVotingConfigurationTool( - wallet_id - ), - "dao_actionproposals_get_voting_power": GetVotingPowerTool(wallet_id), - "dao_actionproposals_vote_on_proposal": VoteOnActionProposalTool(wallet_id), - "dao_actionproposals_propose_add_resource": ProposeActionAddResourceTool( - wallet_id - ), - "dao_actionproposals_propose_allow_asset": ProposeActionAllowAssetTool( - wallet_id - ), - "dao_actionproposals_propose_send_message": ProposeActionSendMessageTool( - wallet_id - ), - "dao_actionproposals_propose_set_account_holder": ProposeActionSetAccountHolderTool( - wallet_id - ), - "dao_actionproposals_propose_set_withdrawal_amount": ProposeActionSetWithdrawalAmountTool( - wallet_id - ), - "dao_actionproposals_propose_set_withdrawal_period": ProposeActionSetWithdrawalPeriodTool( - wallet_id - ), - "dao_actionproposals_propose_toggle_resource": ProposeActionToggleResourceTool( - wallet_id - ), - "dao_timedvault_get_account_terms": GetAccountTermsTool(wallet_id), - "dao_timedvault_deposit_stx": TimedVaultDepositSTXTool(wallet_id), - "dao_timedvault_withdraw_stx": WithdrawSTXTool(wallet_id), - "dao_charter_get_current": GetCurrentDaoCharterTool(wallet_id), - "dao_charter_get_current_version": GetCurrentDaoCharterVersionTool(wallet_id), - "dao_charter_get_version": GetDaoCharterTool(wallet_id), - "dao_payments_get_invoice": GetInvoiceTool(wallet_id), - "dao_payments_get_resource": GetResourceTool(wallet_id), - "dao_payments_get_resource_by_name": GetResourceByNameTool(wallet_id), - "dao_payments_pay_invoice": PayInvoiceTool(wallet_id), - "dao_payments_pay_invoice_by_resource": PayInvoiceByResourceNameTool(wallet_id), - "dao_treasury_get_allowed_asset": GetAllowedAssetTool(wallet_id), - "dao_treasury_is_allowed_asset": IsAllowedAssetTool(wallet_id), + "dao_action_conclude_proposal": ConcludeActionProposalTool(wallet_id), + "dao_action_get_liquid_supply": GetLiquidSupplyTool(wallet_id), + "dao_action_get_proposal": GetProposalTool(wallet_id), + "dao_action_get_total_proposals": GetTotalProposalsTool(wallet_id), + "dao_action_get_veto_vote_record": GetVetoVoteRecordTool(wallet_id), + "dao_action_get_vote_record": GetVoteRecordTool(wallet_id), + "dao_action_get_vote_records": GetVoteRecordsTool(wallet_id), + "dao_action_get_voting_configuration": GetVotingConfigurationTool(wallet_id), + "dao_action_get_voting_power": GetVotingPowerTool(wallet_id), + "dao_action_veto_proposal": VetoActionProposalTool(wallet_id), + "dao_action_vote_on_proposal": VoteOnActionProposalTool(wallet_id), + "dao_charter_get_current_charter": GetCurrentDaoCharterTool(wallet_id), + 
"dao_propose_action_send_message": ProposeActionSendMessageTool(wallet_id), "database_add_scheduled_task": AddScheduledTaskTool(profile_id, agent_id), "database_get_dao_list": GetDAOListTool(), "database_get_dao_get_by_name": GetDAOByNameTool(), @@ -212,7 +131,6 @@ def initialize_tools( "lunarcrush_get_token_metrics": LunarCrushTokenMetricsTool(), "lunarcrush_search": SearchLunarCrushTool(), "lunarcrush_get_token_metadata": LunarCrushTokenMetadataTool(), - "stacks_get_transaction_status": StacksTransactionStatusTool(wallet_id), "stacks_get_transaction_details": StacksTransactionTool(wallet_id), "stacks_get_transactions_by_address": StacksTransactionByAddressTool(wallet_id), "stacks_get_contract_info": STXGetContractInfoTool(), @@ -226,61 +144,7 @@ def initialize_tools( "wallet_get_my_transactions": WalletGetMyTransactions(wallet_id), "wallet_send_sip10": WalletSIP10SendTool(wallet_id), "x_credentials": CollectXCredentialsTool(profile_id), - "smartwallet_deploy_smart_wallet": SmartWalletDeploySmartWalletTool(wallet_id), - "smartwallet_deploy_my_smart_wallet": SmartWalletDeployMySmartWalletTool( - wallet_id - ), - "smartwallet_deposit_stx": SmartWalletDepositSTXTool(wallet_id), - "smartwallet_deposit_ft": SmartWalletDepositFTTool(wallet_id), - "smartwallet_approve_asset": SmartWalletApproveAssetTool(wallet_id), - "smartwallet_revoke_asset": SmartWalletRevokeAssetTool(wallet_id), - "smartwallet_get_balance_stx": SmartWalletGetBalanceSTXTool(wallet_id), - "smartwallet_is_approved_asset": SmartWalletIsApprovedAssetTool(wallet_id), - "smartwallet_get_configuration": SmartWalletGetConfigurationTool(wallet_id), - "smartwallet_generate_smart_wallet": SmartWalletGenerateSmartWalletTool( - wallet_id - ), - "smartwallet_generate_my_smart_wallet": SmartWalletGenerateMySmartWalletTool( - wallet_id - ), - "smartwallet_withdraw_stx": SmartWalletWithdrawSTXTool(wallet_id), - "smartwallet_withdraw_ft": SmartWalletWithdrawFTTool(wallet_id), - "smartwallet_proxy_create_proposal": SmartWalletProxyCreateProposalTool( - wallet_id - ), - "smartwallet_proxy_propose_action_send_message": SmartWalletProxyProposeActionSendMessageTool( - wallet_id - ), - "smartwallet_proxy_propose_action_add_resource": SmartWalletProxyProposeActionAddResourceTool( - wallet_id - ), - "smartwallet_proxy_propose_action_allow_asset": SmartWalletProxyProposeActionAllowAssetTool( - wallet_id - ), - "smartwallet_proxy_propose_action_toggle_resource_by_name": SmartWalletProxyProposeActionToggleResourceByNameTool( - wallet_id - ), - "smartwallet_proxy_propose_action_set_account_holder": SmartWalletProxyProposeActionSetAccountHolderTool( - wallet_id - ), - "smartwallet_proxy_propose_action_set_withdrawal_amount": SmartWalletProxyProposeActionSetWithdrawalAmountTool( - wallet_id - ), - "smartwallet_proxy_propose_action_set_withdrawal_period": SmartWalletProxyProposeActionSetWithdrawalPeriodTool( - wallet_id - ), - "smartwallet_vote_on_action_proposal": SmartWalletVoteOnActionProposalTool( - wallet_id - ), - "smartwallet_vote_on_core_proposal": SmartWalletVoteOnCoreProposalTool( - wallet_id - ), - "smartwallet_conclude_action_proposal": SmartWalletConcludeActionProposalTool( - wallet_id - ), - "smartwallet_conclude_core_proposal": SmartWalletConcludeCoreProposalTool( - wallet_id - ), + "agent_account_deploy": AgentAccountDeployTool(wallet_id), } return tools diff --git a/tools/transactions.py b/tools/transactions.py index 431f798b..47edf8ba 100644 --- a/tools/transactions.py +++ b/tools/transactions.py @@ -8,56 +8,6 @@ from .bun import 
BunScriptRunner -class StacksTransactionStatusInput(BaseModel): - """Input schema for checking Stacks transaction status.""" - - transaction_id: str = Field( - ..., description="The ID of the transaction to check the status for." - ) - - -class StacksTransactionStatusTool(BaseTool): - name: str = "stacks_transaction_status" - description: str = ( - "Get the current status of a Stacks blockchain transaction using its ID. " - "Returns success status and transaction details if available." - ) - args_schema: Type[BaseModel] = StacksTransactionStatusInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy(self, transaction_id: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to check transaction status.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - try: - result = BunScriptRunner.bun_run( - self.wallet_id, - "stacks-transactions", - "get-transaction-status.ts", - transaction_id, - ) - return result - except Exception as e: - return {"output": None, "error": str(e), "success": False} - - def _run(self, transaction_id: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to check transaction status.""" - return self._deploy(transaction_id, **kwargs) - - async def _arun(self, transaction_id: str, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(transaction_id, **kwargs) - - class StacksTransactionInput(BaseModel): """Input schema for retrieving detailed Stacks transaction information.""" diff --git a/tools/twitter.py b/tools/twitter.py index 1a9cc717..5df5afc8 100644 --- a/tools/twitter.py +++ b/tools/twitter.py @@ -6,7 +6,7 @@ from backend.factory import backend from backend.models import UUID, XCredsFilter from lib.logger import configure_logger -from lib.twitter import TwitterService +from services.communication.twitter_service import TwitterService logger = configure_logger(__name__) @@ -20,6 +20,15 @@ class TwitterPostTweetInput(BaseModel): ) +class TwitterGetTweetInput(BaseModel): + """Input schema for getting a tweet by its ID.""" + + tweet_id: str = Field( + ..., + description="The ID of the tweet to retrieve. This should be the numeric tweet ID as a string.", + ) + + class TwitterPostTweetTool(BaseTool): name: str = "twitter_post_tweet" description: str = ( @@ -61,6 +70,7 @@ def _deploy(self, content: str, **kwargs) -> str: access_secret=x_creds.access_secret, client_id=x_creds.client_id, client_secret=x_creds.client_secret, + bearer_token=x_creds.bearer_token, ) twitter_service.initialize() response = twitter_service.post_tweet(text=content) @@ -79,3 +89,403 @@ def _run(self, content: str, **kwargs) -> str: async def _arun(self, content: str, **kwargs) -> str: """Execute the tool to post a tweet asynchronously.""" return self._deploy(content, **kwargs) + + +class TwitterGetTweetTool(BaseTool): + name: str = "twitter_get_tweet" + description: str = ( + "Retrieve a tweet by its ID. Returns comprehensive tweet data including " + "text, author information, metrics, entities, and metadata." 
+ ) + args_schema: Type[BaseModel] = TwitterGetTweetInput + return_direct: bool = False + agent_id: Optional[UUID] = None + + def __init__( + self, + agent_id: Optional[UUID] = None, + **kwargs, + ): + super().__init__(**kwargs) + self.agent_id = agent_id + + def _deploy(self, tweet_id: str, **kwargs) -> str: + """Execute the tool to get a tweet by ID synchronously.""" + + if self.agent_id is None: + raise ValueError("Agent ID is required") + + try: + x_creds = backend.list_x_creds( + filters=XCredsFilter(agent_id=self.agent_id), + ) + if not x_creds: + return "No X creds found for this agent" + + x_creds = x_creds[0] + twitter_service = TwitterService( + consumer_key=x_creds.consumer_key, + consumer_secret=x_creds.consumer_secret, + access_token=x_creds.access_token, + access_secret=x_creds.access_secret, + client_id=x_creds.client_id, + client_secret=x_creds.client_secret, + bearer_token=x_creds.bearer_token, + ) + twitter_service.initialize() + + # Use async method in sync context - we need to handle this properly + import asyncio + + try: + loop = asyncio.get_event_loop() + except RuntimeError: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + # Get the full response with includes (media data) + response = loop.run_until_complete( + twitter_service.client.get_tweet( + id=tweet_id, + tweet_fields=[ + "id", + "text", + "created_at", + "author_id", + "conversation_id", + "in_reply_to_user_id", + "referenced_tweets", + "public_metrics", + "entities", + "attachments", + "context_annotations", + "withheld", + "reply_settings", + "lang", + ], + expansions=[ + "author_id", + "referenced_tweets.id", + "referenced_tweets.id.author_id", + "entities.mentions.username", + "attachments.media_keys", + "attachments.poll_ids", + "in_reply_to_user_id", + "geo.place_id", + ], + media_fields=[ + "duration_ms", + "height", + "media_key", + "preview_image_url", + "type", + "url", + "width", + "public_metrics", + "alt_text", + ], + user_fields=[ + "id", + "name", + "username", + "created_at", + "description", + "entities", + "location", + "pinned_tweet_id", + "profile_image_url", + "protected", + "public_metrics", + "url", + "verified", + "withheld", + ], + ) + ) + + if response and response.data: + tweet = response.data + + # Format as readable string + result = f"Tweet ID: {tweet.id}\n" + result += f"Text: {tweet.text}\n" + result += f"Author ID: {tweet.author_id}\n" + result += f"Created: {tweet.created_at}\n" + result += f"Language: {tweet.lang}\n" + result += f"Conversation ID: {tweet.conversation_id}\n" + + if tweet.public_metrics: + metrics = tweet.public_metrics + result += f"Metrics - Retweets: {metrics.retweet_count}, " + result += f"Likes: {metrics.like_count}, " + result += f"Replies: {metrics.reply_count}, " + result += f"Quotes: {metrics.quote_count}\n" + + if tweet.in_reply_to_user_id: + result += f"In reply to user: {tweet.in_reply_to_user_id}\n" + + if ( + tweet.entities + and hasattr(tweet.entities, "hashtags") + and tweet.entities.hashtags + ): + hashtags = [tag.tag for tag in tweet.entities.hashtags] + result += f"Hashtags: {', '.join(hashtags)}\n" + + if ( + tweet.entities + and hasattr(tweet.entities, "mentions") + and tweet.entities.mentions + ): + mentions = [mention.username for mention in tweet.entities.mentions] + result += f"Mentions: {', '.join(mentions)}\n" + + if ( + tweet.entities + and hasattr(tweet.entities, "urls") + and tweet.entities.urls + ): + urls = [url.expanded_url for url in tweet.entities.urls] + result += f"URLs: {', '.join(urls)}\n" + + # 
Handle media attachments (images, videos, etc.) + if ( + hasattr(response, "includes") + and response.includes + and hasattr(response.includes, "media") + ): + media_list = response.includes.media + if media_list: + result += f"Media Attachments ({len(media_list)}):\n" + for i, media in enumerate(media_list, 1): + result += f" Media {i}:\n" + result += f" Type: {media.type}\n" + result += f" Media Key: {media.media_key}\n" + + if hasattr(media, "url") and media.url: + result += f" URL: {media.url}\n" + + if ( + hasattr(media, "preview_image_url") + and media.preview_image_url + ): + result += ( + f" Preview URL: {media.preview_image_url}\n" + ) + + if hasattr(media, "width") and media.width: + result += ( + f" Dimensions: {media.width}x{media.height}\n" + ) + + if hasattr(media, "alt_text") and media.alt_text: + result += f" Alt Text: {media.alt_text}\n" + + if hasattr(media, "duration_ms") and media.duration_ms: + result += f" Duration: {media.duration_ms}ms\n" + + # Handle attachments section + elif ( + tweet.attachments + and hasattr(tweet.attachments, "media_keys") + and tweet.attachments.media_keys + ): + result += f"Media Keys: {', '.join(tweet.attachments.media_keys)}\n" + result += "Note: Media details not available in response includes\n" + + result += f"Tweet URL: https://x.com/i/web/status/{tweet.id}" + + logger.info(f"Successfully retrieved tweet: {tweet_id}") + return result + else: + return f"Tweet with ID {tweet_id} not found or not accessible" + + except Exception as e: + logger.error(f"Error getting tweet {tweet_id}: {str(e)}") + return f"Error getting tweet: {str(e)}" + + def _run(self, tweet_id: str, **kwargs) -> str: + """Sync version of the tool.""" + return self._deploy(tweet_id, **kwargs) + + async def _arun(self, tweet_id: str, **kwargs) -> str: + """Execute the tool to get a tweet by ID asynchronously.""" + + if self.agent_id is None: + raise ValueError("Agent ID is required") + + try: + x_creds = backend.list_x_creds( + filters=XCredsFilter(agent_id=self.agent_id), + ) + if not x_creds: + return "No X creds found for this agent" + + x_creds = x_creds[0] + twitter_service = TwitterService( + consumer_key=x_creds.consumer_key, + consumer_secret=x_creds.consumer_secret, + access_token=x_creds.access_token, + access_secret=x_creds.access_secret, + client_id=x_creds.client_id, + client_secret=x_creds.client_secret, + bearer_token=x_creds.bearer_token, + ) + await twitter_service._ainitialize() + + # Get the full response with includes (media data) + response = await twitter_service.client.get_tweet( + id=tweet_id, + tweet_fields=[ + "id", + "text", + "created_at", + "author_id", + "conversation_id", + "in_reply_to_user_id", + "referenced_tweets", + "public_metrics", + "entities", + "attachments", + "context_annotations", + "withheld", + "reply_settings", + "lang", + ], + expansions=[ + "author_id", + "referenced_tweets.id", + "referenced_tweets.id.author_id", + "entities.mentions.username", + "attachments.media_keys", + "attachments.poll_ids", + "in_reply_to_user_id", + "geo.place_id", + ], + media_fields=[ + "duration_ms", + "height", + "media_key", + "preview_image_url", + "type", + "url", + "width", + "public_metrics", + "alt_text", + ], + user_fields=[ + "id", + "name", + "username", + "created_at", + "description", + "entities", + "location", + "pinned_tweet_id", + "profile_image_url", + "protected", + "public_metrics", + "url", + "verified", + "withheld", + ], + ) + + if response and response.data: + tweet = response.data + + # Format comprehensive tweet data + result = f"Tweet ID: {tweet.id}\n" + result += f"Text: {tweet.text}\n" +
result += f"Author ID: {tweet.author_id}\n" + result += f"Created: {tweet.created_at}\n" + result += f"Language: {tweet.lang}\n" + result += f"Conversation ID: {tweet.conversation_id}\n" + + if tweet.public_metrics: + metrics = tweet.public_metrics + result += f"Metrics - Retweets: {metrics.retweet_count}, " + result += f"Likes: {metrics.like_count}, " + result += f"Replies: {metrics.reply_count}, " + result += f"Quotes: {metrics.quote_count}\n" + + if tweet.in_reply_to_user_id: + result += f"In reply to user: {tweet.in_reply_to_user_id}\n" + + if ( + tweet.entities + and hasattr(tweet.entities, "hashtags") + and tweet.entities.hashtags + ): + hashtags = [tag.tag for tag in tweet.entities.hashtags] + result += f"Hashtags: {', '.join(hashtags)}\n" + + if ( + tweet.entities + and hasattr(tweet.entities, "mentions") + and tweet.entities.mentions + ): + mentions = [mention.username for mention in tweet.entities.mentions] + result += f"Mentions: {', '.join(mentions)}\n" + + if ( + tweet.entities + and hasattr(tweet.entities, "urls") + and tweet.entities.urls + ): + urls = [url.expanded_url for url in tweet.entities.urls] + result += f"URLs: {', '.join(urls)}\n" + + # Handle media attachments (images, videos, etc.) + if ( + hasattr(response, "includes") + and response.includes + and hasattr(response.includes, "media") + ): + media_list = response.includes.media + if media_list: + result += f"Media Attachments ({len(media_list)}):\n" + for i, media in enumerate(media_list, 1): + result += f" Media {i}:\n" + result += f" Type: {media.type}\n" + result += f" Media Key: {media.media_key}\n" + + if hasattr(media, "url") and media.url: + result += f" URL: {media.url}\n" + + if ( + hasattr(media, "preview_image_url") + and media.preview_image_url + ): + result += ( + f" Preview URL: {media.preview_image_url}\n" + ) + + if hasattr(media, "width") and media.width: + result += ( + f" Dimensions: {media.width}x{media.height}\n" + ) + + if hasattr(media, "alt_text") and media.alt_text: + result += f" Alt Text: {media.alt_text}\n" + + if hasattr(media, "duration_ms") and media.duration_ms: + result += f" Duration: {media.duration_ms}ms\n" + + # Handle attachments section + elif ( + tweet.attachments + and hasattr(tweet.attachments, "media_keys") + and tweet.attachments.media_keys + ): + result += f"Media Keys: {', '.join(tweet.attachments.media_keys)}\n" + result += "Note: Media details not available in response includes\n" + + result += f"Tweet URL: https://x.com/i/web/status/{tweet.id}" + + logger.info(f"Successfully retrieved tweet: {tweet_id}") + return result + else: + return f"Tweet with ID {tweet_id} not found or not accessible" + + except Exception as e: + logger.error(f"Error getting tweet {tweet_id}: {str(e)}") + return f"Error getting tweet: {str(e)}" diff --git a/tools/velar.py b/tools/velar.py deleted file mode 100644 index 583f3582..00000000 --- a/tools/velar.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import Type - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from lib.velar import VelarApi - - -class VelarPriceHistoryInput(BaseModel): - """Input schema for retrieving token price history from Velar.""" - - token_symbol: str = Field( - ..., description="The symbol of the token to get price history for." - ) - - -class VelarGetPriceHistory(BaseTool): - name: str = "velar_token_price_history" - description: str = ( - "Retrieve historical price data for a specific cryptocurrency token from Velar. 
" - "Returns monthly price data points for the token's STX trading pair." - ) - args_schema: Type[BaseModel] = VelarPriceHistoryInput - return_direct: bool = False - - def _deploy(self, token_symbol: str, **kwargs) -> str: - """Execute the tool to get token price history.""" - obj = VelarApi() - token_stx_pools = obj.get_token_stx_pools(token_symbol.upper()) - return obj.get_token_price_history(token_stx_pools[0]["id"], "month") - - def _run(self, token_symbol: str, **kwargs) -> str: - """Execute the tool to get token price history.""" - return self._deploy(token_symbol, **kwargs) - - async def _arun(self, token_symbol: str, **kwargs) -> str: - """Async version of the tool.""" - return self._deploy(token_symbol, **kwargs) - - -class VelarGetTokensInput(BaseModel): - """Input schema for retrieving available tokens from Velar. - This tool doesn't require any input parameters but we define the schema for consistency. - """ - - pass - - -class VelarGetTokens(BaseTool): - name: str = "velar_list_tokens" - description: str = ( - "Retrieve a list of all available tokens from the Velar API with their details " - "including symbols, names, and contract information." - ) - args_schema: Type[BaseModel] = VelarGetTokensInput - return_direct: bool = False - - def _deploy(self, **kwargs) -> str: - """Execute the tool to get available tokens.""" - obj = VelarApi() - return obj.get_tokens() - - def _run(self, **kwargs) -> str: - """Execute the tool to get available tokens.""" - return self._deploy(**kwargs) - - async def _arun(self, **kwargs) -> str: - """Async version of the tool.""" - return self._deploy(**kwargs) diff --git a/tools/x_credentials.py b/tools/x_credentials.py index 714a43dc..0856fc76 100644 --- a/tools/x_credentials.py +++ b/tools/x_credentials.py @@ -13,13 +13,13 @@ class CollectXCredentialsInput(BaseModel): """Input schema for collecting X API credentials.""" - contract_principal: str = Field(..., description="Contract Principal") consumer_key: str = Field(..., description="X API Key") consumer_secret: str = Field(..., description="X API Secret") access_token: str = Field(..., description="X API Access Token") access_secret: str = Field(..., description="X API Access Secret") client_id: str = Field(..., description="OAuth 2.0 Client ID") client_secret: str = Field(..., description="OAuth 2.0 Client Secret") + bearer_token: str = Field(..., description="OAuth 2.0 Bearer Token") username: str = Field(..., description="X Username") @@ -42,13 +42,13 @@ def __init__( def _deploy( self, - contract_principal: str, consumer_key: str, consumer_secret: str, access_token: str, access_secret: str, client_id: str, client_secret: str, + bearer_token: str, username: str, **kwargs, ) -> str: @@ -59,12 +59,10 @@ def _deploy( try: logger.info("Attempting to store credentials") - logger.debug(f"Received Contract Principal: {contract_principal}") # Create XCreds object x_creds = XCredsCreate( profile_id=self.profile_id, - contract_principal=contract_principal, consumer_key=consumer_key, consumer_secret=consumer_secret, access_token=access_token, @@ -72,16 +70,15 @@ def _deploy( client_id=client_id, client_secret=client_secret, username=username, + bearer_token=bearer_token, ) # Store in database stored_creds = backend.create_x_creds(x_creds) if stored_creds: - logger.info( - f"Successfully stored X credentials for user {username} with Contract Principal {contract_principal}" - ) - return f"Successfully stored X credentials for {username} with Contract Principal {contract_principal}" + 
logger.info(f"Successfully stored X credentials for user {username}") + return f"Successfully stored X credentials for {username}" logger.error("Failed to store X credentials - no response from backend") return "Failed to store X credentials" except Exception as e: @@ -90,7 +87,6 @@ def _deploy( def _run( self, - contract_principal: str, consumer_key: str, consumer_secret: str, access_token: str, @@ -98,11 +94,11 @@ def _run( client_id: str, client_secret: str, username: str, + bearer_token: str, **kwargs, ) -> str: """Sync version of the tool.""" return self._deploy( - contract_principal=contract_principal, consumer_key=consumer_key, consumer_secret=consumer_secret, access_token=access_token, @@ -110,12 +106,12 @@ def _run( client_id=client_id, client_secret=client_secret, username=username, + bearer_token=bearer_token, **kwargs, ) async def _arun( self, - contract_principal: str, consumer_key: str, consumer_secret: str, access_token: str, @@ -123,11 +119,11 @@ async def _arun( client_id: str, client_secret: str, username: str, + bearer_token: str, **kwargs, ) -> str: """Async version of the tool.""" return self._deploy( - contract_principal=contract_principal, consumer_key=consumer_key, consumer_secret=consumer_secret, access_token=access_token, @@ -135,5 +131,6 @@ async def _arun( client_id=client_id, client_secret=client_secret, username=username, + bearer_token=bearer_token, **kwargs, ) diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..77af4f73 --- /dev/null +++ b/uv.lock @@ -0,0 +1,1655 @@ +version = 1 +revision = 2 +requires-python = ">=3.13" + +[[package]] +name = "aibtcdev-backend" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "aiohttp" }, + { name = "apscheduler" }, + { name = "cachetools" }, + { name = "fastapi" }, + { name = "httpx" }, + { name = "langchain" }, + { name = "langchain-community" }, + { name = "langchain-core" }, + { name = "langchain-openai" }, + { name = "langchain-text-splitters" }, + { name = "langgraph" }, + { name = "openai" }, + { name = "pgvector" }, + { name = "psycopg2-binary" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "python-magic" }, + { name = "python-telegram-bot" }, + { name = "sqlalchemy" }, + { name = "starlette" }, + { name = "supabase" }, + { name = "tiktoken" }, + { name = "tweepy" }, + { name = "uvicorn" }, + { name = "vecs" }, +] + +[package.optional-dependencies] +testing = [ + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-mock" }, + { name = "responses" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiohttp", specifier = "==3.12.13" }, + { name = "apscheduler", specifier = "==3.11.0" }, + { name = "cachetools", specifier = "==6.1.0" }, + { name = "fastapi", specifier = "==0.115.13" }, + { name = "httpx", specifier = ">=0.25.0" }, + { name = "langchain", specifier = "==0.3.25" }, + { name = "langchain-community", specifier = "==0.3.25" }, + { name = "langchain-core", specifier = ">=0.3.56,<1.0.0" }, + { name = "langchain-openai", specifier = "==0.3.24" }, + { name = "langchain-text-splitters", specifier = "==0.3.8" }, + { name = "langgraph", specifier = "==0.4.8" }, + { name = "openai", specifier = "==1.88.0" }, + { name = "pgvector", specifier = "<=0.4.0" }, + { name = "psycopg2-binary", specifier = "==2.9.10" }, + { name = "pydantic", specifier = "==2.11.7" }, + { name = "pytest", marker = "extra == 'testing'", specifier = "==8.3.5" }, + { name = "pytest-asyncio", marker = "extra == 'testing'", specifier = "==0.26.0" }, + { name 
= "pytest-mock", marker = "extra == 'testing'", specifier = "==3.14.1" }, + { name = "python-dotenv", specifier = "==1.1.0" }, + { name = "python-magic", specifier = "==0.4.27" }, + { name = "python-telegram-bot", specifier = "==22.1" }, + { name = "responses", marker = "extra == 'testing'", specifier = "==0.25.7" }, + { name = "sqlalchemy", specifier = "==2.0.41" }, + { name = "starlette", specifier = "<=0.46.0" }, + { name = "supabase", specifier = "==2.15.3" }, + { name = "tiktoken", specifier = "==0.9.0" }, + { name = "tweepy", specifier = "==4.15.0" }, + { name = "uvicorn", specifier = "==0.34.3" }, + { name = "vecs", specifier = "==0.4.5" }, +] +provides-extras = ["testing"] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.12.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6e/ab88e7cb2a4058bed2f7870276454f85a7c56cd6da79349eb314fc7bbcaa/aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce", size = 7819160, upload-time = "2025-06-14T15:15:41.354Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/0f/db19abdf2d86aa1deec3c1e0e5ea46a587b97c07a16516b6438428b3a3f8/aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938", size = 694910, upload-time = "2025-06-14T15:14:30.604Z" }, + { url = "https://files.pythonhosted.org/packages/d5/81/0ab551e1b5d7f1339e2d6eb482456ccbe9025605b28eed2b1c0203aaaade/aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace", size = 472566, upload-time = "2025-06-14T15:14:32.275Z" }, + { url = "https://files.pythonhosted.org/packages/34/3f/6b7d336663337672d29b1f82d1f252ec1a040fe2d548f709d3f90fa2218a/aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb", size = 464856, upload-time = "2025-06-14T15:14:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/26/7f/32ca0f170496aa2ab9b812630fac0c2372c531b797e1deb3deb4cea904bd/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7", size = 1703683, upload-time = "2025-06-14T15:14:36.034Z" }, + { url = "https://files.pythonhosted.org/packages/ec/53/d5513624b33a811c0abea8461e30a732294112318276ce3dbf047dbd9d8b/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b", size = 1684946, upload-time = "2025-06-14T15:14:38Z" }, + { url = "https://files.pythonhosted.org/packages/37/72/4c237dd127827b0247dc138d3ebd49c2ded6114c6991bbe969058575f25f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177", size = 1737017, upload-time = "2025-06-14T15:14:39.951Z" }, + { url = "https://files.pythonhosted.org/packages/0d/67/8a7eb3afa01e9d0acc26e1ef847c1a9111f8b42b82955fcd9faeb84edeb4/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef", size = 1786390, upload-time = "2025-06-14T15:14:42.151Z" }, + { url = "https://files.pythonhosted.org/packages/48/19/0377df97dd0176ad23cd8cad4fd4232cfeadcec6c1b7f036315305c98e3f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103", size = 1708719, upload-time = "2025-06-14T15:14:44.039Z" }, + { url = "https://files.pythonhosted.org/packages/61/97/ade1982a5c642b45f3622255173e40c3eed289c169f89d00eeac29a89906/aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da", size = 1622424, upload-time = "2025-06-14T15:14:45.945Z" }, + { url = "https://files.pythonhosted.org/packages/99/ab/00ad3eea004e1d07ccc406e44cfe2b8da5acb72f8c66aeeb11a096798868/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d", size = 1675447, upload-time = "2025-06-14T15:14:47.911Z" }, + { url = "https://files.pythonhosted.org/packages/3f/fe/74e5ce8b2ccaba445fe0087abc201bfd7259431d92ae608f684fcac5d143/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041", size = 1707110, upload-time = "2025-06-14T15:14:50.334Z" }, + { url = "https://files.pythonhosted.org/packages/ef/c4/39af17807f694f7a267bd8ab1fbacf16ad66740862192a6c8abac2bff813/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1", size = 1649706, upload-time = "2025-06-14T15:14:52.378Z" }, + { url = "https://files.pythonhosted.org/packages/38/e8/f5a0a5f44f19f171d8477059aa5f28a158d7d57fe1a46c553e231f698435/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1", size = 1725839, upload-time = "2025-06-14T15:14:54.617Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ac/81acc594c7f529ef4419d3866913f628cd4fa9cab17f7bf410a5c3c04c53/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911", size = 1759311, upload-time = "2025-06-14T15:14:56.597Z" }, + { url = "https://files.pythonhosted.org/packages/38/0d/aabe636bd25c6ab7b18825e5a97d40024da75152bec39aa6ac8b7a677630/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3", size = 1708202, upload-time = "2025-06-14T15:14:58.598Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/ab/561ef2d8a223261683fb95a6283ad0d36cb66c87503f3a7dde7afe208bb2/aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd", size = 420794, upload-time = "2025-06-14T15:15:00.939Z" }, + { url = "https://files.pythonhosted.org/packages/9d/47/b11d0089875a23bff0abd3edb5516bcd454db3fefab8604f5e4b07bd6210/aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706", size = 446735, upload-time = "2025-06-14T15:15:02.858Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424, upload-time = "2024-12-13T17:10:40.86Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597, upload-time = "2024-12-13T17:10:38.469Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949, upload-time = "2025-03-17T00:02:54.77Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, +] + +[[package]] +name = "apscheduler" +version = "3.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tzlocal" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/00/6d6814ddc19be2df62c8c898c4df6b5b1914f3bd024b780028caa392d186/apscheduler-3.11.0.tar.gz", hash = "sha256:4c622d250b0955a65d5d0eb91c33e6d43fd879834bf541e0a18661ae60460133", size = 107347, upload-time = "2024-11-24T19:39:26.463Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/ae/9a053dd9229c0fde6b1f1f33f609ccff1ee79ddda364c756a924c6d8563b/APScheduler-3.11.0-py3-none-any.whl", hash = "sha256:fc134ca32e50f5eadcc4938e3a4545ab19131435e851abb40b34d63d5141c6da", size = 64004, upload-time = 
"2024-11-24T19:39:24.442Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "cachetools" +version = "6.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/89/817ad5d0411f136c484d535952aef74af9b25e0d99e90cdffbe121e6d628/cachetools-6.1.0.tar.gz", hash = "sha256:b4c4f404392848db3ce7aac34950d17be4d864da4b8b66911008e430bc544587", size = 30714, upload-time = "2025-06-16T18:51:03.07Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/f0/2ef431fe4141f5e334759d73e81120492b23b2824336883a91ac04ba710b/cachetools-6.1.0-py3-none-any.whl", hash = "sha256:1c7bb3cf9193deaf3508b7c5f2a79986c13ea38965c5adcff1f84519cf39163e", size = 11189, upload-time = "2025-06-16T18:51:01.514Z" }, +] + +[[package]] +name = "certifi" +version = "2025.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = 
"2024-09-04T20:44:32.01Z" }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dataclasses-json" +version = "0.6.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "marshmallow" }, + { name = "typing-inspect" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/a4/f71d9cf3a5ac257c993b5ca3f93df5f7fb395c725e7f1e6479d2514173c3/dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0", size = 32227, upload-time = "2024-06-09T16:20:19.103Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a", size = 28686, upload-time = "2024-06-09T16:20:16.715Z" }, +] + +[[package]] +name = "deprecated" +version = "1.2.18" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/97/06afe62762c9a8a86af0cfb7bfdab22a43ad17138b07af5b1a58442690a2/deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d", size = 2928744, upload-time = "2025-01-27T10:46:25.7Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/c6/ac0b6c1e2d138f1002bcf799d330bd6d85084fece321e662a14223794041/Deprecated-1.2.18-py2.py3-none-any.whl", hash = 
"sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec", size = 9998, upload-time = "2025-01-27T10:46:09.186Z" }, +] + +[[package]] +name = "deprecation" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/d3/8ae2869247df154b64c1884d7346d412fed0c49df84db635aab2d1c40e62/deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff", size = 173788, upload-time = "2020-04-20T14:23:38.738Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/c3/253a89ee03fc9b9682f1541728eb66db7db22148cd94f89ab22528cd1e1b/deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a", size = 11178, upload-time = "2020-04-20T14:23:36.581Z" }, +] + +[[package]] +name = "distro" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, +] + +[[package]] +name = "fastapi" +version = "0.115.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/64/ec0788201b5554e2a87c49af26b77a4d132f807a0fa9675257ac92c6aa0e/fastapi-0.115.13.tar.gz", hash = "sha256:55d1d25c2e1e0a0a50aceb1c8705cd932def273c102bff0b1c1da88b3c6eb307", size = 295680, upload-time = "2025-06-17T11:49:45.575Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/4a/e17764385382062b0edbb35a26b7cf76d71e27e456546277a42ba6545c6e/fastapi-0.115.13-py3-none-any.whl", hash = "sha256:0a0cab59afa7bab22f5eb347f8c9864b681558c278395e94035a741fc10cd865", size = 95315, upload-time = "2025-06-17T11:49:44.106Z" }, +] + +[[package]] +name = "flupy" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/1f/ebbfcbaa4a117bfa2b334aac14d9d51570d0e8fee5d67426e80a80c15fc5/flupy-1.2.2.tar.gz", hash = "sha256:c28ff659b55800a26a64dd0ac1cc616355900718b87978c2db06f5e5cfb58535", size = 11971, upload-time = "2025-05-15T18:38:51.777Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/0c/9e3635262e8978882a76e8043fa178f907c2bb54132afaffa15ad720b6e5/flupy-1.2.2-py3-none-any.whl", hash = "sha256:df78e86a6b26ec1be558310b9dd967952d06cf138b1e8c0b422987fcd3d44ca0", size = 12143, upload-time = "2025-05-15T18:38:50.149Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" } +wheels = [ + 
{ url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" }, + { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" }, + { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" }, + { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" }, + { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" }, + { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" }, + { url = "https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" }, + { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" }, + { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" }, + { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" }, + { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" }, + { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" }, + { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" }, + { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" }, + { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" }, + { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" }, + { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" }, + { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" }, + { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" }, + { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" }, + { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" }, + { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" }, + { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" }, + { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" }, + { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" }, + { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" }, + { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" }, + { url = 
"https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" }, + { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, +] + +[[package]] +name = "gotrue" +version = "2.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", extra = ["http2"] }, + { name = "pydantic" }, + { name = "pyjwt" }, + { name = "pytest-mock" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4d/97/577c6d67f2d3687199ba7c5628af65108f346a15877c93831081ab67a341/gotrue-2.12.0.tar.gz", hash = "sha256:b9ea164ee52964d8364c550cde16dd0e9576241a4cffeaa52eca339f61d1d14b", size = 37883, upload-time = "2025-03-26T11:49:12.661Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/5c/fe0dd370294c782fc1f627bb7e3eedd87c3d4d7f8d2b39fe8dd63c3096a8/gotrue-2.12.0-py3-none-any.whl", hash = "sha256:de94928eebb42d7d9672dbe4fbd0b51140a45051a31626a06dad2ad44a9a976a", size = 43649, upload-time = "2025-03-26T11:49:11.234Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752, upload-time = "2025-06-05T16:16:09.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" }, + { url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" }, + { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time = "2025-06-05T16:41:37.89Z" }, + { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" }, + { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" }, + { url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" }, + { url = "https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121, upload-time = "2025-06-05T16:12:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603, upload-time = "2025-06-05T16:20:12.651Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479, upload-time = "2025-06-05T16:10:47.525Z" }, + { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" }, + { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" }, + { url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" }, + { url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = 
"2025-06-05T16:12:53.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "h2" +version = "4.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "hpack" }, + { name = "hyperframe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1b/38/d7f80fd13e6582fb8e0df8c9a653dcc02b03ca34f4d72f34869298c5baf8/h2-4.2.0.tar.gz", hash = "sha256:c8a52129695e88b1a0578d8d2cc6842bbd79128ac685463b887ee278126ad01f", size = 2150682, upload-time = "2025-02-02T07:43:51.815Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/9e/984486f2d0a0bd2b024bf4bc1c62688fcafa9e61991f041fb0e2def4a982/h2-4.2.0-py3-none-any.whl", hash = "sha256:479a53ad425bb29af087f3458a61d30780bc818e4ebcf01f0b536ba916462ed0", size = 60957, upload-time = "2025-02-01T11:02:26.481Z" }, +] + +[[package]] +name = "hpack" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/48/71de9ed269fdae9c8057e5a4c0aa7402e8bb16f2c6e90b3aa53327b113f8/hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca", size = 51276, upload-time = "2025-01-22T21:44:58.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, upload-time = "2025-01-22T21:44:56.92Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[package.optional-dependencies] +http2 = [ + { name = "h2" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/60/8f4281fa9bbf3c8034fd54c0e7412e66edbab6bc74c4996bd616f8d0406e/httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721", size = 12624, upload-time = "2023-12-22T08:01:21.083Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/9b/a181f281f65d776426002f330c31849b86b31fc9d848db62e16f03ff739f/httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f", size = 7819, upload-time = "2023-12-22T08:01:19.89Z" }, +] + +[[package]] +name = "hyperframe" +version = "6.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/02/e7/94f8232d4a74cc99514c13a9f995811485a6903d48e5d952771ef6322e30/hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08", size = 26566, upload-time = "2025-01-22T21:41:49.302Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "jiter" +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/ee/9d/ae7ddb4b8ab3fb1b51faf4deb36cb48a4fbbd7cb36bad6a5fca4741306f7/jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500", size = 162759, upload-time = "2025-05-18T19:04:59.73Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/b0/279597e7a270e8d22623fea6c5d4eeac328e7d95c236ed51a2b884c54f70/jiter-0.10.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0588107ec8e11b6f5ef0e0d656fb2803ac6cf94a96b2b9fc675c0e3ab5e8644", size = 311617, upload-time = "2025-05-18T19:04:02.078Z" }, + { url = "https://files.pythonhosted.org/packages/91/e3/0916334936f356d605f54cc164af4060e3e7094364add445a3bc79335d46/jiter-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cafc4628b616dc32530c20ee53d71589816cf385dd9449633e910d596b1f5c8a", size = 318947, upload-time = "2025-05-18T19:04:03.347Z" }, + { url = "https://files.pythonhosted.org/packages/6a/8e/fd94e8c02d0e94539b7d669a7ebbd2776e51f329bb2c84d4385e8063a2ad/jiter-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:520ef6d981172693786a49ff5b09eda72a42e539f14788124a07530f785c3ad6", size = 344618, upload-time = "2025-05-18T19:04:04.709Z" }, + { url = "https://files.pythonhosted.org/packages/6f/b0/f9f0a2ec42c6e9c2e61c327824687f1e2415b767e1089c1d9135f43816bd/jiter-0.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:554dedfd05937f8fc45d17ebdf298fe7e0c77458232bcb73d9fbbf4c6455f5b3", size = 368829, upload-time = "2025-05-18T19:04:06.912Z" }, + { url = "https://files.pythonhosted.org/packages/e8/57/5bbcd5331910595ad53b9fd0c610392ac68692176f05ae48d6ce5c852967/jiter-0.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bc299da7789deacf95f64052d97f75c16d4fc8c4c214a22bf8d859a4288a1c2", size = 491034, upload-time = "2025-05-18T19:04:08.222Z" }, + { url = "https://files.pythonhosted.org/packages/9b/be/c393df00e6e6e9e623a73551774449f2f23b6ec6a502a3297aeeece2c65a/jiter-0.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5161e201172de298a8a1baad95eb85db4fb90e902353b1f6a41d64ea64644e25", size = 388529, upload-time = "2025-05-18T19:04:09.566Z" }, + { url = "https://files.pythonhosted.org/packages/42/3e/df2235c54d365434c7f150b986a6e35f41ebdc2f95acea3036d99613025d/jiter-0.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2227db6ba93cb3e2bf67c87e594adde0609f146344e8207e8730364db27041", size = 350671, upload-time = "2025-05-18T19:04:10.98Z" }, + { url = "https://files.pythonhosted.org/packages/c6/77/71b0b24cbcc28f55ab4dbfe029f9a5b73aeadaba677843fc6dc9ed2b1d0a/jiter-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15acb267ea5e2c64515574b06a8bf393fbfee6a50eb1673614aa45f4613c0cca", size = 390864, upload-time = "2025-05-18T19:04:12.722Z" }, + { url = "https://files.pythonhosted.org/packages/6a/d3/ef774b6969b9b6178e1d1e7a89a3bd37d241f3d3ec5f8deb37bbd203714a/jiter-0.10.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:901b92f2e2947dc6dfcb52fd624453862e16665ea909a08398dde19c0731b7f4", size = 522989, upload-time = "2025-05-18T19:04:14.261Z" }, + { url = "https://files.pythonhosted.org/packages/0c/41/9becdb1d8dd5d854142f45a9d71949ed7e87a8e312b0bede2de849388cb9/jiter-0.10.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d0cb9a125d5a3ec971a094a845eadde2db0de85b33c9f13eb94a0c63d463879e", size = 513495, upload-time = "2025-05-18T19:04:15.603Z" }, + { url = 
"https://files.pythonhosted.org/packages/9c/36/3468e5a18238bdedae7c4d19461265b5e9b8e288d3f86cd89d00cbb48686/jiter-0.10.0-cp313-cp313-win32.whl", hash = "sha256:48a403277ad1ee208fb930bdf91745e4d2d6e47253eedc96e2559d1e6527006d", size = 211289, upload-time = "2025-05-18T19:04:17.541Z" }, + { url = "https://files.pythonhosted.org/packages/7e/07/1c96b623128bcb913706e294adb5f768fb7baf8db5e1338ce7b4ee8c78ef/jiter-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:75f9eb72ecb640619c29bf714e78c9c46c9c4eaafd644bf78577ede459f330d4", size = 205074, upload-time = "2025-05-18T19:04:19.21Z" }, + { url = "https://files.pythonhosted.org/packages/54/46/caa2c1342655f57d8f0f2519774c6d67132205909c65e9aa8255e1d7b4f4/jiter-0.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:28ed2a4c05a1f32ef0e1d24c2611330219fed727dae01789f4a335617634b1ca", size = 318225, upload-time = "2025-05-18T19:04:20.583Z" }, + { url = "https://files.pythonhosted.org/packages/43/84/c7d44c75767e18946219ba2d703a5a32ab37b0bc21886a97bc6062e4da42/jiter-0.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a4c418b1ec86a195f1ca69da8b23e8926c752b685af665ce30777233dfe070", size = 350235, upload-time = "2025-05-18T19:04:22.363Z" }, + { url = "https://files.pythonhosted.org/packages/01/16/f5a0135ccd968b480daad0e6ab34b0c7c5ba3bc447e5088152696140dcb3/jiter-0.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d7bfed2fe1fe0e4dda6ef682cee888ba444b21e7a6553e03252e4feb6cf0adca", size = 207278, upload-time = "2025-05-18T19:04:23.627Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9b/1d646da42c3de6c2188fdaa15bce8ecb22b635904fc68be025e21249ba44/jiter-0.10.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:5e9251a5e83fab8d87799d3e1a46cb4b7f2919b895c6f4483629ed2446f66522", size = 310866, upload-time = "2025-05-18T19:04:24.891Z" }, + { url = "https://files.pythonhosted.org/packages/ad/0e/26538b158e8a7c7987e94e7aeb2999e2e82b1f9d2e1f6e9874ddf71ebda0/jiter-0.10.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:023aa0204126fe5b87ccbcd75c8a0d0261b9abdbbf46d55e7ae9f8e22424eeb8", size = 318772, upload-time = "2025-05-18T19:04:26.161Z" }, + { url = "https://files.pythonhosted.org/packages/7b/fb/d302893151caa1c2636d6574d213e4b34e31fd077af6050a9c5cbb42f6fb/jiter-0.10.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c189c4f1779c05f75fc17c0c1267594ed918996a231593a21a5ca5438445216", size = 344534, upload-time = "2025-05-18T19:04:27.495Z" }, + { url = "https://files.pythonhosted.org/packages/01/d8/5780b64a149d74e347c5128d82176eb1e3241b1391ac07935693466d6219/jiter-0.10.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15720084d90d1098ca0229352607cd68256c76991f6b374af96f36920eae13c4", size = 369087, upload-time = "2025-05-18T19:04:28.896Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5b/f235a1437445160e777544f3ade57544daf96ba7e96c1a5b24a6f7ac7004/jiter-0.10.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f2fb68e5f1cfee30e2b2a09549a00683e0fde4c6a2ab88c94072fc33cb7426", size = 490694, upload-time = "2025-05-18T19:04:30.183Z" }, + { url = "https://files.pythonhosted.org/packages/85/a9/9c3d4617caa2ff89cf61b41e83820c27ebb3f7b5fae8a72901e8cd6ff9be/jiter-0.10.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce541693355fc6da424c08b7edf39a2895f58d6ea17d92cc2b168d20907dee12", size = 388992, upload-time = "2025-05-18T19:04:32.028Z" }, + { url = 
"https://files.pythonhosted.org/packages/68/b1/344fd14049ba5c94526540af7eb661871f9c54d5f5601ff41a959b9a0bbd/jiter-0.10.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31c50c40272e189d50006ad5c73883caabb73d4e9748a688b216e85a9a9ca3b9", size = 351723, upload-time = "2025-05-18T19:04:33.467Z" }, + { url = "https://files.pythonhosted.org/packages/41/89/4c0e345041186f82a31aee7b9d4219a910df672b9fef26f129f0cda07a29/jiter-0.10.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fa3402a2ff9815960e0372a47b75c76979d74402448509ccd49a275fa983ef8a", size = 392215, upload-time = "2025-05-18T19:04:34.827Z" }, + { url = "https://files.pythonhosted.org/packages/55/58/ee607863e18d3f895feb802154a2177d7e823a7103f000df182e0f718b38/jiter-0.10.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:1956f934dca32d7bb647ea21d06d93ca40868b505c228556d3373cbd255ce853", size = 522762, upload-time = "2025-05-18T19:04:36.19Z" }, + { url = "https://files.pythonhosted.org/packages/15/d0/9123fb41825490d16929e73c212de9a42913d68324a8ce3c8476cae7ac9d/jiter-0.10.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:fcedb049bdfc555e261d6f65a6abe1d5ad68825b7202ccb9692636c70fcced86", size = 513427, upload-time = "2025-05-18T19:04:37.544Z" }, + { url = "https://files.pythonhosted.org/packages/d8/b3/2bd02071c5a2430d0b70403a34411fc519c2f227da7b03da9ba6a956f931/jiter-0.10.0-cp314-cp314-win32.whl", hash = "sha256:ac509f7eccca54b2a29daeb516fb95b6f0bd0d0d8084efaf8ed5dfc7b9f0b357", size = 210127, upload-time = "2025-05-18T19:04:38.837Z" }, + { url = "https://files.pythonhosted.org/packages/03/0c/5fe86614ea050c3ecd728ab4035534387cd41e7c1855ef6c031f1ca93e3f/jiter-0.10.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5ed975b83a2b8639356151cef5c0d597c68376fc4922b45d0eb384ac058cfa00", size = 318527, upload-time = "2025-05-18T19:04:40.612Z" }, + { url = "https://files.pythonhosted.org/packages/b3/4a/4175a563579e884192ba6e81725fc0448b042024419be8d83aa8a80a3f44/jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5", size = 354213, upload-time = "2025-05-18T19:04:41.894Z" }, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpointer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/78/18813351fe5d63acad16aec57f94ec2b70a09e53ca98145589e185423873/jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c", size = 21699, upload-time = "2023-06-26T12:07:29.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" }, +] + +[[package]] +name = "jsonpointer" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = 
"sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, +] + +[[package]] +name = "langchain" +version = "0.3.25" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langchain-text-splitters" }, + { name = "langsmith" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/f9/a256609096a9fc7a1b3a6300a97000091efabdf21555a97988f93d4d9258/langchain-0.3.25.tar.gz", hash = "sha256:a1d72aa39546a23db08492d7228464af35c9ee83379945535ceef877340d2a3a", size = 10225045, upload-time = "2025-05-02T18:39:04.353Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/5c/5c0be747261e1f8129b875fa3bfea736bc5fe17652f9d5e15ca118571b6f/langchain-0.3.25-py3-none-any.whl", hash = "sha256:931f7d2d1eaf182f9f41c5e3272859cfe7f94fc1f7cef6b3e5a46024b4884c21", size = 1011008, upload-time = "2025-05-02T18:39:02.21Z" }, +] + +[[package]] +name = "langchain-community" +version = "0.3.25" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "dataclasses-json" }, + { name = "httpx-sse" }, + { name = "langchain" }, + { name = "langchain-core" }, + { name = "langsmith" }, + { name = "numpy" }, + { name = "pydantic-settings" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, + { name = "tenacity" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/9b/332e69933ce7d96153fe5468d5428052ae20b143fa0dba0c78eea8859f94/langchain_community-0.3.25.tar.gz", hash = "sha256:a536888a48b36184dee20df86d266827a01916397fb398af2088ab7c3dfee684", size = 33235586, upload-time = "2025-06-10T20:19:08.809Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/e1/975bcd11e86de74c10023d291879810d4eaffcfbb5d4c0d8fb6fb41b8247/langchain_community-0.3.25-py3-none-any.whl", hash = "sha256:0d7f673d463019ab1aca4e50e750048214a7772efd2c8e1d59256739b8318f97", size = 2529170, upload-time = "2025-06-10T20:19:06.775Z" }, +] + +[[package]] +name = "langchain-core" +version = "0.3.65" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpatch" }, + { name = "langsmith" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "tenacity" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/8a/d08c83195d1ef26c42728412ab92ab08211893906b283abce65775e21327/langchain_core-0.3.65.tar.gz", hash = "sha256:54b5e0c8d9bb405415c3211da508ef9cfe0acbe5b490d1b4a15664408ee82d9b", size = 558557, upload-time = "2025-06-10T20:08:28.94Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/f0/31db18b7b8213266aed926ce89b5bdd84ccde7ee2edf4cab14e3dd2bfcf1/langchain_core-0.3.65-py3-none-any.whl", hash = "sha256:80e8faf6e9f331f8ef728f3fe793549f1d3fb244fcf9e1bdcecab6a6f4669394", size = 438052, upload-time = "2025-06-10T20:08:27.393Z" }, +] + +[[package]] +name = "langchain-openai" +version = "0.3.24" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "openai" }, + { name = "tiktoken" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/e1/7be9384c5cb6fd6d0466ac6e781e44c3d80081c624faa7a9d2f8bf3a59ba/langchain_openai-0.3.24.tar.gz", hash = "sha256:cec1ab4ce7a8680af1eb11427b4384d2ceb46e9b20ff3f7beb0b0d83cab61a97", size = 687773, upload-time = 
"2025-06-17T20:20:57.422Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/9b/b8f86d78dbc651decd684ab938a1340e1ad3ba1dbcef805e12db65dee0ba/langchain_openai-0.3.24-py3-none-any.whl", hash = "sha256:3db7bb2964f86636276a8f4bbed4514daf13865b80896e547ff7ea13ce98e593", size = 68950, upload-time = "2025-06-17T20:20:56.09Z" }, +] + +[[package]] +name = "langchain-text-splitters" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/ac/b4a25c5716bb0103b1515f1f52cc69ffb1035a5a225ee5afe3aed28bf57b/langchain_text_splitters-0.3.8.tar.gz", hash = "sha256:116d4b9f2a22dda357d0b79e30acf005c5518177971c66a9f1ab0edfdb0f912e", size = 42128, upload-time = "2025-04-04T14:03:51.521Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/a3/3696ff2444658053c01b6b7443e761f28bb71217d82bb89137a978c5f66f/langchain_text_splitters-0.3.8-py3-none-any.whl", hash = "sha256:e75cc0f4ae58dcf07d9f18776400cf8ade27fadd4ff6d264df6278bb302f6f02", size = 32440, upload-time = "2025-04-04T14:03:50.6Z" }, +] + +[[package]] +name = "langgraph" +version = "0.4.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, + { name = "langgraph-prebuilt" }, + { name = "langgraph-sdk" }, + { name = "pydantic" }, + { name = "xxhash" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9b/53/03380b675fef3d00d2d270e530d1a8bfe4e6f27117016a478670c9c74469/langgraph-0.4.8.tar.gz", hash = "sha256:48445ac8a351b7bdc6dee94e2e6a597f8582e0516ebd9dea0fd0164ae01b915e", size = 453277, upload-time = "2025-06-02T23:26:16.979Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/8a/fe05ec63ee4c3889a8b89679a6bdd1be6087962818996f3b361da23a5529/langgraph-0.4.8-py3-none-any.whl", hash = "sha256:273b02782669a474ba55ef4296607ac3bac9e93639d37edc0d32d8cf1a41a45b", size = 152444, upload-time = "2025-06-02T23:26:15.107Z" }, +] + +[[package]] +name = "langgraph-checkpoint" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "ormsgpack" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f9/30/c04abcb2ac30f200dbfde5839ca3832552fe2bd852d9e85a68e47418a11c/langgraph_checkpoint-2.1.0.tar.gz", hash = "sha256:cdaa2f0b49aa130ab185c02d82f02b40299a1fbc9ac59ac20cecce09642a1abe", size = 135501, upload-time = "2025-06-16T22:05:01.918Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/41/390a97d9d0abe5b71eea2f6fb618d8adadefa674e97f837bae6cda670bc7/langgraph_checkpoint-2.1.0-py3-none-any.whl", hash = "sha256:4cea3e512081da1241396a519cbfe4c5d92836545e2c64e85b6f5c34a1b8bc61", size = 43844, upload-time = "2025-06-16T22:05:00.758Z" }, +] + +[[package]] +name = "langgraph-prebuilt" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/88/f5/15b26cda94ebb89400048d478a3b1927005d85e273a557d8683f4cda775c/langgraph_prebuilt-0.2.2.tar.gz", hash = "sha256:0a5d1f651f97c848cd1c3dd0ef017614f47ee74effb7375b59ac639e41b253f9", size = 112785, upload-time = "2025-05-28T13:39:54.235Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/46/c98fec1f8620cbffbabda346a2c68155eec3720c6c3393ab3b9529618810/langgraph_prebuilt-0.2.2-py3-none-any.whl", hash = 
"sha256:72de5ef1d969a8f02ad7adc7cc1915bb9b4467912d57ba60da34b5a70fdad1f6", size = 23748, upload-time = "2025-05-28T13:39:53.361Z" }, +] + +[[package]] +name = "langgraph-sdk" +version = "0.1.70" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c1/dd/c074adf91d2fe67f00dc3be4348119f40a9d0ead9e55c958f81492c522c0/langgraph_sdk-0.1.70.tar.gz", hash = "sha256:cc65ec33bcdf8c7008d43da2d2b0bc1dd09f98d21a7f636828d9379535069cf9", size = 71530, upload-time = "2025-05-21T22:23:22.502Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/77/b0930ca5d54ef91e2bdb37e0f7dbeda1923e1e0b5b71ab3af35c103c2e39/langgraph_sdk-0.1.70-py3-none-any.whl", hash = "sha256:47f2b04a964f40a610c1636b387ea52f961ce7a233afc21d3103e5faac8ca1e5", size = 49986, upload-time = "2025-05-21T22:23:21.377Z" }, +] + +[[package]] +name = "langsmith" +version = "0.3.45" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "requests-toolbelt" }, + { name = "zstandard" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/86/b941012013260f95af2e90a3d9415af4a76a003a28412033fc4b09f35731/langsmith-0.3.45.tar.gz", hash = "sha256:1df3c6820c73ed210b2c7bc5cdb7bfa19ddc9126cd03fdf0da54e2e171e6094d", size = 348201, upload-time = "2025-06-05T05:10:28.948Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/f4/c206c0888f8a506404cb4f16ad89593bdc2f70cf00de26a1a0a7a76ad7a3/langsmith-0.3.45-py3-none-any.whl", hash = "sha256:5b55f0518601fa65f3bb6b1a3100379a96aa7b3ed5e9380581615ba9c65ed8ed", size = 363002, upload-time = "2025-06-05T05:10:27.228Z" }, +] + +[[package]] +name = "marshmallow" +version = "3.26.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/5e53d26b42ab75491cda89b871dab9e97c840bf12c63ec58a1919710cd06/marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6", size = 221825, upload-time = "2025-02-03T15:32:25.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/75/51952c7b2d3873b44a0028b1bd26a25078c18f92f256608e8d1dc61b39fd/marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c", size = 50878, upload-time = "2025-02-03T15:32:22.295Z" }, +] + +[[package]] +name = "multidict" +version = "6.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/b5/59f27b4ce9951a4bce56b88ba5ff5159486797ab18863f2b4c1c5e8465bd/multidict-6.5.0.tar.gz", hash = "sha256:942bd8002492ba819426a8d7aefde3189c1b87099cdf18aaaefefcf7f3f7b6d2", size = 98512, upload-time = "2025-06-17T14:15:56.556Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/c9/092c4e9402b6d16de761cff88cb842a5c8cc50ccecaf9c4481ba53264b9e/multidict-6.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:53d92df1752df67a928fa7f884aa51edae6f1cf00eeb38cbcf318cf841c17456", size = 73486, upload-time = "2025-06-17T14:14:37.238Z" }, + { url = "https://files.pythonhosted.org/packages/08/f9/6f7ddb8213f5fdf4db48d1d640b78e8aef89b63a5de8a2313286db709250/multidict-6.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:680210de2c38eef17ce46b8df8bf2c1ece489261a14a6e43c997d49843a27c99", size = 43745, upload-time = "2025-06-17T14:14:38.32Z" }, + { url = "https://files.pythonhosted.org/packages/f3/a7/b9be0163bfeee3bb08a77a1705e24eb7e651d594ea554107fac8a1ca6a4d/multidict-6.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e279259bcb936732bfa1a8eec82b5d2352b3df69d2fa90d25808cfc403cee90a", size = 42135, upload-time = "2025-06-17T14:14:39.897Z" }, + { url = "https://files.pythonhosted.org/packages/8e/30/93c8203f943a417bda3c573a34d5db0cf733afdfffb0ca78545c7716dbd8/multidict-6.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1c185fc1069781e3fc8b622c4331fb3b433979850392daa5efbb97f7f9959bb", size = 238585, upload-time = "2025-06-17T14:14:41.332Z" }, + { url = "https://files.pythonhosted.org/packages/9d/fe/2582b56a1807604774f566eeef183b0d6b148f4b89d1612cd077567b2e1e/multidict-6.5.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6bb5f65ff91daf19ce97f48f63585e51595539a8a523258b34f7cef2ec7e0617", size = 236174, upload-time = "2025-06-17T14:14:42.602Z" }, + { url = "https://files.pythonhosted.org/packages/9b/c4/d8b66d42d385bd4f974cbd1eaa8b265e6b8d297249009f312081d5ded5c7/multidict-6.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8646b4259450c59b9286db280dd57745897897284f6308edbdf437166d93855", size = 250145, upload-time = "2025-06-17T14:14:43.944Z" }, + { url = "https://files.pythonhosted.org/packages/bc/64/62feda5093ee852426aae3df86fab079f8bf1cdbe403e1078c94672ad3ec/multidict-6.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d245973d4ecc04eea0a8e5ebec7882cf515480036e1b48e65dffcfbdf86d00be", size = 243470, upload-time = "2025-06-17T14:14:45.343Z" }, + { url = "https://files.pythonhosted.org/packages/67/dc/9f6fa6e854625cf289c0e9f4464b40212a01f76b2f3edfe89b6779b4fb93/multidict-6.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a133e7ddc9bc7fb053733d0ff697ce78c7bf39b5aec4ac12857b6116324c8d75", size = 236968, upload-time = "2025-06-17T14:14:46.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/ae/4b81c6e3745faee81a156f3f87402315bdccf04236f75c03e37be19c94ff/multidict-6.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80d696fa38d738fcebfd53eec4d2e3aeb86a67679fd5e53c325756682f152826", size = 236575, upload-time = "2025-06-17T14:14:47.929Z" }, + { url = "https://files.pythonhosted.org/packages/8a/fa/4089d7642ea344226e1bfab60dd588761d4791754f8072e911836a39bedf/multidict-6.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:20d30c9410ac3908abbaa52ee5967a754c62142043cf2ba091e39681bd51d21a", size = 247632, upload-time = "2025-06-17T14:14:49.525Z" }, + { url = "https://files.pythonhosted.org/packages/16/ee/a353dac797de0f28fb7f078cc181c5f2eefe8dd16aa11a7100cbdc234037/multidict-6.5.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6c65068cc026f217e815fa519d8e959a7188e94ec163ffa029c94ca3ef9d4a73", size = 243520, upload-time = "2025-06-17T14:14:50.83Z" }, + { url = "https://files.pythonhosted.org/packages/50/ec/560deb3d2d95822d6eb1bcb1f1cb728f8f0197ec25be7c936d5d6a5d133c/multidict-6.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e355ac668a8c3e49c2ca8daa4c92f0ad5b705d26da3d5af6f7d971e46c096da7", size = 248551, upload-time = "2025-06-17T14:14:52.229Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/85/ddf277e67c78205f6695f2a7639be459bca9cc353b962fd8085a492a262f/multidict-6.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:08db204213d0375a91a381cae0677ab95dd8c67a465eb370549daf6dbbf8ba10", size = 258362, upload-time = "2025-06-17T14:14:53.934Z" }, + { url = "https://files.pythonhosted.org/packages/02/fc/d64ee1df9b87c5210f2d4c419cab07f28589c81b4e5711eda05a122d0614/multidict-6.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ffa58e3e215af8f6536dc837a990e456129857bb6fd546b3991be470abd9597a", size = 253862, upload-time = "2025-06-17T14:14:55.323Z" }, + { url = "https://files.pythonhosted.org/packages/c9/7c/a2743c00d9e25f4826d3a77cc13d4746398872cf21c843eef96bb9945665/multidict-6.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3e86eb90015c6f21658dbd257bb8e6aa18bdb365b92dd1fba27ec04e58cdc31b", size = 247391, upload-time = "2025-06-17T14:14:57.293Z" }, + { url = "https://files.pythonhosted.org/packages/9b/03/7773518db74c442904dbd349074f1e7f2a854cee4d9529fc59e623d3949e/multidict-6.5.0-cp313-cp313-win32.whl", hash = "sha256:f34a90fbd9959d0f857323bd3c52b3e6011ed48f78d7d7b9e04980b8a41da3af", size = 41115, upload-time = "2025-06-17T14:14:59.33Z" }, + { url = "https://files.pythonhosted.org/packages/eb/9a/6fc51b1dc11a7baa944bc101a92167d8b0f5929d376a8c65168fc0d35917/multidict-6.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:fcb2aa79ac6aef8d5b709bbfc2fdb1d75210ba43038d70fbb595b35af470ce06", size = 44768, upload-time = "2025-06-17T14:15:00.427Z" }, + { url = "https://files.pythonhosted.org/packages/82/2d/0d010be24b663b3c16e3d3307bbba2de5ae8eec496f6027d5c0515b371a8/multidict-6.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:6dcee5e7e92060b4bb9bb6f01efcbb78c13d0e17d9bc6eec71660dd71dc7b0c2", size = 41770, upload-time = "2025-06-17T14:15:01.854Z" }, + { url = "https://files.pythonhosted.org/packages/aa/d1/a71711a5f32f84b7b036e82182e3250b949a0ce70d51a2c6a4079e665449/multidict-6.5.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:cbbc88abea2388fde41dd574159dec2cda005cb61aa84950828610cb5010f21a", size = 80450, upload-time = "2025-06-17T14:15:02.968Z" }, + { url = "https://files.pythonhosted.org/packages/0f/a2/953a9eede63a98fcec2c1a2c1a0d88de120056219931013b871884f51b43/multidict-6.5.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70b599f70ae6536e5976364d3c3cf36f40334708bd6cebdd1e2438395d5e7676", size = 46971, upload-time = "2025-06-17T14:15:04.149Z" }, + { url = "https://files.pythonhosted.org/packages/44/61/60250212953459edda2c729e1d85130912f23c67bd4f585546fe4bdb1578/multidict-6.5.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:828bab777aa8d29d59700018178061854e3a47727e0611cb9bec579d3882de3b", size = 45548, upload-time = "2025-06-17T14:15:05.666Z" }, + { url = "https://files.pythonhosted.org/packages/11/b6/e78ee82e96c495bc2582b303f68bed176b481c8d81a441fec07404fce2ca/multidict-6.5.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9695fc1462f17b131c111cf0856a22ff154b0480f86f539d24b2778571ff94d", size = 238545, upload-time = "2025-06-17T14:15:06.88Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0f/6132ca06670c8d7b374c3a4fd1ba896fc37fbb66b0de903f61db7d1020ec/multidict-6.5.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b5ac6ebaf5d9814b15f399337ebc6d3a7f4ce9331edd404e76c49a01620b68d", size = 229931, upload-time = "2025-06-17T14:15:08.24Z" }, + { url = 
"https://files.pythonhosted.org/packages/c0/63/d9957c506e6df6b3e7a194f0eea62955c12875e454b978f18262a65d017b/multidict-6.5.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84a51e3baa77ded07be4766a9e41d977987b97e49884d4c94f6d30ab6acaee14", size = 248181, upload-time = "2025-06-17T14:15:09.907Z" }, + { url = "https://files.pythonhosted.org/packages/43/3f/7d5490579640db5999a948e2c41d4a0efd91a75989bda3e0a03a79c92be2/multidict-6.5.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8de67f79314d24179e9b1869ed15e88d6ba5452a73fc9891ac142e0ee018b5d6", size = 241846, upload-time = "2025-06-17T14:15:11.596Z" }, + { url = "https://files.pythonhosted.org/packages/e1/f7/252b1ce949ece52bba4c0de7aa2e3a3d5964e800bce71fb778c2e6c66f7c/multidict-6.5.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17f78a52c214481d30550ec18208e287dfc4736f0c0148208334b105fd9e0887", size = 232893, upload-time = "2025-06-17T14:15:12.946Z" }, + { url = "https://files.pythonhosted.org/packages/45/7e/0070bfd48c16afc26e056f2acce49e853c0d604a69c7124bc0bbdb1bcc0a/multidict-6.5.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2966d0099cb2e2039f9b0e73e7fd5eb9c85805681aa2a7f867f9d95b35356921", size = 228567, upload-time = "2025-06-17T14:15:14.267Z" }, + { url = "https://files.pythonhosted.org/packages/2a/31/90551c75322113ebf5fd9c5422e8641d6952f6edaf6b6c07fdc49b1bebdd/multidict-6.5.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:86fb42ed5ed1971c642cc52acc82491af97567534a8e381a8d50c02169c4e684", size = 246188, upload-time = "2025-06-17T14:15:15.985Z" }, + { url = "https://files.pythonhosted.org/packages/cc/e2/aa4b02a55e7767ff292871023817fe4db83668d514dab7ccbce25eaf7659/multidict-6.5.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:4e990cbcb6382f9eae4ec720bcac6a1351509e6fc4a5bb70e4984b27973934e6", size = 235178, upload-time = "2025-06-17T14:15:17.395Z" }, + { url = "https://files.pythonhosted.org/packages/7d/5c/f67e726717c4b138b166be1700e2b56e06fbbcb84643d15f9a9d7335ff41/multidict-6.5.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d99a59d64bb1f7f2117bec837d9e534c5aeb5dcedf4c2b16b9753ed28fdc20a3", size = 243422, upload-time = "2025-06-17T14:15:18.939Z" }, + { url = "https://files.pythonhosted.org/packages/e5/1c/15fa318285e26a50aa3fa979bbcffb90f9b4d5ec58882d0590eda067d0da/multidict-6.5.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:e8ef15cc97c9890212e1caf90f0d63f6560e1e101cf83aeaf63a57556689fb34", size = 254898, upload-time = "2025-06-17T14:15:20.31Z" }, + { url = "https://files.pythonhosted.org/packages/ad/3d/d6c6d1c2e9b61ca80313912d30bb90d4179335405e421ef0a164eac2c0f9/multidict-6.5.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:b8a09aec921b34bd8b9f842f0bcfd76c6a8c033dc5773511e15f2d517e7e1068", size = 247129, upload-time = "2025-06-17T14:15:21.665Z" }, + { url = "https://files.pythonhosted.org/packages/29/15/1568258cf0090bfa78d44be66247cfdb16e27dfd935c8136a1e8632d3057/multidict-6.5.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff07b504c23b67f2044533244c230808a1258b3493aaf3ea2a0785f70b7be461", size = 243841, upload-time = "2025-06-17T14:15:23.38Z" }, + { url = "https://files.pythonhosted.org/packages/65/57/64af5dbcfd61427056e840c8e520b502879d480f9632fbe210929fd87393/multidict-6.5.0-cp313-cp313t-win32.whl", hash = "sha256:9232a117341e7e979d210e41c04e18f1dc3a1d251268df6c818f5334301274e1", size = 46761, upload-time = "2025-06-17T14:15:24.733Z" }, 
+ { url = "https://files.pythonhosted.org/packages/26/a8/cac7f7d61e188ff44f28e46cb98f9cc21762e671c96e031f06c84a60556e/multidict-6.5.0-cp313-cp313t-win_amd64.whl", hash = "sha256:44cb5c53fb2d4cbcee70a768d796052b75d89b827643788a75ea68189f0980a1", size = 52112, upload-time = "2025-06-17T14:15:25.906Z" }, + { url = "https://files.pythonhosted.org/packages/51/9f/076533feb1b5488d22936da98b9c217205cfbf9f56f7174e8c5c86d86fe6/multidict-6.5.0-cp313-cp313t-win_arm64.whl", hash = "sha256:51d33fafa82640c0217391d4ce895d32b7e84a832b8aee0dcc1b04d8981ec7f4", size = 44358, upload-time = "2025-06-17T14:15:27.117Z" }, + { url = "https://files.pythonhosted.org/packages/44/d8/45e8fc9892a7386d074941429e033adb4640e59ff0780d96a8cf46fe788e/multidict-6.5.0-py3-none-any.whl", hash = "sha256:5634b35f225977605385f56153bd95a7133faffc0ffe12ad26e10517537e8dfc", size = 12181, upload-time = "2025-06-17T14:15:55.156Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "numpy" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/db/8e12381333aea300890829a0a36bfa738cac95475d88982d538725143fd9/numpy-2.3.0.tar.gz", hash = "sha256:581f87f9e9e9db2cba2141400e160e9dd644ee248788d6f90636eeb8fd9260a6", size = 20382813, upload-time = "2025-06-07T14:54:32.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/fc/1d67f751fd4dbafc5780244fe699bc4084268bad44b7c5deb0492473127b/numpy-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5754ab5595bfa2c2387d241296e0381c21f44a4b90a776c3c1d39eede13a746a", size = 20889633, upload-time = "2025-06-07T14:44:06.839Z" }, + { url = "https://files.pythonhosted.org/packages/e8/95/73ffdb69e5c3f19ec4530f8924c4386e7ba097efc94b9c0aff607178ad94/numpy-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d11fa02f77752d8099573d64e5fe33de3229b6632036ec08f7080f46b6649959", size = 14151683, upload-time = "2025-06-07T14:44:28.847Z" }, + { url = "https://files.pythonhosted.org/packages/64/d5/06d4bb31bb65a1d9c419eb5676173a2f90fd8da3c59f816cc54c640ce265/numpy-2.3.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:aba48d17e87688a765ab1cd557882052f238e2f36545dfa8e29e6a91aef77afe", size = 5102683, upload-time = "2025-06-07T14:44:38.417Z" }, + { url = "https://files.pythonhosted.org/packages/12/8b/6c2cef44f8ccdc231f6b56013dff1d71138c48124334aded36b1a1b30c5a/numpy-2.3.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:4dc58865623023b63b10d52f18abaac3729346a7a46a778381e0e3af4b7f3beb", size = 6640253, upload-time = "2025-06-07T14:44:49.359Z" }, + { url = "https://files.pythonhosted.org/packages/62/aa/fca4bf8de3396ddb59544df9b75ffe5b73096174de97a9492d426f5cd4aa/numpy-2.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:df470d376f54e052c76517393fa443758fefcdd634645bc9c1f84eafc67087f0", size = 14258658, upload-time = 
"2025-06-07T14:45:10.156Z" }, + { url = "https://files.pythonhosted.org/packages/1c/12/734dce1087eed1875f2297f687e671cfe53a091b6f2f55f0c7241aad041b/numpy-2.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:87717eb24d4a8a64683b7a4e91ace04e2f5c7c77872f823f02a94feee186168f", size = 16628765, upload-time = "2025-06-07T14:45:35.076Z" }, + { url = "https://files.pythonhosted.org/packages/48/03/ffa41ade0e825cbcd5606a5669962419528212a16082763fc051a7247d76/numpy-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d8fa264d56882b59dcb5ea4d6ab6f31d0c58a57b41aec605848b6eb2ef4a43e8", size = 15564335, upload-time = "2025-06-07T14:45:58.797Z" }, + { url = "https://files.pythonhosted.org/packages/07/58/869398a11863310aee0ff85a3e13b4c12f20d032b90c4b3ee93c3b728393/numpy-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e651756066a0eaf900916497e20e02fe1ae544187cb0fe88de981671ee7f6270", size = 18360608, upload-time = "2025-06-07T14:46:25.687Z" }, + { url = "https://files.pythonhosted.org/packages/2f/8a/5756935752ad278c17e8a061eb2127c9a3edf4ba2c31779548b336f23c8d/numpy-2.3.0-cp313-cp313-win32.whl", hash = "sha256:e43c3cce3b6ae5f94696669ff2a6eafd9a6b9332008bafa4117af70f4b88be6f", size = 6310005, upload-time = "2025-06-07T14:50:13.138Z" }, + { url = "https://files.pythonhosted.org/packages/08/60/61d60cf0dfc0bf15381eaef46366ebc0c1a787856d1db0c80b006092af84/numpy-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:81ae0bf2564cf475f94be4a27ef7bcf8af0c3e28da46770fc904da9abd5279b5", size = 12729093, upload-time = "2025-06-07T14:50:31.82Z" }, + { url = "https://files.pythonhosted.org/packages/66/31/2f2f2d2b3e3c32d5753d01437240feaa32220b73258c9eef2e42a0832866/numpy-2.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:c8738baa52505fa6e82778580b23f945e3578412554d937093eac9205e845e6e", size = 9885689, upload-time = "2025-06-07T14:50:47.888Z" }, + { url = "https://files.pythonhosted.org/packages/f1/89/c7828f23cc50f607ceb912774bb4cff225ccae7131c431398ad8400e2c98/numpy-2.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:39b27d8b38942a647f048b675f134dd5a567f95bfff481f9109ec308515c51d8", size = 20986612, upload-time = "2025-06-07T14:46:56.077Z" }, + { url = "https://files.pythonhosted.org/packages/dd/46/79ecf47da34c4c50eedec7511e53d57ffdfd31c742c00be7dc1d5ffdb917/numpy-2.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0eba4a1ea88f9a6f30f56fdafdeb8da3774349eacddab9581a21234b8535d3d3", size = 14298953, upload-time = "2025-06-07T14:47:18.053Z" }, + { url = "https://files.pythonhosted.org/packages/59/44/f6caf50713d6ff4480640bccb2a534ce1d8e6e0960c8f864947439f0ee95/numpy-2.3.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:b0f1f11d0a1da54927436505a5a7670b154eac27f5672afc389661013dfe3d4f", size = 5225806, upload-time = "2025-06-07T14:47:27.524Z" }, + { url = "https://files.pythonhosted.org/packages/a6/43/e1fd1aca7c97e234dd05e66de4ab7a5be54548257efcdd1bc33637e72102/numpy-2.3.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:690d0a5b60a47e1f9dcec7b77750a4854c0d690e9058b7bef3106e3ae9117808", size = 6735169, upload-time = "2025-06-07T14:47:38.057Z" }, + { url = "https://files.pythonhosted.org/packages/84/89/f76f93b06a03177c0faa7ca94d0856c4e5c4bcaf3c5f77640c9ed0303e1c/numpy-2.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:8b51ead2b258284458e570942137155978583e407babc22e3d0ed7af33ce06f8", size = 14330701, upload-time = "2025-06-07T14:47:59.113Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/f5/4858c3e9ff7a7d64561b20580cf7cc5d085794bd465a19604945d6501f6c/numpy-2.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:aaf81c7b82c73bd9b45e79cfb9476cb9c29e937494bfe9092c26aece812818ad", size = 16692983, upload-time = "2025-06-07T14:48:24.196Z" }, + { url = "https://files.pythonhosted.org/packages/08/17/0e3b4182e691a10e9483bcc62b4bb8693dbf9ea5dc9ba0b77a60435074bb/numpy-2.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f420033a20b4f6a2a11f585f93c843ac40686a7c3fa514060a97d9de93e5e72b", size = 15641435, upload-time = "2025-06-07T14:48:47.712Z" }, + { url = "https://files.pythonhosted.org/packages/4e/d5/463279fda028d3c1efa74e7e8d507605ae87f33dbd0543cf4c4527c8b882/numpy-2.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d344ca32ab482bcf8735d8f95091ad081f97120546f3d250240868430ce52555", size = 18433798, upload-time = "2025-06-07T14:49:14.866Z" }, + { url = "https://files.pythonhosted.org/packages/0e/1e/7a9d98c886d4c39a2b4d3a7c026bffcf8fbcaf518782132d12a301cfc47a/numpy-2.3.0-cp313-cp313t-win32.whl", hash = "sha256:48a2e8eaf76364c32a1feaa60d6925eaf32ed7a040183b807e02674305beef61", size = 6438632, upload-time = "2025-06-07T14:49:25.67Z" }, + { url = "https://files.pythonhosted.org/packages/fe/ab/66fc909931d5eb230107d016861824f335ae2c0533f422e654e5ff556784/numpy-2.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ba17f93a94e503551f154de210e4d50c5e3ee20f7e7a1b5f6ce3f22d419b93bb", size = 12868491, upload-time = "2025-06-07T14:49:44.898Z" }, + { url = "https://files.pythonhosted.org/packages/ee/e8/2c8a1c9e34d6f6d600c83d5ce5b71646c32a13f34ca5c518cc060639841c/numpy-2.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:f14e016d9409680959691c109be98c436c6249eaf7f118b424679793607b5944", size = 9935345, upload-time = "2025-06-07T14:50:02.311Z" }, +] + +[[package]] +name = "oauthlib" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/8a/6ea75ff7acf89f43afb157604429af4661a9840b1f2cece602b6a13c1893/oauthlib-3.3.0.tar.gz", hash = "sha256:4e707cf88d7dfc22a8cce22ca736a2eef9967c1dd3845efc0703fc922353eeb2", size = 190292, upload-time = "2025-06-17T23:19:18.309Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/3d/760b1456010ed11ce87c0109007f0166078dfdada7597f0091ae76eb7305/oauthlib-3.3.0-py3-none-any.whl", hash = "sha256:a2b3a0a2a4ec2feb4b9110f56674a39b2cc2f23e14713f4ed20441dfba14e934", size = 165155, upload-time = "2025-06-17T23:19:16.771Z" }, +] + +[[package]] +name = "openai" +version = "1.88.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/ea/bbeef604d1fe0f7e9111745bb8a81362973a95713b28855beb9a9832ab12/openai-1.88.0.tar.gz", hash = "sha256:122d35e42998255cf1fc84560f6ee49a844e65c054cd05d3e42fda506b832bb1", size = 470963, upload-time = "2025-06-17T05:04:45.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/03/ef68d77a38dd383cbed7fc898857d394d5a8b0520a35f054e7fe05dc3ac1/openai-1.88.0-py3-none-any.whl", hash = "sha256:7edd7826b3b83f5846562a6f310f040c79576278bf8e3687b30ba05bb5dff978", size = 734293, upload-time = "2025-06-17T05:04:43.858Z" }, +] + +[[package]] +name = "orjson" +version = "3.10.18" +source = { registry = "https://pypi.org/simple" } 
+sdist = { url = "https://files.pythonhosted.org/packages/81/0b/fea456a3ffe74e70ba30e01ec183a9b26bec4d497f61dcfce1b601059c60/orjson-3.10.18.tar.gz", hash = "sha256:e8da3947d92123eda795b68228cafe2724815621fe35e8e320a9e9593a4bcd53", size = 5422810, upload-time = "2025-04-29T23:30:08.423Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/f0/8aedb6574b68096f3be8f74c0b56d36fd94bcf47e6c7ed47a7bd1474aaa8/orjson-3.10.18-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:69c34b9441b863175cc6a01f2935de994025e773f814412030f269da4f7be147", size = 249087, upload-time = "2025-04-29T23:29:19.083Z" }, + { url = "https://files.pythonhosted.org/packages/bc/f7/7118f965541aeac6844fcb18d6988e111ac0d349c9b80cda53583e758908/orjson-3.10.18-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:1ebeda919725f9dbdb269f59bc94f861afbe2a27dce5608cdba2d92772364d1c", size = 133273, upload-time = "2025-04-29T23:29:20.602Z" }, + { url = "https://files.pythonhosted.org/packages/fb/d9/839637cc06eaf528dd8127b36004247bf56e064501f68df9ee6fd56a88ee/orjson-3.10.18-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5adf5f4eed520a4959d29ea80192fa626ab9a20b2ea13f8f6dc58644f6927103", size = 136779, upload-time = "2025-04-29T23:29:22.062Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/f226ecfef31a1f0e7d6bf9a31a0bbaf384c7cbe3fce49cc9c2acc51f902a/orjson-3.10.18-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7592bb48a214e18cd670974f289520f12b7aed1fa0b2e2616b8ed9e069e08595", size = 132811, upload-time = "2025-04-29T23:29:23.602Z" }, + { url = "https://files.pythonhosted.org/packages/73/2d/371513d04143c85b681cf8f3bce743656eb5b640cb1f461dad750ac4b4d4/orjson-3.10.18-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f872bef9f042734110642b7a11937440797ace8c87527de25e0c53558b579ccc", size = 137018, upload-time = "2025-04-29T23:29:25.094Z" }, + { url = "https://files.pythonhosted.org/packages/69/cb/a4d37a30507b7a59bdc484e4a3253c8141bf756d4e13fcc1da760a0b00cb/orjson-3.10.18-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0315317601149c244cb3ecef246ef5861a64824ccbcb8018d32c66a60a84ffbc", size = 138368, upload-time = "2025-04-29T23:29:26.609Z" }, + { url = "https://files.pythonhosted.org/packages/1e/ae/cd10883c48d912d216d541eb3db8b2433415fde67f620afe6f311f5cd2ca/orjson-3.10.18-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0da26957e77e9e55a6c2ce2e7182a36a6f6b180ab7189315cb0995ec362e049", size = 142840, upload-time = "2025-04-29T23:29:28.153Z" }, + { url = "https://files.pythonhosted.org/packages/6d/4c/2bda09855c6b5f2c055034c9eda1529967b042ff8d81a05005115c4e6772/orjson-3.10.18-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb70d489bc79b7519e5803e2cc4c72343c9dc1154258adf2f8925d0b60da7c58", size = 133135, upload-time = "2025-04-29T23:29:29.726Z" }, + { url = "https://files.pythonhosted.org/packages/13/4a/35971fd809a8896731930a80dfff0b8ff48eeb5d8b57bb4d0d525160017f/orjson-3.10.18-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e9e86a6af31b92299b00736c89caf63816f70a4001e750bda179e15564d7a034", size = 134810, upload-time = "2025-04-29T23:29:31.269Z" }, + { url = "https://files.pythonhosted.org/packages/99/70/0fa9e6310cda98365629182486ff37a1c6578e34c33992df271a476ea1cd/orjson-3.10.18-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:c382a5c0b5931a5fc5405053d36c1ce3fd561694738626c77ae0b1dfc0242ca1", size = 
413491, upload-time = "2025-04-29T23:29:33.315Z" }, + { url = "https://files.pythonhosted.org/packages/32/cb/990a0e88498babddb74fb97855ae4fbd22a82960e9b06eab5775cac435da/orjson-3.10.18-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8e4b2ae732431127171b875cb2668f883e1234711d3c147ffd69fe5be51a8012", size = 153277, upload-time = "2025-04-29T23:29:34.946Z" }, + { url = "https://files.pythonhosted.org/packages/92/44/473248c3305bf782a384ed50dd8bc2d3cde1543d107138fd99b707480ca1/orjson-3.10.18-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2d808e34ddb24fc29a4d4041dcfafbae13e129c93509b847b14432717d94b44f", size = 137367, upload-time = "2025-04-29T23:29:36.52Z" }, + { url = "https://files.pythonhosted.org/packages/ad/fd/7f1d3edd4ffcd944a6a40e9f88af2197b619c931ac4d3cfba4798d4d3815/orjson-3.10.18-cp313-cp313-win32.whl", hash = "sha256:ad8eacbb5d904d5591f27dee4031e2c1db43d559edb8f91778efd642d70e6bea", size = 142687, upload-time = "2025-04-29T23:29:38.292Z" }, + { url = "https://files.pythonhosted.org/packages/4b/03/c75c6ad46be41c16f4cfe0352a2d1450546f3c09ad2c9d341110cd87b025/orjson-3.10.18-cp313-cp313-win_amd64.whl", hash = "sha256:aed411bcb68bf62e85588f2a7e03a6082cc42e5a2796e06e72a962d7c6310b52", size = 134794, upload-time = "2025-04-29T23:29:40.349Z" }, + { url = "https://files.pythonhosted.org/packages/c2/28/f53038a5a72cc4fd0b56c1eafb4ef64aec9685460d5ac34de98ca78b6e29/orjson-3.10.18-cp313-cp313-win_arm64.whl", hash = "sha256:f54c1385a0e6aba2f15a40d703b858bedad36ded0491e55d35d905b2c34a4cc3", size = 131186, upload-time = "2025-04-29T23:29:41.922Z" }, +] + +[[package]] +name = "ormsgpack" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/92/36/44eed5ef8ce93cded76a576780bab16425ce7876f10d3e2e6265e46c21ea/ormsgpack-1.10.0.tar.gz", hash = "sha256:7f7a27efd67ef22d7182ec3b7fa7e9d147c3ad9be2a24656b23c989077e08b16", size = 58629, upload-time = "2025-05-24T19:07:53.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/f8/ec5f4e03268d0097545efaab2893aa63f171cf2959cb0ea678a5690e16a1/ormsgpack-1.10.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:8d816d45175a878993b7372bd5408e0f3ec5a40f48e2d5b9d8f1cc5d31b61f1f", size = 376806, upload-time = "2025-05-24T19:07:29.555Z" }, + { url = "https://files.pythonhosted.org/packages/c1/19/b3c53284aad1e90d4d7ed8c881a373d218e16675b8b38e3569d5b40cc9b8/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a90345ccb058de0f35262893751c603b6376b05f02be2b6f6b7e05d9dd6d5643", size = 204433, upload-time = "2025-05-24T19:07:30.977Z" }, + { url = "https://files.pythonhosted.org/packages/09/0b/845c258f59df974a20a536c06cace593698491defdd3d026a8a5f9b6e745/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144b5e88f1999433e54db9d637bae6fe21e935888be4e3ac3daecd8260bd454e", size = 215549, upload-time = "2025-05-24T19:07:32.345Z" }, + { url = "https://files.pythonhosted.org/packages/61/56/57fce8fb34ca6c9543c026ebebf08344c64dbb7b6643d6ddd5355d37e724/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2190b352509d012915921cca76267db136cd026ddee42f1b0d9624613cc7058c", size = 216747, upload-time = "2025-05-24T19:07:34.075Z" }, + { url = "https://files.pythonhosted.org/packages/b8/3f/655b5f6a2475c8d209f5348cfbaaf73ce26237b92d79ef2ad439407dd0fa/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:86fd9c1737eaba43d3bb2730add9c9e8b5fbed85282433705dd1b1e88ea7e6fb", size = 384785, upload-time = "2025-05-24T19:07:35.83Z" }, + { url = "https://files.pythonhosted.org/packages/4b/94/687a0ad8afd17e4bce1892145d6a1111e58987ddb176810d02a1f3f18686/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:33afe143a7b61ad21bb60109a86bb4e87fec70ef35db76b89c65b17e32da7935", size = 479076, upload-time = "2025-05-24T19:07:37.533Z" }, + { url = "https://files.pythonhosted.org/packages/c8/34/68925232e81e0e062a2f0ac678f62aa3b6f7009d6a759e19324dbbaebae7/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f23d45080846a7b90feabec0d330a9cc1863dc956728412e4f7986c80ab3a668", size = 390446, upload-time = "2025-05-24T19:07:39.469Z" }, + { url = "https://files.pythonhosted.org/packages/12/ad/f4e1a36a6d1714afb7ffb74b3ababdcb96529cf4e7a216f9f7c8eda837b6/ormsgpack-1.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:534d18acb805c75e5fba09598bf40abe1851c853247e61dda0c01f772234da69", size = 121399, upload-time = "2025-05-24T19:07:40.854Z" }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" }, +] + +[[package]] +name = "pgvector" +version = "0.3.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/d8/fd6009cee3e03214667df488cdcf9609461d729968da94e4f95d6359d304/pgvector-0.3.6.tar.gz", hash = "sha256:31d01690e6ea26cea8a633cde5f0f55f5b246d9c8292d68efdef8c22ec994ade", size = 25421, upload-time = "2024-10-27T00:15:09.632Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/81/f457d6d361e04d061bef413749a6e1ab04d98cfeec6d8abcfe40184750f3/pgvector-0.3.6-py3-none-any.whl", hash = "sha256:f6c269b3c110ccb7496bac87202148ed18f34b390a0189c783e351062400a75a", size = 24880, upload-time = "2024-10-27T00:15:08.045Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "postgrest" +version = "1.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecation" }, + { name = "httpx", extra = ["http2"] }, + { name = "pydantic" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/4a/ea/9b0b5301c4a8fa360add83877f29f690b1b29d02a7d162aac528c7d385db/postgrest-1.0.2.tar.gz", hash = "sha256:42fa3a6e493d6c9e54afd907213608dcacb1f3d2f276ada19ef7b22bf64c78bd", size = 15284, upload-time = "2025-05-21T18:48:22.349Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/ce/a0655928584bba457ceda316e7a4fa02dfbb4366c6f393fe9473d0150597/postgrest-1.0.2-py3-none-any.whl", hash = "sha256:d115c56d3bd2672029a3805e9c73c14aa6608343dc5228db18e0e5e6134a3c62", size = 22531, upload-time = "2025-05-21T18:48:20.274Z" }, +] + +[[package]] +name = "propcache" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" }, + { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" }, + { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" }, + { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" }, + { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" }, + { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" }, + { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" }, + { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" }, + { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" }, + { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" }, + { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" }, + { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" }, + { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" 
}, + { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" }, + { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" }, + { url = "https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" }, + { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" }, + { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" }, + { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" }, + { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" }, + { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" }, + { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" }, + { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size 
= 265778, upload-time = "2025-06-09T22:55:36.45Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" }, + { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, +] + +[[package]] +name = "psycopg2-binary" +version = "2.9.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cb/0e/bdc8274dc0585090b4e3432267d7be4dfbfd8971c0fa59167c711105a6bf/psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2", size = 385764, upload-time = "2024-10-16T11:24:58.126Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3e/30/d41d3ba765609c0763505d565c4d12d8f3c79793f0d0f044ff5a28bf395b/psycopg2_binary-2.9.10-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:26540d4a9a4e2b096f1ff9cce51253d0504dca5a85872c7f7be23be5a53eb18d", size = 3044699, upload-time = "2024-10-16T11:21:42.841Z" }, + { url = "https://files.pythonhosted.org/packages/35/44/257ddadec7ef04536ba71af6bc6a75ec05c5343004a7ec93006bee66c0bc/psycopg2_binary-2.9.10-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e217ce4d37667df0bc1c397fdcd8de5e81018ef305aed9415c3b093faaeb10fb", size = 3275245, upload-time = "2024-10-16T11:21:51.989Z" }, + { url = "https://files.pythonhosted.org/packages/1b/11/48ea1cd11de67f9efd7262085588790a95d9dfcd9b8a687d46caf7305c1a/psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:245159e7ab20a71d989da00f280ca57da7641fa2cdcf71749c193cea540a74f7", size = 2851631, upload-time = "2024-10-16T11:21:57.584Z" }, + { url = "https://files.pythonhosted.org/packages/62/e0/62ce5ee650e6c86719d621a761fe4bc846ab9eff8c1f12b1ed5741bf1c9b/psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c4ded1a24b20021ebe677b7b08ad10bf09aac197d6943bfe6fec70ac4e4690d", size = 3082140, upload-time = "2024-10-16T11:22:02.005Z" }, + { url = "https://files.pythonhosted.org/packages/27/ce/63f946c098611f7be234c0dd7cb1ad68b0b5744d34f68062bb3c5aa510c8/psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3abb691ff9e57d4a93355f60d4f4c1dd2d68326c968e7db17ea96df3c023ef73", size = 3264762, upload-time = "2024-10-16T11:22:06.412Z" }, + { url = "https://files.pythonhosted.org/packages/43/25/c603cd81402e69edf7daa59b1602bd41eb9859e2824b8c0855d748366ac9/psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8608c078134f0b3cbd9f89b34bd60a943b23fd33cc5f065e8d5f840061bd0673", size = 3020967, upload-time = "2024-10-16T11:22:11.583Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/d6/8708d8c6fca531057fa170cdde8df870e8b6a9b136e82b361c65e42b841e/psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:230eeae2d71594103cd5b93fd29d1ace6420d0b86f4778739cb1a5a32f607d1f", size = 2872326, upload-time = "2024-10-16T11:22:16.406Z" }, + { url = "https://files.pythonhosted.org/packages/ce/ac/5b1ea50fc08a9df82de7e1771537557f07c2632231bbab652c7e22597908/psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909", size = 2822712, upload-time = "2024-10-16T11:22:21.366Z" }, + { url = "https://files.pythonhosted.org/packages/c4/fc/504d4503b2abc4570fac3ca56eb8fed5e437bf9c9ef13f36b6621db8ef00/psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1", size = 2920155, upload-time = "2024-10-16T11:22:25.684Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d1/323581e9273ad2c0dbd1902f3fb50c441da86e894b6e25a73c3fda32c57e/psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567", size = 2959356, upload-time = "2024-10-16T11:22:30.562Z" }, + { url = "https://files.pythonhosted.org/packages/08/50/d13ea0a054189ae1bc21af1d85b6f8bb9bbc5572991055d70ad9006fe2d6/psycopg2_binary-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:27422aa5f11fbcd9b18da48373eb67081243662f9b46e6fd07c3eb46e4535142", size = 2569224, upload-time = "2025-01-04T20:09:19.234Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size 
= 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, + { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, + { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, + { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, + { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, + { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/1d/42628a2c33e93f8e9acbde0d5d735fa0850f3e6a2f8cb1eb6c40b9a732ac/pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268", size = 163234, upload-time = "2025-04-18T16:44:48.265Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/5f/d6d641b490fd3ec2c4c13b4244d68deea3a1b970a97be64f34fb5504ff72/pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef", size = 44356, upload-time = "2025-04-18T16:44:46.617Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[[package]] +name = "pytest" +version = "8.3.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8e/c4/453c52c659521066969523e87d85d54139bbd17b78f09532fb8eb8cdb58e/pytest_asyncio-0.26.0.tar.gz", hash = "sha256:c4df2a697648241ff39e7f0e4a73050b03f123f760673956cf0d72a4990e312f", size = 54156, upload-time = "2025-03-25T06:22:28.883Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/7f/338843f449ace853647ace35870874f69a764d251872ed1b4de9f234822c/pytest_asyncio-0.26.0-py3-none-any.whl", hash = "sha256:7b51ed894f4fbea1340262bdae5135797ebbe21d8638978e35d31c6d19f72fb0", size = 19694, upload-time = "2025-03-25T06:22:27.807Z" }, +] + +[[package]] +name = "pytest-mock" +version = "3.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/28/67172c96ba684058a4d24ffe144d64783d2a270d0af0d9e792737bddc75c/pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e", size = 33241, upload-time = "2025-05-26T13:58:45.167Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist 
= { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920, upload-time = "2025-03-25T10:14:56.835Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256, upload-time = "2025-03-25T10:14:55.034Z" }, +] + +[[package]] +name = "python-magic" +version = "0.4.27" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/da/db/0b3e28ac047452d079d375ec6798bf76a036a08182dbb39ed38116a49130/python-magic-0.4.27.tar.gz", hash = "sha256:c1ba14b08e4a5f5c31a302b7721239695b2f0f058d125bd5ce1ee36b9d9d3c3b", size = 14677, upload-time = "2022-06-07T20:16:59.508Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/73/9f872cb81fc5c3bb48f7227872c28975f998f3e7c2b1c16e95e6432bbb90/python_magic-0.4.27-py2.py3-none-any.whl", hash = "sha256:c212960ad306f700aa0d01e5d7a325d20548ff97eb9920dcd29513174f0294d3", size = 13840, upload-time = "2022-06-07T20:16:57.763Z" }, +] + +[[package]] +name = "python-telegram-bot" +version = "22.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/35/2f/52ad8a19b75a6b5c9525abce9e3d9c57dfe21c29a4723ba2aeeb2c611a9d/python_telegram_bot-22.1.tar.gz", hash = "sha256:b6c7fc1f3635cef6aff0c431827407cafde183e7e1992060edeacc2bf08d23d8", size = 459976, upload-time = "2025-05-15T20:21:23.976Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/7b/b06663b3563299e15dac0b3a2044830db35c676753caeb45ae0acbf029a9/python_telegram_bot-22.1-py3-none-any.whl", hash = "sha256:71afd091fde9037ac44728c2768eb958682140dcc350900a191da0e9cef319d3", size = 702289, upload-time = "2025-05-15T20:21:21.12Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, +] + +[[package]] +name = "realtime" +version = "2.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "python-dateutil" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/75/fc/ef69bd4a1bf30a5435bc2d09f6c33bfef5f317746b1a4ca2932ef14b22fc/realtime-2.4.3.tar.gz", hash = "sha256:152febabc822ce60e11f202842c5aa6858ae4bd04920bfd6a00c1dd492f426b0", size = 18849, upload-time = "2025-04-28T19:50:38.387Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/0c/68ce3db6354c466f68bba2be0fe0ad3a93dca8219e10b9bad3138077efec/realtime-2.4.3-py3-none-any.whl", hash = "sha256:09ff3b61ac928413a27765640b67362380eaddba84a7037a17972a64b1ac52f7", size = 22086, upload-time = "2025-04-28T19:50:37.01Z" }, +] + +[[package]] +name = "regex" +version = "2024.11.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/5f/bd69653fbfb76cf8604468d3b4ec4c403197144c7bfe0e6a5fc9e02a07cb/regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519", size = 399494, upload-time = "2024-11-06T20:12:31.635Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/73/bcb0e36614601016552fa9344544a3a2ae1809dc1401b100eab02e772e1f/regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84", size = 483525, upload-time = "2024-11-06T20:10:45.19Z" }, + { url = 
"https://files.pythonhosted.org/packages/0f/3f/f1a082a46b31e25291d830b369b6b0c5576a6f7fb89d3053a354c24b8a83/regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4", size = 288324, upload-time = "2024-11-06T20:10:47.177Z" }, + { url = "https://files.pythonhosted.org/packages/09/c9/4e68181a4a652fb3ef5099e077faf4fd2a694ea6e0f806a7737aff9e758a/regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0", size = 284617, upload-time = "2024-11-06T20:10:49.312Z" }, + { url = "https://files.pythonhosted.org/packages/fc/fd/37868b75eaf63843165f1d2122ca6cb94bfc0271e4428cf58c0616786dce/regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0", size = 795023, upload-time = "2024-11-06T20:10:51.102Z" }, + { url = "https://files.pythonhosted.org/packages/c4/7c/d4cd9c528502a3dedb5c13c146e7a7a539a3853dc20209c8e75d9ba9d1b2/regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7", size = 833072, upload-time = "2024-11-06T20:10:52.926Z" }, + { url = "https://files.pythonhosted.org/packages/4f/db/46f563a08f969159c5a0f0e722260568425363bea43bb7ae370becb66a67/regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7", size = 823130, upload-time = "2024-11-06T20:10:54.828Z" }, + { url = "https://files.pythonhosted.org/packages/db/60/1eeca2074f5b87df394fccaa432ae3fc06c9c9bfa97c5051aed70e6e00c2/regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c", size = 796857, upload-time = "2024-11-06T20:10:56.634Z" }, + { url = "https://files.pythonhosted.org/packages/10/db/ac718a08fcee981554d2f7bb8402f1faa7e868c1345c16ab1ebec54b0d7b/regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3", size = 784006, upload-time = "2024-11-06T20:10:59.369Z" }, + { url = "https://files.pythonhosted.org/packages/c2/41/7da3fe70216cea93144bf12da2b87367590bcf07db97604edeea55dac9ad/regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07", size = 781650, upload-time = "2024-11-06T20:11:02.042Z" }, + { url = "https://files.pythonhosted.org/packages/a7/d5/880921ee4eec393a4752e6ab9f0fe28009435417c3102fc413f3fe81c4e5/regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e", size = 789545, upload-time = "2024-11-06T20:11:03.933Z" }, + { url = "https://files.pythonhosted.org/packages/dc/96/53770115e507081122beca8899ab7f5ae28ae790bfcc82b5e38976df6a77/regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6", size = 853045, upload-time = "2024-11-06T20:11:06.497Z" }, + { url = "https://files.pythonhosted.org/packages/31/d3/1372add5251cc2d44b451bd94f43b2ec78e15a6e82bff6a290ef9fd8f00a/regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4", size = 860182, upload-time = 
"2024-11-06T20:11:09.06Z" }, + { url = "https://files.pythonhosted.org/packages/ed/e3/c446a64984ea9f69982ba1a69d4658d5014bc7a0ea468a07e1a1265db6e2/regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d", size = 787733, upload-time = "2024-11-06T20:11:11.256Z" }, + { url = "https://files.pythonhosted.org/packages/2b/f1/e40c8373e3480e4f29f2692bd21b3e05f296d3afebc7e5dcf21b9756ca1c/regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff", size = 262122, upload-time = "2024-11-06T20:11:13.161Z" }, + { url = "https://files.pythonhosted.org/packages/45/94/bc295babb3062a731f52621cdc992d123111282e291abaf23faa413443ea/regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a", size = 273545, upload-time = "2024-11-06T20:11:15Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, upload-time = "2024-03-22T20:32:29.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, +] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" }, +] + +[[package]] +name = "responses" +version = "0.25.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist 
= { url = "https://files.pythonhosted.org/packages/81/7e/2345ac3299bd62bd7163216702bbc88976c099cfceba5b889f2a457727a1/responses-0.25.7.tar.gz", hash = "sha256:8ebae11405d7a5df79ab6fd54277f6f2bc29b2d002d0dd2d5c632594d1ddcedb", size = 79203, upload-time = "2025-03-11T15:36:16.624Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/fc/1d20b64fa90e81e4fa0a34c9b0240a6cfb1326b7e06d18a5432a9917c316/responses-0.25.7-py3-none-any.whl", hash = "sha256:92ca17416c90fe6b35921f52179bff29332076bb32694c0df02dcac2c6bc043c", size = 34732, upload-time = "2025-03-11T15:36:14.589Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.41" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "(python_full_version < '3.14' and platform_machine == 'AMD64') or (python_full_version < '3.14' and platform_machine == 'WIN32') or (python_full_version < '3.14' and platform_machine == 'aarch64') or (python_full_version < '3.14' and platform_machine == 'amd64') or (python_full_version < '3.14' and platform_machine == 'ppc64le') or (python_full_version < '3.14' and platform_machine == 'win32') or (python_full_version < '3.14' and platform_machine == 'x86_64')" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/66/45b165c595ec89aa7dcc2c1cd222ab269bc753f1fc7a1e68f8481bd957bf/sqlalchemy-2.0.41.tar.gz", hash = "sha256:edba70118c4be3c2b1f90754d308d0b79c6fe2c0fdc52d8ddf603916f83f4db9", size = 9689424, upload-time = "2025-05-14T17:10:32.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/ad/2e1c6d4f235a97eeef52d0200d8ddda16f6c4dd70ae5ad88c46963440480/sqlalchemy-2.0.41-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4eeb195cdedaf17aab6b247894ff2734dcead6c08f748e617bfe05bd5a218443", size = 2115491, upload-time = "2025-05-14T17:55:31.177Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8d/be490e5db8400dacc89056f78a52d44b04fbf75e8439569d5b879623a53b/sqlalchemy-2.0.41-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d4ae769b9c1c7757e4ccce94b0641bc203bbdf43ba7a2413ab2523d8d047d8dc", size = 2102827, upload-time = 
"2025-05-14T17:55:34.921Z" }, + { url = "https://files.pythonhosted.org/packages/a0/72/c97ad430f0b0e78efaf2791342e13ffeafcbb3c06242f01a3bb8fe44f65d/sqlalchemy-2.0.41-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a62448526dd9ed3e3beedc93df9bb6b55a436ed1474db31a2af13b313a70a7e1", size = 3225224, upload-time = "2025-05-14T17:50:41.418Z" }, + { url = "https://files.pythonhosted.org/packages/5e/51/5ba9ea3246ea068630acf35a6ba0d181e99f1af1afd17e159eac7e8bc2b8/sqlalchemy-2.0.41-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc56c9788617b8964ad02e8fcfeed4001c1f8ba91a9e1f31483c0dffb207002a", size = 3230045, upload-time = "2025-05-14T17:51:54.722Z" }, + { url = "https://files.pythonhosted.org/packages/78/2f/8c14443b2acea700c62f9b4a8bad9e49fc1b65cfb260edead71fd38e9f19/sqlalchemy-2.0.41-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c153265408d18de4cc5ded1941dcd8315894572cddd3c58df5d5b5705b3fa28d", size = 3159357, upload-time = "2025-05-14T17:50:43.483Z" }, + { url = "https://files.pythonhosted.org/packages/fc/b2/43eacbf6ccc5276d76cea18cb7c3d73e294d6fb21f9ff8b4eef9b42bbfd5/sqlalchemy-2.0.41-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f67766965996e63bb46cfbf2ce5355fc32d9dd3b8ad7e536a920ff9ee422e23", size = 3197511, upload-time = "2025-05-14T17:51:57.308Z" }, + { url = "https://files.pythonhosted.org/packages/fa/2e/677c17c5d6a004c3c45334ab1dbe7b7deb834430b282b8a0f75ae220c8eb/sqlalchemy-2.0.41-cp313-cp313-win32.whl", hash = "sha256:bfc9064f6658a3d1cadeaa0ba07570b83ce6801a1314985bf98ec9b95d74e15f", size = 2082420, upload-time = "2025-05-14T17:55:52.69Z" }, + { url = "https://files.pythonhosted.org/packages/e9/61/e8c1b9b6307c57157d328dd8b8348ddc4c47ffdf1279365a13b2b98b8049/sqlalchemy-2.0.41-cp313-cp313-win_amd64.whl", hash = "sha256:82ca366a844eb551daff9d2e6e7a9e5e76d2612c8564f58db6c19a726869c1df", size = 2108329, upload-time = "2025-05-14T17:55:54.495Z" }, + { url = "https://files.pythonhosted.org/packages/1c/fc/9ba22f01b5cdacc8f5ed0d22304718d2c758fce3fd49a5372b886a86f37c/sqlalchemy-2.0.41-py3-none-any.whl", hash = "sha256:57df5dc6fdb5ed1a88a1ed2195fd31927e705cad62dedd86b46972752a80f576", size = 1911224, upload-time = "2025-05-14T17:39:42.154Z" }, +] + +[[package]] +name = "starlette" +version = "0.46.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/44/b6/fb9a32e3c5d59b1e383c357534c63c2d3caa6f25bf3c59dd89d296ecbaec/starlette-0.46.0.tar.gz", hash = "sha256:b359e4567456b28d473d0193f34c0de0ed49710d75ef183a74a5ce0499324f50", size = 2575568, upload-time = "2025-02-22T17:34:45.949Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/94/8af675a62e3c91c2dee47cf92e602cfac86e8767b1a1ac3caf1b327c2ab0/starlette-0.46.0-py3-none-any.whl", hash = "sha256:913f0798bd90ba90a9156383bcf1350a17d6259451d0d8ee27fc0cf2db609038", size = 71991, upload-time = "2025-02-22T17:34:43.786Z" }, +] + +[[package]] +name = "storage3" +version = "0.11.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", extra = ["http2"] }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/25/83eb4e4612dc07a3bb3cab96253c9c83752d4816f2cf38aa832dfb8d8813/storage3-0.11.3.tar.gz", hash = "sha256:883637132aad36d9d92b7c497a8a56dff7c51f15faf2ff7acbccefbbd5e97347", size = 9930, upload-time = "2025-01-29T20:43:18.392Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c9/8d/ff89f85c4b48285ac7cddf0fafe5e55bb3742d374672b2fbd2627c213fa6/storage3-0.11.3-py3-none-any.whl", hash = "sha256:090c42152217d5d39bd94af3ddeb60c8982f3a283dcd90b53d058f2db33e6007", size = 17831, upload-time = "2025-01-29T20:43:16.075Z" }, +] + +[[package]] +name = "strenum" +version = "0.4.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/ad/430fb60d90e1d112a62ff57bdd1f286ec73a2a0331272febfddd21f330e1/StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff", size = 23384, upload-time = "2023-06-29T22:02:58.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/69/297302c5f5f59c862faa31e6cb9a4cd74721cd1e052b38e464c5b402df8b/StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659", size = 8851, upload-time = "2023-06-29T22:02:56.947Z" }, +] + +[[package]] +name = "supabase" +version = "2.15.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gotrue" }, + { name = "httpx" }, + { name = "postgrest" }, + { name = "realtime" }, + { name = "storage3" }, + { name = "supafunc" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7f/55/ac9544fb0013142e224469828255f4f520b87217582339525bec728f6278/supabase-2.15.3.tar.gz", hash = "sha256:24013e3bcb7b86fcbd220476048de080c61fbe5fb234ee182b7a6eab96d35d3d", size = 14562, upload-time = "2025-06-10T11:23:10.726Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/fe/f47d98a2fc0c5b08483b4e1afdb78f649bc69035644329d51c45ccdb79d2/supabase-2.15.3-py3-none-any.whl", hash = "sha256:d6c7abfd0e6db9667428e77c6f623487140acf3d7342edff1a1072ab8c77e537", size = 17479, upload-time = "2025-06-10T11:23:09.392Z" }, +] + +[[package]] +name = "supafunc" +version = "0.9.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", extra = ["http2"] }, + { name = "strenum" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/74/4f9e23690d2dfc0afb4a13d2d232415a6ef9b80397495afb548410035532/supafunc-0.9.4.tar.gz", hash = "sha256:68824a9a7bcccf5ab1e038cda632ba47cba27f2a7dc606014206b56f5a071de2", size = 4806, upload-time = "2025-03-26T12:40:04.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/51/b0bb6d405c053ecf9c51267b5a429424cab9ae3de229a1dfda3197ab251f/supafunc-0.9.4-py3-none-any.whl", hash = "sha256:2b34a794fb7930953150a434cdb93c24a04cf526b2f51a9e60b2be0b86d44fb2", size = 7792, upload-time = "2025-03-26T12:40:02.848Z" }, +] + +[[package]] +name = "tenacity" +version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + +[[package]] +name = "tiktoken" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, + { name = "requests" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ea/cf/756fedf6981e82897f2d570dd25fa597eb3f4459068ae0572d7e888cfd6f/tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d", size = 35991, upload-time = "2025-02-14T06:03:01.003Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/11/09d936d37f49f4f494ffe660af44acd2d99eb2429d60a57c71318af214e0/tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb", size = 1064919, upload-time = "2025-02-14T06:02:37.494Z" }, + { url = "https://files.pythonhosted.org/packages/80/0e/f38ba35713edb8d4197ae602e80837d574244ced7fb1b6070b31c29816e0/tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63", size = 1007877, upload-time = "2025-02-14T06:02:39.516Z" }, + { url = "https://files.pythonhosted.org/packages/fe/82/9197f77421e2a01373e27a79dd36efdd99e6b4115746ecc553318ecafbf0/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01", size = 1140095, upload-time = "2025-02-14T06:02:41.791Z" }, + { url = "https://files.pythonhosted.org/packages/f2/bb/4513da71cac187383541facd0291c4572b03ec23c561de5811781bbd988f/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139", size = 1195649, upload-time = "2025-02-14T06:02:43Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5c/74e4c137530dd8504e97e3a41729b1103a4ac29036cbfd3250b11fd29451/tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a", size = 1258465, upload-time = "2025-02-14T06:02:45.046Z" }, + { url = "https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669, upload-time = "2025-02-14T06:02:47.341Z" }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, +] + +[[package]] +name = "tweepy" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/05/7c2c01bd62900eff24534779f1e1531491dfd872edb6a9d432ae91e18b4b/tweepy-4.15.0.tar.gz", hash = "sha256:1345cbcdf0a75e2d89f424c559fd49fda4d8cd7be25cd5131e3b57bad8a21d76", size = 100268, upload-time = "2025-01-15T21:25:05.307Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/81/53/ca632ec02085b5c432e98ae1f872a21f2b6bb6c3d022dcf586809cc65cd0/tweepy-4.15.0-py3-none-any.whl", hash = "sha256:64adcea317158937059e4e2897b3ceb750b0c2dd5df58938c2da8f7eb3b88e6a", size = 99379, upload-time = "2025-01-15T21:25:02.856Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, upload-time = "2025-06-02T14:52:11.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" }, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825, upload-time = "2023-05-24T20:25:47.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827, upload-time = "2023-05-24T20:25:45.287Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "tzlocal" +version = "5.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tzdata", marker = "sys_platform == 'win32'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" }, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.34.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/ad/713be230bcda622eaa35c28f0d328c3675c371238470abdea52417f17a8e/uvicorn-0.34.3.tar.gz", hash = "sha256:35919a9a979d7a59334b6b10e05d77c1d0d574c50e0fc98b8b1a0f165708b55a", size = 76631, upload-time = "2025-06-01T07:48:17.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/0d/8adfeaa62945f90d19ddc461c55f4a50c258af7662d34b6a3d5d1f8646f6/uvicorn-0.34.3-py3-none-any.whl", hash = "sha256:16246631db62bdfbf069b0645177d6e8a77ba950cfedbfd093acef9444e4d885", size = 62431, upload-time = "2025-06-01T07:48:15.664Z" }, +] + +[[package]] +name = "vecs" +version = "0.4.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "flupy" }, + { name = "pgvector" }, + { name = "psycopg2-binary" }, + { name = "sqlalchemy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0c/87/9fb55aff1e18278c2a0d93ba48432e060086702e258e7e13068a31376548/vecs-0.4.5.tar.gz", hash = "sha256:7cd3ab65cf88f5869d49f70ae7385e844c4915700da1f2299c938afa56148cb6", size = 22036, upload-time = "2024-12-13T20:53:50.983Z" } + +[[package]] +name = "websockets" +version = "14.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/54/8359678c726243d19fae38ca14a334e740782336c9f19700858c4eb64a1e/websockets-14.2.tar.gz", hash = "sha256:5059ed9c54945efb321f097084b4c7e52c246f2c869815876a69d1efc4ad6eb5", size = 164394, upload-time = "2025-01-19T21:00:56.431Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/94/4f9b55099a4603ac53c2912e1f043d6c49d23e94dd82a9ce1eb554a90215/websockets-14.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6f1372e511c7409a542291bce92d6c83320e02c9cf392223272287ce55bc224e", size = 163102, upload-time = "2025-01-19T20:59:52.177Z" }, + { url = "https://files.pythonhosted.org/packages/8e/b7/7484905215627909d9a79ae07070057afe477433fdacb59bf608ce86365a/websockets-14.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4da98b72009836179bb596a92297b1a61bb5a830c0e483a7d0766d45070a08ad", 
size = 160766, upload-time = "2025-01-19T20:59:54.368Z" }, + { url = "https://files.pythonhosted.org/packages/a3/a4/edb62efc84adb61883c7d2c6ad65181cb087c64252138e12d655989eec05/websockets-14.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8a86a269759026d2bde227652b87be79f8a734e582debf64c9d302faa1e9f03", size = 160998, upload-time = "2025-01-19T20:59:56.671Z" }, + { url = "https://files.pythonhosted.org/packages/f5/79/036d320dc894b96af14eac2529967a6fc8b74f03b83c487e7a0e9043d842/websockets-14.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86cf1aaeca909bf6815ea714d5c5736c8d6dd3a13770e885aafe062ecbd04f1f", size = 170780, upload-time = "2025-01-19T20:59:58.085Z" }, + { url = "https://files.pythonhosted.org/packages/63/75/5737d21ee4dd7e4b9d487ee044af24a935e36a9ff1e1419d684feedcba71/websockets-14.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9b0f6c3ba3b1240f602ebb3971d45b02cc12bd1845466dd783496b3b05783a5", size = 169717, upload-time = "2025-01-19T20:59:59.545Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3c/bf9b2c396ed86a0b4a92ff4cdaee09753d3ee389be738e92b9bbd0330b64/websockets-14.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669c3e101c246aa85bc8534e495952e2ca208bd87994650b90a23d745902db9a", size = 170155, upload-time = "2025-01-19T21:00:01.887Z" }, + { url = "https://files.pythonhosted.org/packages/75/2d/83a5aca7247a655b1da5eb0ee73413abd5c3a57fc8b92915805e6033359d/websockets-14.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eabdb28b972f3729348e632ab08f2a7b616c7e53d5414c12108c29972e655b20", size = 170495, upload-time = "2025-01-19T21:00:04.064Z" }, + { url = "https://files.pythonhosted.org/packages/79/dd/699238a92761e2f943885e091486378813ac8f43e3c84990bc394c2be93e/websockets-14.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2066dc4cbcc19f32c12a5a0e8cc1b7ac734e5b64ac0a325ff8353451c4b15ef2", size = 169880, upload-time = "2025-01-19T21:00:05.695Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c9/67a8f08923cf55ce61aadda72089e3ed4353a95a3a4bc8bf42082810e580/websockets-14.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ab95d357cd471df61873dadf66dd05dd4709cae001dd6342edafc8dc6382f307", size = 169856, upload-time = "2025-01-19T21:00:07.192Z" }, + { url = "https://files.pythonhosted.org/packages/17/b1/1ffdb2680c64e9c3921d99db460546194c40d4acbef999a18c37aa4d58a3/websockets-14.2-cp313-cp313-win32.whl", hash = "sha256:a9e72fb63e5f3feacdcf5b4ff53199ec8c18d66e325c34ee4c551ca748623bbc", size = 163974, upload-time = "2025-01-19T21:00:08.698Z" }, + { url = "https://files.pythonhosted.org/packages/14/13/8b7fc4cb551b9cfd9890f0fd66e53c18a06240319915533b033a56a3d520/websockets-14.2-cp313-cp313-win_amd64.whl", hash = "sha256:b439ea828c4ba99bb3176dc8d9b933392a2413c0f6b149fdcba48393f573377f", size = 164420, upload-time = "2025-01-19T21:00:10.182Z" }, + { url = "https://files.pythonhosted.org/packages/7b/c8/d529f8a32ce40d98309f4470780631e971a5a842b60aec864833b3615786/websockets-14.2-py3-none-any.whl", hash = "sha256:7a6ceec4ea84469f15cf15807a747e9efe57e369c384fa86e022b3bea679b79b", size = 157416, upload-time = "2025-01-19T21:00:54.843Z" }, +] + +[[package]] +name = "wrapt" +version = "1.17.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = 
"sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531, upload-time = "2025-01-14T10:35:45.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800, upload-time = "2025-01-14T10:34:21.571Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824, upload-time = "2025-01-14T10:34:22.999Z" }, + { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920, upload-time = "2025-01-14T10:34:25.386Z" }, + { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690, upload-time = "2025-01-14T10:34:28.058Z" }, + { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861, upload-time = "2025-01-14T10:34:29.167Z" }, + { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174, upload-time = "2025-01-14T10:34:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721, upload-time = "2025-01-14T10:34:32.91Z" }, + { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763, upload-time = "2025-01-14T10:34:34.903Z" }, + { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585, upload-time = "2025-01-14T10:34:36.13Z" }, + { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676, upload-time = "2025-01-14T10:34:37.962Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = 
"sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871, upload-time = "2025-01-14T10:34:39.13Z" }, + { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312, upload-time = "2025-01-14T10:34:40.604Z" }, + { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062, upload-time = "2025-01-14T10:34:45.011Z" }, + { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155, upload-time = "2025-01-14T10:34:47.25Z" }, + { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471, upload-time = "2025-01-14T10:34:50.934Z" }, + { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208, upload-time = "2025-01-14T10:34:52.297Z" }, + { url = "https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339, upload-time = "2025-01-14T10:34:53.489Z" }, + { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232, upload-time = "2025-01-14T10:34:55.327Z" }, + { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476, upload-time = "2025-01-14T10:34:58.055Z" }, + { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377, upload-time = "2025-01-14T10:34:59.3Z" }, + { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986, upload-time = "2025-01-14T10:35:00.498Z" }, + { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = 
"sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750, upload-time = "2025-01-14T10:35:03.378Z" }, + { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594, upload-time = "2025-01-14T10:35:44.018Z" }, +] + +[[package]] +name = "xxhash" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/5e/d6e5258d69df8b4ed8c83b6664f2b47d30d2dec551a29ad72a6c69eafd31/xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f", size = 84241, upload-time = "2024-08-17T09:20:38.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/b8/e4b3ad92d249be5c83fa72916c9091b0965cb0faeff05d9a0a3870ae6bff/xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6", size = 31795, upload-time = "2024-08-17T09:18:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/fc/d8/b3627a0aebfbfa4c12a41e22af3742cf08c8ea84f5cc3367b5de2d039cce/xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5", size = 30792, upload-time = "2024-08-17T09:18:47.862Z" }, + { url = "https://files.pythonhosted.org/packages/c3/cc/762312960691da989c7cd0545cb120ba2a4148741c6ba458aa723c00a3f8/xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc", size = 220950, upload-time = "2024-08-17T09:18:49.06Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e9/cc266f1042c3c13750e86a535496b58beb12bf8c50a915c336136f6168dc/xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3", size = 199980, upload-time = "2024-08-17T09:18:50.445Z" }, + { url = "https://files.pythonhosted.org/packages/bf/85/a836cd0dc5cc20376de26b346858d0ac9656f8f730998ca4324921a010b9/xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c", size = 428324, upload-time = "2024-08-17T09:18:51.988Z" }, + { url = "https://files.pythonhosted.org/packages/b4/0e/15c243775342ce840b9ba34aceace06a1148fa1630cd8ca269e3223987f5/xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb", size = 194370, upload-time = "2024-08-17T09:18:54.164Z" }, + { url = "https://files.pythonhosted.org/packages/87/a1/b028bb02636dfdc190da01951d0703b3d904301ed0ef6094d948983bef0e/xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f", size = 207911, upload-time = "2024-08-17T09:18:55.509Z" }, + { url = "https://files.pythonhosted.org/packages/80/d5/73c73b03fc0ac73dacf069fdf6036c9abad82de0a47549e9912c955ab449/xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7", size = 216352, upload-time = "2024-08-17T09:18:57.073Z" }, + { url = 
"https://files.pythonhosted.org/packages/b6/2a/5043dba5ddbe35b4fe6ea0a111280ad9c3d4ba477dd0f2d1fe1129bda9d0/xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326", size = 203410, upload-time = "2024-08-17T09:18:58.54Z" }, + { url = "https://files.pythonhosted.org/packages/a2/b2/9a8ded888b7b190aed75b484eb5c853ddd48aa2896e7b59bbfbce442f0a1/xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf", size = 210322, upload-time = "2024-08-17T09:18:59.943Z" }, + { url = "https://files.pythonhosted.org/packages/98/62/440083fafbc917bf3e4b67c2ade621920dd905517e85631c10aac955c1d2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7", size = 414725, upload-time = "2024-08-17T09:19:01.332Z" }, + { url = "https://files.pythonhosted.org/packages/75/db/009206f7076ad60a517e016bb0058381d96a007ce3f79fa91d3010f49cc2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c", size = 192070, upload-time = "2024-08-17T09:19:03.007Z" }, + { url = "https://files.pythonhosted.org/packages/1f/6d/c61e0668943a034abc3a569cdc5aeae37d686d9da7e39cf2ed621d533e36/xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637", size = 30172, upload-time = "2024-08-17T09:19:04.355Z" }, + { url = "https://files.pythonhosted.org/packages/96/14/8416dce965f35e3d24722cdf79361ae154fa23e2ab730e5323aa98d7919e/xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43", size = 30041, upload-time = "2024-08-17T09:19:05.435Z" }, + { url = "https://files.pythonhosted.org/packages/27/ee/518b72faa2073f5aa8e3262408d284892cb79cf2754ba0c3a5870645ef73/xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b", size = 26801, upload-time = "2024-08-17T09:19:06.547Z" }, +] + +[[package]] +name = "yarl" +version = "1.20.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" }, + { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" }, + { url 
= "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" }, + { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" }, + { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" }, + { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" }, + { url = "https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" }, + { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" }, + { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" }, + { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" }, + { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" }, + { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, 
upload-time = "2025-06-10T00:44:45.275Z" }, + { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" }, + { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" }, + { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" }, + { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" }, + { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" }, + { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" }, + { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" }, + { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" }, + { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = 
"2025-06-10T00:45:08.227Z" }, + { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" }, + { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" }, + { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" }, + { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" }, + { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" }, + { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" }, + { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" }, + { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, +] + +[[package]] +name = "zstandard" +version = "0.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = 
"sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701, upload-time = "2024-07-15T00:18:06.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975, upload-time = "2024-07-15T00:16:16.005Z" }, + { url = "https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448, upload-time = "2024-07-15T00:16:17.897Z" }, + { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269, upload-time = "2024-07-15T00:16:20.136Z" }, + { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228, upload-time = "2024-07-15T00:16:23.398Z" }, + { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891, upload-time = "2024-07-15T00:16:26.391Z" }, + { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310, upload-time = "2024-07-15T00:16:29.018Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912, upload-time = "2024-07-15T00:16:31.871Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946, upload-time = "2024-07-15T00:16:34.593Z" }, + { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994, upload-time = "2024-07-15T00:16:36.887Z" }, + { url = "https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681, upload-time = "2024-07-15T00:16:39.709Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239, upload-time = "2024-07-15T00:16:41.83Z" }, + { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 5200149, upload-time = "2024-07-15T00:16:44.287Z" }, + { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392, upload-time = "2024-07-15T00:16:46.423Z" }, + { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299, upload-time = "2024-07-15T00:16:49.053Z" }, + { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862, upload-time = "2024-07-15T00:16:51.003Z" }, + { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578, upload-time = "2024-07-15T00:16:53.135Z" }, +] diff --git a/vector_react_example.py b/vector_react_example.py deleted file mode 100644 index 56391f30..00000000 --- a/vector_react_example.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python -""" -Example of using the Vector React workflow with Supabase Vecs. - -This example demonstrates how to initialize the vector store, -add documents, and execute the vector-enabled ReAct workflow. 
-""" - -import asyncio - -import dotenv -from langchain_community.document_loaders import WebBaseLoader -from langchain_core.documents import Document -from langchain_openai import OpenAIEmbeddings -from langchain_text_splitters import RecursiveCharacterTextSplitter - -from backend.factory import backend -from services.workflows.vector_react import ( - add_documents_to_vectors, - execute_vector_langgraph_stream, -) - -dotenv.load_dotenv() - - -async def load_documents_from_url(url): - """Load documents from a URL using WebBaseLoader and split them with RecursiveCharacterTextSplitter.""" - try: - print(f"Loading content from {url}...") - loader = WebBaseLoader(url) - docs = loader.load() - - # Initialize the text splitter - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=4000, - chunk_overlap=200, - length_function=len, - separators=["\n\n", "\n", " ", ""], - ) - - # Split the documents - split_docs = text_splitter.split_documents(docs) - - # Add metadata to each document - for doc in split_docs: - doc.metadata["type"] = "stacks_documentation" - doc.metadata["url"] = url - - print( - f"Successfully loaded and split into {len(split_docs)} documents from {url}" - ) - return split_docs - except Exception as e: - print(f"Error loading content from {url}: {str(e)}") - return [] - - -async def main(): - """Run the Vector React example.""" - # Set your OpenAI API key - # Create some example documents - # documents = [ - # Document( - # page_content="OpenAI was founded in 2015 and released GPT-1, GPT-2, GPT-3, and GPT-4.", - # metadata={"source": "about_openai.txt"}, - # ), - # Document( - # page_content="Python is a programming language known for its readability and versatility.", - # metadata={"source": "programming_languages.txt"}, - # ), - # Document( - # page_content="Supabase is an open source Firebase alternative with a PostgreSQL database.", - # metadata={"source": "database_services.txt"}, - # ), - # ] - documents = [] - - # Add Stacks documentation content - stacks_urls = [ - "https://docs.stacks.co/reference/functions", - "https://docs.stacks.co/reference/keywords", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/README.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/action-proposals.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/bank-account.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/core-proposals.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/dao-charter.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/onchain-messaging.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/payments-invoices.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/token-owner.md", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/treasury.md", - ] - - for url in stacks_urls: - print(f"\nProcessing documentation from {url}") - docs = await load_documents_from_url(url) - if docs: - print(f"Adding {len(docs)} documents from {url}") - documents.extend(docs) - else: - print(f"No content was retrieved from {url}") - - # Collection name for the vector store - collection_name = 
"example_collection" - - # Initialize embeddings - embeddings = OpenAIEmbeddings() - - # Ensure the vector collection exists - try: - # Try to get the collection first - backend.get_vector_collection(collection_name) - print(f"Using existing vector collection: {collection_name}") - except Exception: - # Create the collection if it doesn't exist - embed_dim = 1536 # Default for OpenAI embeddings - if hasattr(embeddings, "embedding_dim"): - embed_dim = embeddings.embedding_dim - backend.create_vector_collection(collection_name, dimensions=embed_dim) - print( - f"Created new vector collection: {collection_name} with dimensions: {embed_dim}" - ) - - # Add documents to the vector store - print("Adding documents to vector store...") - await add_documents_to_vectors( - collection_name=collection_name, documents=documents, embeddings=embeddings - ) - print("Documents added successfully!") - - # Create an index on the collection for better query performance - print("Creating index on vector collection...") - try: - backend.create_vector_index(collection_name) - print("Index created successfully!") - except Exception as e: - print(f"Error creating index: {str(e)}") - - # Setup example conversation history - history = [ - {"role": "system", "content": "You are a helpful AI assistant."}, - {"role": "user", "content": "Hello, who are you?"}, - { - "role": "assistant", - "content": "I'm an AI assistant here to help you with your questions.", - }, - ] - - # User query that will leverage the vector store for context - user_query = "Write a Clarity function that returns the current block height." - - print(f"\nExecuting Vector React workflow with query: '{user_query}'") - print("Streaming response:") - - # Execute the Vector React workflow and stream the response - async for chunk in execute_vector_langgraph_stream( - history=history, - input_str=user_query, - collection_name=collection_name, - embeddings=embeddings, - ): - if chunk["type"] == "token": - print(chunk["content"], end="", flush=True) - elif chunk["type"] == "end": - print("\n\nStream completed!") - elif chunk["type"] == "result": - print("\n\nFinal result metadata:", chunk) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/worker.py b/worker.py new file mode 100644 index 00000000..ddc53593 --- /dev/null +++ b/worker.py @@ -0,0 +1,41 @@ +"""Worker mode entrypoint for running background services without the web server.""" + +import asyncio +import sys + +from config import config +from lib.logger import configure_logger +from services.infrastructure.startup_service import run_standalone + +# Configure module logger +logger = configure_logger(__name__) + +# Load configuration +_ = config + + +async def main(): + """Main worker function that runs all background services.""" + logger.info("Starting AI BTC Dev Backend in worker mode...") + logger.info("Worker mode - Web server disabled, running background services only") + + try: + # Run the startup service in standalone mode + # This includes: + # - Enhanced job system with auto-discovery + # - Telegram bot (if enabled) + # - WebSocket cleanup tasks + # - System metrics monitoring + await run_standalone() + + except KeyboardInterrupt: + logger.info("Worker mode interrupted by user") + except Exception as e: + logger.error(f"Critical error in worker mode: {e}", exc_info=True) + sys.exit(1) + finally: + logger.info("Worker mode shutdown complete") + + +if __name__ == "__main__": + asyncio.run(main())