diff --git a/.env.example b/.env.example index 921e11dc..a7f72765 100644 --- a/.env.example +++ b/.env.example @@ -41,7 +41,7 @@ AIBTC_ALEX_BASE_URL=https://api.alexgo.io/ AIBTC_VELAR_BASE_URL="https://gateway.velar.network/" # AI Services -OPENAI_MODEL_NAME="gpt-4o" +OPENAI_MODEL_NAME="gpt-4.1" OPENAI_API_KEY="sk-proj-your-api-key-here" # For local model deployment # OPENAI_API_BASE="http://localhost:5000" @@ -81,6 +81,15 @@ AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_ENABLED=false AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_INTERVAL_SECONDS=60 AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID="your-wallet-id" +# Step 6: +AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_ENABLED=false +AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_INTERVAL_SECONDS=60 + +# Step 6: Process agent account deployments +AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED=false +AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS=60 +AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID="your-wallet-id" + # ============================================================================= # Social Media Integration # ============================================================================= diff --git a/README.md b/README.md index d80523c2..fdf9c62e 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,16 @@ ## Overview -aibtcdev-backend is a FastAPI-based backend service that provides API endpoints for chat functionality, tools, and webhooks. It integrates with various external services including OpenAI, Twitter, Telegram, and blockchain-related APIs. +aibtcdev-backend is a sophisticated FastAPI-based backend service that powers AI-driven interactions with Bitcoin and Stacks blockchain technologies. The service provides: + +1. Real-time chat functionality with AI agents via WebSocket +2. Automated DAO management and monitoring +3. Social media integration (Twitter, Telegram, Discord) +4. Blockchain interaction capabilities (Stacks, Bitcoin) +5. Market data analysis and reporting +6. 
Document processing and vector search capabilities + +The system is designed to be modular, scalable, and easily configurable through environment variables. ## Disclaimer @@ -11,59 +20,84 @@ aibtc.dev is not liable for any lost, locked, or mistakenly sent funds. This is ## Prerequisites - Python 3.13 -- [Bun](https://bun.sh/) (for running TypeScript scripts) +- [Bun](https://bun.sh/) (for TypeScript tools) - Git - Conda (recommended for development) or Docker - -## Features - -- FastAPI-based REST API -- WebSocket support for real-time communication -- Integration with multiple external services: - - Supabase for database and storage - - OpenAI for AI capabilities - - Twitter API for social media integration - - Telegram Bot API - - Blockchain APIs (Hiro, Alex, Velar) - - Market data APIs (LunarCrush, CMC) -- Background task scheduling system -- CORS support for multiple frontend environments -- Comprehensive logging system -- Workflow automation for tweet analysis and generation +- Node.js and npm (for agent tools) ## Project Structure -The project is organized into several key directories: - -- `api/`: Contains API endpoint definitions - - `chat.py`: WebSocket chat endpoints - - `tools.py`: Available tools endpoints - - `webhooks.py`: Webhook handling endpoints - -- `backend/`: Database abstraction and models - - `abstract.py`: Abstract base classes for database operations - - `factory.py`: Factory pattern for database backend creation - - `models.py`: Data models - - `supabase.py`: Supabase-specific implementation - -- `services/`: Core business logic and integrations - - `bot.py`: Telegram bot integration - - `chat.py`: Chat handling services - - `daos.py`: DAO processing services - - `schedule.py`: Scheduling services - - `startup.py`: Application startup and shutdown services - - `twitter.py`: Twitter integration services - - `workflows/`: Workflow implementations - - `base.py`: Base workflow classes - - `react.py`: ReAct workflow implementation - - 
`tweet_analysis.py`: Tweet analysis workflow - - `tweet_generator.py`: Tweet generation workflow - -- `tools/`: Tool implementations for agent use - -- `lib/`: Shared utilities and libraries +``` +aibtcdev-backend/ +├── api/ # FastAPI endpoint definitions +│ ├── chat.py # WebSocket chat endpoints +│ ├── tools.py # Tool endpoints +│ ├── webhooks.py # Webhook handlers +│ └── dependencies.py # API dependencies +├── services/ # Core business logic +│ ├── workflows/ # Workflow implementations +│ ├── runner/ # Background task runners +│ ├── webhooks/ # Webhook processors +│ ├── discord/ # Discord integration +│ ├── chat.py # Chat service +│ ├── daos.py # DAO operations +│ ├── schedule.py # Task scheduling +│ ├── startup.py # App lifecycle management +│ ├── twitter.py # Twitter integration +│ ├── bot.py # Telegram bot +│ └── websocket.py # WebSocket management +├── backend/ # Database and storage +├── tools/ # AI agent tools +├── lib/ # Shared utilities +├── tests/ # Test suite +├── docs/ # Documentation +├── examples/ # Usage examples +└── agent-tools-ts/ # TypeScript-based agent tools +``` -- `agent-tools-ts/`: TypeScript tools for agent integration +## Key Features + +### 1. AI Chat System +- Real-time WebSocket-based chat +- AI agent integration with OpenAI +- Context-aware conversations +- Document-based knowledge integration +- Vector search capabilities + +### 2. DAO Management +- Automated DAO deployment monitoring +- Proposal creation and tracking +- Vote processing +- Automated conclusion handling +- Tweet generation for DAO events + +### 3. Social Media Integration +- Twitter automation and monitoring +- Telegram bot integration +- Discord notifications +- Automated content generation +- Social engagement tracking + +### 4. Blockchain Integration +- Stacks blockchain interaction +- Bitcoin network monitoring +- Multiple API integrations: + - Hiro + - Alex + - Velar + - Platform API + +### 5. 
Market Analysis +- LunarCrush integration +- CoinMarketCap data processing +- Market trend analysis +- Automated reporting + +### 6. Background Processing +- Scheduled task management +- Event-driven processing +- Multi-threaded task execution +- Failure recovery and retry logic ## Installation @@ -83,93 +117,61 @@ git submodule update --remote cp .env.example .env ``` -2. Configure the following key sections in your `.env` file: -- Core Application Settings -- Database Configuration (Supabase) -- External API Endpoints & Keys -- Task Scheduling Configuration -- Social Media Integration -- Additional Tools & Services +2. Configure your environment variables by following the [Configuration Guide](CONFIG.md) ### 3. Development Setup (Conda Recommended) -1. Install Miniconda: ```bash -# On macOS +# Install Miniconda brew install miniconda # Initialize conda conda init "$(basename "${SHELL}")" # Restart your terminal -``` -2. Create and activate the environment: -```bash +# Create and activate environment conda create --name aibackend python=3.12 conda activate aibackend -``` -3. Install dependencies: -```bash +# Install dependencies pip install -r requirements.txt -``` -4. Set up TypeScript tools: -```bash +# Set up TypeScript tools cd agent-tools-ts/ bun install cd .. ``` -### 4. Alternative: Docker Setup +### 4. Docker Setup ```bash docker build -t aibtcdev-backend . 
docker run -p 8000:8000 --env-file .env aibtcdev-backend ``` -## API Endpoints - -The service exposes the following endpoints: +## API Documentation -### Chat Endpoints (`/chat`) -- `/chat/ws` - WebSocket endpoint for real-time chat communication - - Supports message history retrieval - - Real-time message processing - - Supports agent-based conversations - - Maintains thread-based chat history +### WebSocket Endpoints (`/chat`) +- `/chat/ws`: Real-time chat communication + - Supports message history + - AI agent integration + - Context management + - Document processing -### Tools Endpoints (`/tools`) -- `/tools/available` - Get list of available tools and their descriptions - - Returns tool information including: - - Tool ID and name - - Description - - Category - - Required parameters +### Tool Endpoints (`/tools`) +- `/tools/available`: Available tool listing +- `/tools/execute`: Tool execution endpoint +- Custom tool integration support ### Webhook Endpoints (`/webhooks`) -- `/webhooks/chainhook` - Handle blockchain-related webhook events -- `/webhooks/github` - Process GitHub webhook events +- `/webhooks/chainhook`: Blockchain event processing +- `/webhooks/github`: GitHub integration +- `/webhooks/discord`: Discord notifications ### Bot Endpoints (`/bot`) -- `/bot/telegram/test` - Test Telegram bot integration - - Send test messages to verified users - - Requires user profile verification - -All endpoints require proper authentication and most endpoints use profile verification middleware to ensure secure access to the API. - -For detailed API documentation including request/response schemas, visit `/docs` when running the server. 
- -## Configuration - -The application uses a hierarchical configuration system defined in `config.py`, including: - -- DatabaseConfig: Supabase connection settings -- TwitterConfig: Twitter API integration settings -- TelegramConfig: Telegram bot settings -- SchedulerConfig: Background task scheduling -- APIConfig: External API endpoints and keys -- NetworkConfig: Network-specific settings (testnet/mainnet) +- `/bot/telegram`: Telegram bot integration +- User verification and management +- Command processing ## Development @@ -179,45 +181,65 @@ The application uses a hierarchical configuration system defined in `config.py`, uvicorn main:app --host 0.0.0.0 --port 8000 --reload ``` -### Background Tasks +### Code Style -The application includes several background tasks that can be enabled/disabled via environment variables: -- Schedule synchronization -- DAO processing pipeline -- Tweet generation and posting -- Social media integration tasks -- Tweet analysis workflows +The project uses ruff for code formatting and linting. Configuration is in `ruff.toml`. + +### Testing + +```bash +pytest tests/ +``` -## Dependencies +### Documentation -Key dependencies include: -- APScheduler: For scheduling background tasks -- FastAPI: Web framework -- LangChain & LangGraph: For AI agent workflows -- OpenAI: For AI capabilities -- Supabase: For database and storage -- python-twitter-v2: For Twitter integration -- python-telegram-bot: For Telegram integration +API documentation is available at `/docs` when running the server. ## Contributing -1. Branch protection is enabled on `main` -2. Auto-deployment is configured for updates -3. Pull requests require one approval -4. Please ensure all tests pass before submitting a PR +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Run tests +5. 
Submit a pull request + +Guidelines: +- Follow the Python code style guide +- Add tests for new features +- Update documentation as needed +- Keep pull requests focused and atomic ## Troubleshooting -### OpenAI Rate Limits -- Check your current tier limits at https://platform.openai.com/settings/organization/limits -- TPM (Tokens Per Minute) limits: - - Tier 1: 200,000 TPM - - Tier 2: 2,000,000 TPM +### Common Issues -## License +1. OpenAI Rate Limits + - Check limits at https://platform.openai.com/settings/organization/limits + - TPM (Tokens Per Minute) limits: + - Tier 1: 200,000 TPM + - Tier 2: 2,000,000 TPM -[License Information] +2. WebSocket Connection Issues + - Check network connectivity + - Verify authentication tokens + - Check server logs for details + +3. Database Connection Issues + - Verify Supabase credentials + - Check network access to database + - Verify connection string format ## Support -[Support Information] +For support: +1. Check the documentation +2. Search existing issues +3. 
Create a new issue with: + - Clear description + - Steps to reproduce + - Expected vs actual behavior + - Environment details + +## License + +[License Information] diff --git a/agent-tools-ts b/agent-tools-ts index 831b0386..ea13e086 160000 --- a/agent-tools-ts +++ b/agent-tools-ts @@ -1 +1 @@ -Subproject commit 831b0386dcba7215002f1eb47a9ea016c8616194 +Subproject commit ea13e0864d5755b29fd81990fc39b1b8a57b8ca4 diff --git a/backend/models.py b/backend/models.py index 34a0c53b..6e188817 100644 --- a/backend/models.py +++ b/backend/models.py @@ -68,6 +68,12 @@ class QueueMessageType(str, Enum): DAO_TWEET = "dao_tweet" DAO_PROPOSAL_VOTE = "dao_proposal_vote" DAO_PROPOSAL_CONCLUDE = "dao_proposal_conclude" + DAO_PROPOSAL_EVALUATION = ( + "dao_proposal_evaluation" # New type for proposal evaluation + ) + AGENT_ACCOUNT_DEPLOY = ( + "agent_account_deploy" # New type for agent account deployment + ) def __str__(self): return self.value @@ -523,6 +529,7 @@ class QueueMessageFilter(CustomBaseModel): tweet_id: Optional[str] = None conversation_id: Optional[str] = None wallet_id: Optional[UUID] = None + dao_id: Optional[UUID] = None class AgentFilter(CustomBaseModel): @@ -678,6 +685,10 @@ class VoteBase(CustomBaseModel): amount: Optional[str] = None # String to handle large token amounts confidence: Optional[float] = None prompt: Optional[str] = None + voted: Optional[bool] = None + cost: Optional[float] = None + model: Optional[str] = None + profile_id: Optional[UUID] = None class VoteCreate(VoteBase): @@ -696,6 +707,10 @@ class VoteFilter(CustomBaseModel): proposal_id: Optional[UUID] = None answer: Optional[bool] = None address: Optional[str] = None + voted: Optional[bool] = None + model: Optional[str] = None + tx_id: Optional[str] = None + profile_id: Optional[UUID] = None # Add this to your backend interface class to get agents by tokens @@ -721,6 +736,8 @@ class PromptBase(CustomBaseModel): profile_id: Optional[UUID] = None prompt_text: Optional[str] = None is_active: 
Optional[bool] = True + model: Optional[str] = "gpt-4.1" + temperature: Optional[float] = 0.1 # Add temperature field with default value class PromptCreate(PromptBase): diff --git a/backend/supabase.py b/backend/supabase.py index 0b5b1371..f2b11ecc 100644 --- a/backend/supabase.py +++ b/backend/supabase.py @@ -227,7 +227,9 @@ async def add_vectors( try: # Upsert records collection.upsert(records=records) - logger.info(f"Added {len(records)} vectors to collection {collection_name}") + logger.debug( + f"Added {len(records)} vectors to collection {collection_name}" + ) return record_ids except Exception as e: logger.error( @@ -298,7 +300,7 @@ async def query_vectors( documents.append(doc) - logger.info( + logger.debug( f"Found {len(documents)} relevant documents for query in {collection_name}" ) return documents @@ -663,6 +665,9 @@ def list_queue_messages( query = query.eq("conversation_id", filters.conversation_id) if filters.wallet_id is not None: query = query.eq("wallet_id", filters.wallet_id) + if filters.dao_id is not None: + query = query.eq("dao_id", str(filters.dao_id)) + response = query.execute() data = response.data or [] return [QueueMessage(**row) for row in data] @@ -1573,6 +1578,14 @@ def list_votes(self, filters: Optional["VoteFilter"] = None) -> List["Vote"]: query = query.eq("answer", filters.answer) if filters.address is not None: query = query.eq("address", filters.address) + if filters.voted is not None: + query = query.eq("voted", filters.voted) + if filters.model is not None: + query = query.eq("model", filters.model) + if filters.tx_id is not None: + query = query.eq("tx_id", filters.tx_id) + if filters.profile_id is not None: + query = query.eq("profile_id", str(filters.profile_id)) response = query.execute() data = response.data or [] return [Vote(**row) for row in data] diff --git a/check_organization.py b/check_organization.py deleted file mode 100644 index 464e88e0..00000000 --- a/check_organization.py +++ /dev/null @@ -1,93 +0,0 @@ 
-#!/usr/bin/env python -""" -This script checks the organization of the project to ensure that all components -are properly organized according to the new structure. -""" - -import importlib -import sys -from pathlib import Path - - -def check_imports(): - """Check if all the required modules can be imported correctly.""" - try: - # Core components - import config - from config import config as config_instance - - print("✅ config module imported successfully") - - # API components - import api.chat - from api.chat import router as chat_router - - print("✅ api.chat module imported successfully") - - # Services components - import services.websocket - from services.websocket import websocket_manager - - print("✅ services.websocket module imported successfully") - - import services.chat - from services.chat import ChatService - - print("✅ services.chat module imported successfully") - - return True - except ImportError as e: - print(f"❌ Import error: {e}") - return False - - -def check_structure(): - """Check if the file structure is correct.""" - base_path = Path(__file__).parent - - # Check for required files - required_files = [ - "services/websocket.py", - "services/chat.py", - "api/chat.py", - "main.py", - "config.py", - ] - - all_exist = True - for file_path in required_files: - full_path = base_path / file_path - if full_path.exists(): - print(f"✅ {file_path} exists") - else: - print(f"❌ {file_path} does not exist") - all_exist = False - - # Check for old files that should be removed - old_files = ["lib/websocket_manager.py"] - - for file_path in old_files: - full_path = base_path / file_path - if full_path.exists(): - print(f"❌ {file_path} should be removed") - all_exist = False - else: - print(f"✅ {file_path} correctly removed") - - return all_exist - - -if __name__ == "__main__": - print("🔍 Checking project organization...") - print("\n=== File Structure ===") - structure_ok = check_structure() - - print("\n=== Import Checks ===") - imports_ok = 
check_imports() - - if structure_ok and imports_ok: - print("\n✅ All checks passed! Project organization is correct.") - sys.exit(0) - else: - print("\n❌ Some checks failed. Please fix the issues above.") - sys.exit(1) diff --git a/config.py b/config.py index ff415751..da68c751 100644 --- a/config.py +++ b/config.py @@ -94,6 +94,26 @@ class SchedulerConfig: dao_proposal_conclude_runner_wallet_id: str = os.getenv( "AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID", "" ) + dao_proposal_evaluation_runner_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_ENABLED", "false").lower() + == "true" + ) + dao_proposal_evaluation_runner_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_INTERVAL_SECONDS", "60") + ) + agent_account_deploy_runner_enabled: bool = ( + os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED", "false").lower() + == "true" + ) + agent_account_deploy_runner_interval_seconds: int = int( + os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS", "60") + ) + agent_account_deploy_runner_wallet_id: str = os.getenv( + "AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID", "" + ) + dao_proposal_vote_delay_blocks: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_VOTE_DELAY_BLOCKS", "2") + ) @dataclass diff --git a/docs/CONFIG.md b/docs/CONFIG.md new file mode 100644 index 00000000..bb16b8a0 --- /dev/null +++ b/docs/CONFIG.md @@ -0,0 +1,163 @@ +# Configuration Guide + +This document details all configuration options for the aibtcdev-backend service. All configuration is loaded from environment variables. + +## Quick Start + +1. Copy the example environment file: +```bash +cp .env.example .env +``` + +2. Configure the environment variables according to the sections below. 
+ +## Configuration Components + +### Database Configuration (DatabaseConfig) +- `AIBTC_BACKEND`: Database backend type (default: "supabase") +- `AIBTC_SUPABASE_USER`: Supabase user +- `AIBTC_SUPABASE_PASSWORD`: Supabase password +- `AIBTC_SUPABASE_HOST`: Database host +- `AIBTC_SUPABASE_PORT`: Database port +- `AIBTC_SUPABASE_DBNAME`: Database name +- `AIBTC_SUPABASE_URL`: Supabase project URL +- `AIBTC_SUPABASE_SERVICE_KEY`: Supabase service key +- `AIBTC_SUPABASE_BUCKET_NAME`: Storage bucket name + +### Twitter Configuration (TwitterConfig) +- `AIBTC_TWITTER_ENABLED`: Enable Twitter integration (true/false) +- `AIBTC_TWITTER_INTERVAL_SECONDS`: Interval for Twitter operations (default: 120) +- `AIBTC_TWITTER_CONSUMER_KEY`: Twitter API consumer key +- `AIBTC_TWITTER_CONSUMER_SECRET`: Twitter API consumer secret +- `AIBTC_TWITTER_CLIENT_ID`: Twitter client ID +- `AIBTC_TWITTER_CLIENT_SECRET`: Twitter client secret +- `AIBTC_TWITTER_ACCESS_TOKEN`: Twitter access token +- `AIBTC_TWITTER_ACCESS_SECRET`: Twitter access secret +- `AIBTC_TWITTER_AUTOMATED_USER_ID`: Automated Twitter user ID +- `AIBTC_TWITTER_WHITELISTED`: Comma-separated list of whitelisted authors + +### Telegram Configuration (TelegramConfig) +- `AIBTC_TELEGRAM_BOT_TOKEN`: Telegram bot token +- `AIBTC_TELEGRAM_BOT_ENABLED`: Enable Telegram bot (true/false) + +### Discord Configuration (DiscordConfig) +- `AIBTC_DISCORD_WEBHOOK_URL`: Discord webhook URL for notifications + +### API Configuration (APIConfig) +- `AIBTC_ALEX_BASE_URL`: Alex API base URL (default: "https://api.alexgo.io/") +- `AIBTC_HIRO_API_URL`: Hiro API URL (default: "https://api.hiro.so") +- `AIBTC_PLATFORM_API_URL`: Platform API URL +- `AIBTC_VELAR_BASE_URL`: Velar network gateway URL +- `AIBTC_LUNARCRUSH_BASE_URL`: LunarCrush API base URL +- `HIRO_API_KEY`: Hiro API key +- `AIBTC_WEBHOOK_URL`: Webhook URL for notifications +- `AIBTC_WEBHOOK_AUTH_TOKEN`: Webhook authentication token +- `AIBTC_LUNARCRUSH_API_KEY`: LunarCrush API key +- 
`AIBTC_CMC_API_KEY`: CoinMarketCap API key +- `OPENAI_API_KEY`: OpenAI API key + +### Network Configuration (NetworkConfig) +- `NETWORK`: Network type (testnet/mainnet) + +### Scheduler Configuration (SchedulerConfig) + +The application includes several background task runners that can be configured: + +#### Schedule Sync Runner +- `AIBTC_SCHEDULE_SYNC_ENABLED`: Enable schedule sync (true/false) +- `AIBTC_SCHEDULE_SYNC_INTERVAL_SECONDS`: Sync interval in seconds (default: 60) + +#### DAO Runners +- `AIBTC_DAO_RUNNER_ENABLED`: Enable DAO processing (true/false) +- `AIBTC_DAO_RUNNER_INTERVAL_SECONDS`: Processing interval (default: 30) +- `AIBTC_DAO_TWEET_RUNNER_ENABLED`: Enable DAO tweet generation (true/false) +- `AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS`: Tweet generation interval (default: 30) +- `AIBTC_DAO_PROPOSAL_VOTE_RUNNER_ENABLED`: Enable proposal vote processing (true/false) +- `AIBTC_DAO_PROPOSAL_VOTE_RUNNER_INTERVAL_SECONDS`: Vote processing interval (default: 60) +- `AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_ENABLED`: Enable proposal conclusion processing (true/false) +- `AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_INTERVAL_SECONDS`: Conclusion processing interval (default: 60) +- `AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID`: Wallet ID for conclusion processing + +#### Agent Account Runner +- `AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED`: Enable agent account deployment (true/false) +- `AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS`: Deployment interval (default: 60) +- `AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID`: Wallet ID for deployments + +#### Tweet Runner +- `AIBTC_TWEET_RUNNER_ENABLED`: Enable tweet processing (true/false) +- `AIBTC_TWEET_RUNNER_INTERVAL_SECONDS`: Processing interval (default: 30) + +## Example Configurations + +### DAO Processing Configuration +```env +AIBTC_DAO_RUNNER_ENABLED=true +AIBTC_DAO_RUNNER_INTERVAL_SECONDS=30 +AIBTC_DAO_TWEET_RUNNER_ENABLED=true +AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS=30 +``` + +### Agent Account Deployment 
+```env +AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED=false +AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS=60 +AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID="your-wallet-id" +``` + +### Social Media Integration +```env +AIBTC_TWITTER_ENABLED=true +AIBTC_TWITTER_INTERVAL_SECONDS=120 +AIBTC_TELEGRAM_BOT_ENABLED=true +``` + +## Security Considerations + +1. API Keys and Secrets + - Never commit API keys or secrets to version control + - Use environment variables for all sensitive data + - Rotate keys regularly + - Use appropriate access scopes + +2. Network Configuration + - Use HTTPS for all external API communications + - Configure appropriate CORS settings + - Use secure WebSocket connections (WSS) + +3. Database Security + - Use strong passwords + - Limit database user permissions + - Enable SSL for database connections + - Regular backup configuration + +## Troubleshooting + +### Common Configuration Issues + +1. Database Connection + - Verify all database credentials are correct + - Check network access to database + - Verify SSL requirements + +2. API Integration + - Validate API keys and tokens + - Check API rate limits + - Verify endpoint URLs + +3. Background Tasks + - Check runner enabled flags + - Verify interval settings + - Monitor task execution logs + +## Maintenance + +1. Regular Tasks + - Monitor API usage and rate limits + - Check log files for errors + - Review and rotate API keys + - Update configuration as needed + +2. Backup Configuration + - Regular database backups + - Configuration backup + - Key rotation schedule \ No newline at end of file diff --git a/docs/chainhook_service.md b/docs/chainhook_service.md new file mode 100644 index 00000000..cf58ede2 --- /dev/null +++ b/docs/chainhook_service.md @@ -0,0 +1,121 @@ +# Chainhook Parsing Service + +## Overview + +The Chainhook parsing service is a specialized component within the backend that processes and handles blockchain-related webhook events. 
It's designed to parse, validate, and process webhook payloads from the Chainhook service, which monitors blockchain events and state changes. + +## Architecture + +The service is composed of three main components: + +1. **ChainhookService** (`service.py`) + - Acts as the main entry point for webhook processing + - Coordinates between the parser and handler components + - Implements the base WebhookService interface + +2. **ChainhookParser** (`parser.py`) + - Responsible for parsing raw webhook payloads into structured data + - Implements comprehensive validation and type checking + - Converts JSON data into strongly-typed Python objects + +3. **ChainhookHandler** (`handler.py`) + - Manages the processing of parsed webhook events + - Coordinates multiple specialized handlers for different event types + - Implements a sequential processing pipeline + +## Data Models + +The service uses a comprehensive set of data models (`models.py`) to represent blockchain data: + +- `ChainHookData`: Top-level container for webhook payloads +- `ChainHookInfo`: Metadata about the webhook configuration +- `Apply`: Represents block-level data and transactions +- `BlockIdentifier`: Block hash and index information +- `TransactionWithReceipt`: Detailed transaction data with receipts +- `Operation`: Individual blockchain operations +- `Event`: Transaction events and their data +- `Receipt`: Transaction receipts and execution results + +## Event Handlers + +The service includes several specialized handlers for different types of blockchain events: + +- `BlockStateHandler`: Processes block-level state changes +- `BuyEventHandler`: Handles purchase-related events +- `SellEventHandler`: Processes sale-related events +- `DAOProposalHandler`: Manages DAO proposal events +- `DAOVoteHandler`: Handles DAO voting events +- `ContractMessageHandler`: Processes smart contract messages +- `DAOProposalBurnHeightHandler`: Handles proposal burn height events +- `DAOProposalConclusionHandler`: Processes 
proposal conclusions + +## Processing Pipeline + +The webhook processing follows a sequential pipeline: + +1. **Parsing Phase** + - Raw JSON payload is received + - Data is validated and converted to typed objects + - Structured data is created using the defined models + +2. **Handling Phase** + - Block-level processing occurs first + - Transaction-level processing follows + - Each handler processes events it's responsible for + - Post-processing cleanup is performed + +3. **Error Handling** + - Comprehensive error catching and logging + - Structured error responses + - Transaction rollback support + +## Usage + +The service is automatically initialized when webhook events are received. It processes events in the following order: + +1. The webhook payload is received by the service +2. The parser converts the raw data into structured objects +3. The handler coordinates processing through specialized handlers +4. Results are logged and any necessary actions are taken + +## Logging + +The service implements comprehensive logging using the project's standard logging configuration: + +- DEBUG level for detailed processing information +- INFO level for standard operation logging +- ERROR level for exception handling +- Contextual information included in all log messages + +## Error Handling + +The service implements robust error handling: + +- Specific exception types for different error scenarios +- Comprehensive error logging +- Transaction rollback support +- Structured error responses + +## Security Considerations + +- Input validation on all webhook payloads +- Type checking and sanitization +- Secure handling of sensitive blockchain data +- Proper error handling to prevent information leakage + +## Dependencies + +The service relies on several key components: + +- Base webhook service infrastructure +- Logging configuration +- Type hints from the Python typing library +- JSON parsing and validation +- Blockchain-specific data models + +## Future Considerations + 
+- Potential for parallel processing of transactions +- Enhanced monitoring and metrics +- Additional specialized handlers for new event types +- Performance optimizations for large block processing \ No newline at end of file diff --git a/docs/runners.md b/docs/runners.md new file mode 100644 index 00000000..be1391c4 --- /dev/null +++ b/docs/runners.md @@ -0,0 +1,125 @@ +# Runners System Documentation + +## Overview + +The runners system is a core component of the AIBTC backend that manages and executes various automated tasks. It provides a flexible and extensible framework for scheduling and running different types of jobs, from DAO operations to Twitter interactions. + +## Architecture + +### Core Components + +1. **BaseTask** + - Abstract base class for all runner tasks + - Provides common functionality for task execution and validation + - Implements logging and metrics collection + - Supports generic result types through type parameters + +2. **JobManager** + - Manages scheduled jobs using AsyncIOScheduler + - Handles job configuration and scheduling + - Supports enabling/disabling jobs through configuration + +3. **JobRegistry** + - Maintains a registry of available runners + - Maps job types to their corresponding runner implementations + - Provides registration and lookup functionality + +### Job Types + +The system supports several types of jobs: + +- `DAO`: General DAO operations +- `DAO_PROPOSAL_VOTE`: Handling DAO proposal voting +- `DAO_PROPOSAL_CONCLUDE`: Concluding DAO proposals +- `DAO_TWEET`: Managing DAO-related tweets +- `TWEET`: General tweet operations +- `AGENT_ACCOUNT_DEPLOY`: Deploying agent accounts + +## Configuration + +Runners are configured through environment variables and configuration files. Key configuration includes: + +- Twitter profile and agent IDs +- Wallet configurations +- Job intervals and scheduling parameters +- Feature toggles for enabling/disabling specific runners + +## Job Execution Flow + +1. 
**Initialization** + - JobManager loads configurations for all available jobs + - Enabled jobs are scheduled with specified intervals + +2. **Execution** + - Jobs are executed according to their schedule + - Each execution follows a standard pipeline: + 1. Configuration validation + 2. Prerequisites validation + 3. Task-specific validation + 4. Task execution + 5. Result logging and metrics collection + +3. **Error Handling** + - Comprehensive error handling and logging + - Support for retries with configurable retry counts + - Detailed error reporting and metrics + +## Runner Implementation + +To implement a new runner: + +1. Create a new class inheriting from `BaseTask` +2. Define the result type using the generic parameter +3. Implement required methods: + - `_validate_config` + - `_validate_prerequisites` + - `_validate_task_specific` + - `_execute_impl` +4. Register the runner with `JobRegistry` + +Example: +```python +class MyCustomRunner(BaseTask[MyCustomResult]): + async def _execute_impl(self, context: JobContext) -> List[MyCustomResult]: + # Implementation here + pass +``` + +## Monitoring and Logging + +The runner system includes comprehensive logging: + +- Task start and completion times +- Success and failure metrics +- Execution duration +- Detailed error information +- Debug-level configuration logging + +## Best Practices + +1. **Validation** + - Implement thorough validation in all runners + - Check prerequisites before execution + - Validate configuration and parameters + +2. **Error Handling** + - Use specific exception types + - Provide detailed error messages + - Implement appropriate retry logic + +3. **Logging** + - Use appropriate log levels + - Include context in log messages + - Log metrics for monitoring + +4. 
**Configuration** + - Use environment variables for sensitive data + - Implement feature toggles for runners + - Document configuration requirements + +## Security Considerations + +- Sensitive configuration is managed through environment variables +- Wallet operations require proper authentication +- Task validation ensures proper authorization +- Error messages are sanitized for security \ No newline at end of file diff --git a/docs/workflows.md b/docs/workflows.md new file mode 100644 index 00000000..39c61856 --- /dev/null +++ b/docs/workflows.md @@ -0,0 +1,190 @@ +# Workflows System Documentation + +## Overview + +The workflows system is a sophisticated implementation of AI-driven task execution pipelines built on top of LangGraph and LangChain. It provides a flexible and extensible framework for creating complex AI workflows that can combine multiple capabilities such as planning, vector retrieval, web searching, and reactive decision-making. + +## Core Components + +### Base Workflow (`base.py`) + +The foundation of the workflow system is the `BaseWorkflow` class, which provides: + +- Common functionality for all workflow types +- State management and validation +- LLM integration with OpenAI models +- Error handling and logging +- Extensible architecture through mixins + +### Available Workflows + +1. **ReAct Workflow** (`react.py`) + - Implements the Reasoning and Acting pattern + - Supports streaming responses + - Handles tool execution and state management + - Uses a message-based architecture for communication + +2. **Vector ReAct Workflow** (`vector_react.py`) + - Extends ReAct with vector database integration + - Enables semantic search and retrieval + - Combines vector search results with reasoning + +3. **Preplan ReAct Workflow** (`preplan_react.py`) + - Adds planning capabilities before execution + - Creates structured plans for complex tasks + - Executes plans step by step + +4. 
**Vector Preplan ReAct Workflow** (`vector_preplan_react.py`) + - Combines planning with vector retrieval + - Uses context from vector store for better planning + - Enhanced decision making with relevant information + +5. **Web Search Workflow** (`web_search.py`) + - Integrates web search capabilities + - Processes and summarizes web results + - Combines web information with other workflow steps + +6. **Proposal Evaluation Workflow** (`proposal_evaluation.py`) + - Specialized workflow for evaluating proposals + - Structured analysis and decision making + - Supports complex evaluation criteria + +7. **Tweet Analysis Workflow** (`tweet_analysis.py`) + - Analyzes tweet content and metrics + - Provides insights and recommendations + - Supports social media strategy + +8. **Tweet Generator Workflow** (`tweet_generator.py`) + - Creates engaging tweet content + - Follows best practices and guidelines + - Optimizes for engagement + +## Key Features + +### Workflow Capabilities + +The system includes several core capabilities that can be mixed into workflows: + +1. **Planning Capability** + - Creates structured plans for complex tasks + - Breaks down problems into manageable steps + - Ensures systematic approach to problem-solving + +2. **Vector Retrieval Capability** + - Integrates with vector databases + - Enables semantic search and context retrieval + - Enhances decision making with relevant information + +3. 
**Web Search Capability** + - Performs web searches for real-time information + - Processes and summarizes search results + - Integrates external knowledge into workflows + +### State Management + +- Type-safe state handling using TypedDict +- Validation of required fields +- Clean state transitions +- Error handling and recovery + +### Streaming Support + +- Real-time response streaming +- Progress updates during execution +- Tool execution status updates +- Error handling during streaming + +## Implementation Details + +### Message Processing + +The system uses a sophisticated message processing system that: +- Filters and formats message history +- Converts messages to LangChain format +- Handles different message types (system, human, AI) +- Supports tool calls and responses + +### Error Handling + +Comprehensive error handling includes: +- `LangGraphError`: Base exception class +- `StreamingError`: For streaming-related issues +- `ExecutionError`: For workflow execution problems +- `ValidationError`: For state validation failures + +### Logging + +- Structured logging throughout the system +- Debug information for development +- Error tracking and reporting +- Performance monitoring + +## Usage Guidelines + +### Creating New Workflows + +To create a new workflow: + +1. Inherit from `BaseWorkflow` +2. Implement required methods: + - `_create_prompt()` + - `_create_graph()` +3. Define state validation rules +4. Add necessary capabilities through mixins + +### Best Practices + +1. **State Management** + - Keep state minimal and focused + - Validate state transitions + - Handle edge cases + +2. **Error Handling** + - Use specific error types + - Provide detailed error messages + - Implement recovery strategies + +3. **Performance** + - Optimize tool usage + - Implement caching where appropriate + - Monitor execution times + +4. 
**Testing** + - Write unit tests for workflows + - Test edge cases and error conditions + - Validate tool integration + +## Integration + +The workflow system integrates with: +- LangChain for LLM interactions +- LangGraph for workflow orchestration +- Vector databases for retrieval +- Web search APIs +- Custom tools and capabilities + +## Security Considerations + +- API key management +- Input validation +- Rate limiting +- Error handling +- Access control + +## Future Enhancements + +Potential areas for expansion: +- Additional workflow types +- More capabilities and tools +- Enhanced monitoring +- Performance optimizations +- Additional integrations + +## Contributing + +When contributing new workflows: +1. Follow existing patterns and conventions +2. Implement comprehensive error handling +3. Add appropriate documentation +4. Include tests +5. Consider performance implications \ No newline at end of file diff --git a/document_processor.py b/document_processor.py index 1a92560e..e533d8a3 100644 --- a/document_processor.py +++ b/document_processor.py @@ -7,10 +7,9 @@ """ import asyncio -import json import os from pathlib import Path -from typing import Dict, List, Optional, Union +from typing import List, Optional import dotenv from langchain_community.document_loaders import TextLoader, WebBaseLoader @@ -20,21 +19,10 @@ from backend.factory import backend from backend.models import ( - DAO, - UUID, - ContractStatus, - DAOFilter, - Extension, ExtensionFilter, - HolderFilter, - Proposal, ProposalFilter, - Token, TokenFilter, - Vote, VoteFilter, - WalletToken, - WalletTokenFilter, ) from services.workflows.vector_react import add_documents_to_vectors @@ -346,88 +334,6 @@ def extract_dao_documents() -> List[Document]: ) documents.append(vote_doc) - # Get wallet tokens for this DAO - wallet_tokens = backend.list_wallet_tokens(WalletTokenFilter(dao_id=dao.id)) - if wallet_tokens: - print(f"Found {len(wallet_tokens)} wallet tokens for DAO {dao.name}") - - 
wallet_token_content = f""" - Token Holdings for DAO: {dao.name} - """ - - for wallet_token in wallet_tokens: - # Get the wallet - wallet = backend.get_wallet(wallet_token.wallet_id) - if wallet: - wallet_address = ( - wallet.mainnet_address - or wallet.testnet_address - or "Unknown" - ) - - # Get the token - token = backend.get_token(wallet_token.token_id) - token_name = token.name if token else "Unknown" - token_symbol = token.symbol if token else "Unknown" - - wallet_token_content += f""" - Wallet: {wallet_address} - Token: {token_name} ({token_symbol}) - Amount: {wallet_token.amount} - """ - - wallet_token_doc = Document( - page_content=wallet_token_content, - metadata={ - "type": "wallet_tokens", - "dao_id": str(dao.id), - "dao_name": dao.name or "Unnamed DAO", - "source_type": "database", - "entity_type": "wallet_tokens", - }, - ) - documents.append(wallet_token_doc) - - # Process token holders - holders = backend.list_holders(HolderFilter(dao_id=dao.id)) - if holders: - print(f"Found {len(holders)} holders for DAO {dao.name}") - - # Create content for token holders - holder_content = f""" - Token Holders for DAO {dao.name} - =================================== - """ - - for holder in holders: - # Get wallet info - wallet = backend.get_wallet(holder.wallet_id) - if not wallet: - continue - - # Get token info - token = backend.get_token(holder.token_id) - if not token: - continue - - holder_content += f""" - Wallet: {wallet.mainnet_address or wallet.testnet_address} - Token: {token.name} ({token.symbol}) - Amount: {holder.amount} - """ - - # Create document for token holders - holder_doc = Document( - page_content=holder_content, - metadata={ - "type": "holders", - "dao_id": str(dao.id), - "dao_name": dao.name, - "entity_type": "holders", - }, - ) - documents.append(holder_doc) - # Split the documents if they are too large text_splitter = RecursiveCharacterTextSplitter( chunk_size=4000, @@ -626,7 +532,6 @@ async def main() -> None: 
"https://docs.stacks.co/reference/keywords", "https://docs.stacks.co/reference/types", "https://docs.stacks.co/reference/the-stack", - "https://raw.githubusercontent.com/aibtcdev/aibtcdev-docs/refs/heads/main/aibtc-daos/dao-extensions/README.md", ] # Example directories to process @@ -653,7 +558,7 @@ async def main() -> None: knowledge_collection_name="knowledge_collection", # Collection for URLs and files dao_collection_name="dao_collection", # Collection for DAO database data recursive=True, - include_database=False, # Include DAO data from the database + include_database=True, # Include DAO data from the database ) diff --git a/lib/hiro.py b/lib/hiro.py index c4021e14..82817bcc 100644 --- a/lib/hiro.py +++ b/lib/hiro.py @@ -250,7 +250,7 @@ def __init__(self, base_url: str): self._request_times: List[float] = [] self._cache = TTLCache(maxsize=100, ttl=300) # Cache with 5-minute TTL self._session: Optional[aiohttp.ClientSession] = None - logger.info("Initialized API client with base URL: %s", self.base_url) + logger.debug("Initialized API client with base URL: %s", self.base_url) def _rate_limit(self) -> None: """Implement rate limiting.""" @@ -666,14 +666,14 @@ def __init__(self): @cached(lambda self: self._cache) def get_token_holders(self, token: str) -> Dict[str, Any]: """Retrieve a list of token holders with caching.""" - logger.info("Retrieving token holders for %s", token) + logger.debug("Retrieving token holders for %s", token) return self._make_request( "GET", f"{self.ENDPOINTS['tokens']}/ft/{token}/holders" ) def get_address_balance(self, addr: str) -> Dict[str, Any]: """Retrieve wallet balance for an address.""" - logger.info("Retrieving balance for address %s", addr) + logger.debug("Retrieving balance for address %s", addr) return self._make_request( "GET", f"{self.ENDPOINTS['addresses']}/{addr}/balances" ) @@ -816,19 +816,19 @@ def get_address_total_burnchain_rewards(self, address: str) -> Dict[str, Any]: @cached(lambda self: self._cache) def 
get_fee_rate(self) -> Dict[str, Any]: """Get current fee rate with caching.""" - logger.info("Retrieving current fee rate") + logger.debug("Retrieving current fee rate") return self._make_request("GET", "/extended/v1/fee_rate") @cached(lambda self: self._cache) def get_stx_supply(self) -> Dict[str, Any]: """Get STX supply with caching.""" - logger.info("Retrieving STX supply") + logger.debug("Retrieving STX supply") return self._make_request("GET", "/extended/v1/stx_supply") @cached(lambda self: self._cache) def get_stx_price(self) -> float: """Get the current STX price with caching.""" - logger.info("Retrieving current STX price") + logger.debug("Retrieving current STX price") response = requests.get( "https://explorer.hiro.so/stxPrice", params={"blockBurnTime": "current"} ) @@ -838,7 +838,7 @@ def get_stx_price(self) -> float: # @cached(lambda self: self._cache) def get_current_block_height(self) -> int: """Get the current block height""" - logger.info("Retrieving current block height") + logger.debug("Retrieving current block height") logger.debug(f"Endpoint: {self.ENDPOINTS['blocks']}") response = self._make_request( method="GET", @@ -850,20 +850,20 @@ def get_current_block_height(self) -> int: def search(self, query_id: str) -> Dict[str, Any]: """Search for blocks, transactions, contracts, or addresses.""" - logger.info("Performing search for query: %s", query_id) + logger.debug("Performing search for query: %s", query_id) return self._make_request("GET", f"{self.ENDPOINTS['search']}/{query_id}") # Async versions of selected methods async def aget_token_holders(self, token: str) -> Dict[str, Any]: """Async version of get_token_holders.""" - logger.info("Async retrieving token holders for %s", token) + logger.debug("Async retrieving token holders for %s", token) return await self._amake_request( "GET", f"{self.ENDPOINTS['tokens']}/ft/{token}/holders" ) async def aget_address_balance(self, addr: str) -> Dict[str, Any]: """Async version of get_address_balance.""" 
- logger.info("Async retrieving balance for address %s", addr) + logger.debug("Async retrieving balance for address %s", addr) return await self._amake_request( "GET", f"{self.ENDPOINTS['addresses']}/{addr}/balances" ) diff --git a/lib/logger.py b/lib/logger.py index f1734c1f..f748d6f2 100644 --- a/lib/logger.py +++ b/lib/logger.py @@ -22,12 +22,20 @@ def configure_logger(name: Optional[str] = None) -> logging.Logger: Returns: logging.Logger: Configured logger instance """ - # Get the logger - logger = logging.getLogger("uvicorn.error") + # Get the logger with the provided name + logger = logging.getLogger(name if name else __name__) # Set log level from environment variable, default to INFO if not set log_level_str = os.getenv("LOG_LEVEL", "INFO").upper() log_level = LOG_LEVELS.get(log_level_str, logging.INFO) logger.setLevel(log_level) + # Add console handler if none exists + if not logger.handlers: + console_handler = logging.StreamHandler() + console_handler.setLevel(log_level) + formatter = logging.Formatter("%(levelname)s - %(message)s") + console_handler.setFormatter(formatter) + logger.addHandler(console_handler) + return logger diff --git a/lib/tokenizer.py b/lib/tokenizer.py index 32e64fdb..12fb9332 100644 --- a/lib/tokenizer.py +++ b/lib/tokenizer.py @@ -5,7 +5,7 @@ class Trimmer: def __init__( - self, token_model: str = "gpt-4o", maxsize: int = 50000, margin: int = 500 + self, token_model: str = "gpt-4.1", maxsize: int = 50000, margin: int = 500 ): self.token_model = token_model self.maxsize = maxsize diff --git a/lib/twitter.py b/lib/twitter.py index 35a91cf4..c5e39069 100644 --- a/lib/twitter.py +++ b/lib/twitter.py @@ -224,3 +224,89 @@ async def get_mentions_by_user_id( except Exception as e: logger.error(f"Failed to get mentions: {str(e)}") return [] + + async def get_me(self) -> Optional[User]: + """ + Get information about the authenticated user. 
+ + Returns: + User data if successful, None if failed + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + response = self.client.get_me() + if isinstance(response, User): + return response + return None + except Exception as e: + logger.error(f"Failed to get authenticated user info: {str(e)}") + return None + + async def follow_user(self, target_username: str) -> bool: + """ + Follow a user using their username. Uses the authenticated user as the follower. + + Args: + target_username: Username of the account to follow (without @ symbol) + + Returns: + True if successful, False if failed + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + # Get authenticated user's ID + me = await self.get_me() + if not me: + raise Exception("Failed to get authenticated user info") + + # Get target user's ID + target_user = await self.get_user_by_username(target_username) + if not target_user: + raise Exception(f"Failed to get user info for {target_username}") + + # Follow the user + response = self.client.follow_user( + user_id=me.id, target_user_id=target_user.id + ) + logger.info(f"Successfully followed user: {target_username}") + return True + except Exception as e: + logger.error(f"Failed to follow user {target_username}: {str(e)}") + return False + + async def unfollow_user(self, target_username: str) -> bool: + """ + Unfollow a user using their username. Uses the authenticated user as the unfollower. 
+ + Args: + target_username: Username of the account to unfollow (without @ symbol) + + Returns: + True if successful, False if failed + """ + try: + if self.client is None: + raise Exception("Twitter client is not initialized") + + # Get authenticated user's ID + me = await self.get_me() + if not me: + raise Exception("Failed to get authenticated user info") + + # Get target user's ID + target_user = await self.get_user_by_username(target_username) + if not target_user: + raise Exception(f"Failed to get user info for {target_username}") + + # Unfollow the user + response = self.client.unfollow_user( + user_id=me.id, target_user_id=target_user.id + ) + logger.info(f"Successfully unfollowed user: {target_username}") + return True + except Exception as e: + logger.error(f"Failed to unfollow user {target_username}: {str(e)}") + return False diff --git a/proposal_evaluation_test.py b/proposal_evaluation_test.py new file mode 100644 index 00000000..449b063f --- /dev/null +++ b/proposal_evaluation_test.py @@ -0,0 +1,171 @@ +"""Functional test script for the proposal evaluation workflow. + +This script demonstrates the usage of the proposal evaluation workflow +with real-world scenarios. It's not a unit test but rather a functional +test to see the workflow in action. +""" + +import asyncio +from typing import Dict, Optional +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + ProposalCreate, + ProposalType, + QueueMessageCreate, + QueueMessageType, +) +from services.workflows.proposal_evaluation import ( + evaluate_and_vote_on_proposal, + evaluate_proposal_only, +) + + +async def create_test_proposal(dao_id: UUID) -> UUID: + """Create a test proposal for evaluation. 
+ + Args: + dao_id: The ID of the DAO to create the proposal for + + Returns: + The ID of the created proposal + """ + # Create test parameters as a JSON object + parameters = { + "action": "test_action", + "amount": 1000, + "description": "Test proposal for evaluation", + "recipient": "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + } + + # Create a test proposal + proposal_data = ProposalCreate( + dao_id=dao_id, + type=ProposalType.ACTION, + parameters=str(parameters), # Convert parameters to string + action="test_action", + contract_principal="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.test-contract", + creator="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + created_at_block=1, + end_block=100, + start_block=1, + liquid_tokens="1000", # Keep as string since that's what the model expects + proposal_id=1, + ) + + try: + # # Create some test tweets for the DAO + # for i in range(3): + # tweet_message = { + # "text": f"Test tweet {i+1} for proposal evaluation", + # "created_at": "2024-03-06T00:00:00Z", + # } + # backend.create_queue_message( + # QueueMessageCreate( + # type=QueueMessageType.TWEET, + # dao_id=dao_id, + # message=tweet_message, + # is_processed=True, + # ) + # ) + # print(f"Created test tweet {i+1} for DAO {dao_id}") + + # Create the proposal + proposal = backend.create_proposal(proposal_data) + print(f"Created test proposal with ID: {proposal.id}") + return proposal.id + except Exception as e: + print(f"Failed to create test proposal: {e}") + raise + + +async def test_proposal_evaluation_workflow(): + """Test the proposal evaluation workflow with different scenarios.""" + try: + # # First, let's run the debug workflow to test basic functionality + # print("Running debug workflow test...") + # debug_result = await debug_proposal_evaluation_workflow() + # print(f"Debug workflow test result: {debug_result}") + + # Now let's test with a real proposal + # First, we need a DAO ID - you would replace this with a real DAO ID + dao_id = UUID( + 
"cffb355f-50c1-4ec5-8e2f-a0e65547c746" + ) # Replace with real DAO ID + + # Create a test proposal + proposal_id = await create_test_proposal(dao_id) + + # Test scenarios + scenarios = [ + { + "name": "Evaluation Only", + "auto_vote": False, + "confidence_threshold": 0.7, + "description": "Testing proposal evaluation without voting", + }, + { + "name": "Auto-vote Enabled", + "auto_vote": False, # Fixed: Changed to True for auto-vote scenario + "confidence_threshold": 0.7, + "description": "Testing proposal evaluation with auto-voting", + }, + { + "name": "Low Confidence Threshold", + "auto_vote": False, + "confidence_threshold": 0.3, + "description": "Testing with lower confidence threshold", + }, + ] + + # Run each scenario + for scenario in scenarios: + print(f"\nRunning scenario: {scenario['name']}") + print(f"Description: {scenario['description']}") + + try: + if scenario["auto_vote"]: + result = await evaluate_and_vote_on_proposal( + proposal_id=proposal_id, + auto_vote=scenario["auto_vote"], + confidence_threshold=scenario["confidence_threshold"], + dao_id=dao_id, + ) + else: + result = await evaluate_proposal_only( + proposal_id=proposal_id, + wallet_id=UUID("532fd36b-8a9d-4fdd-82d2-25ddcf007488"), + ) + + # Print the results + print("\nEvaluation Results:") + print(f"Success: {result['success']}") + if result["success"]: + print(f"Approval: {result['evaluation']['approve']}") + print(f"Confidence: {result['evaluation']['confidence_score']}") + print(f"Reasoning: {result['evaluation']['reasoning']}") + print(f"Token Usage: {result['token_usage']}") + print(f"Cost: ${result['token_costs']['total_cost']:.4f}") + + if scenario["auto_vote"]: + print(f"Auto-voted: {result['auto_voted']}") + if result["vote_result"]: + print(f"Vote Result: {result['vote_result']}") + if result.get("tx_id"): + print(f"Transaction ID: {result['tx_id']}") + else: + print(f"Error: {result.get('error', 'Unknown error')}") + + except Exception as e: + print(f"Error in scenario 
{scenario['name']}: {e}") + + except Exception as e: + print(f"Test failed: {e}") + raise + + +if __name__ == "__main__": + + # Run the tests + asyncio.run(test_proposal_evaluation_workflow()) diff --git a/requirements.txt b/requirements.txt index bc291eb2..3c72841d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,22 +1,23 @@ APScheduler==3.11.0 cachetools==5.5.2 -fastapi==0.115.11 -langchain==0.3.20 -langchain_core==0.3.44 -langchain_openai==0.3.8 -langchain_text_splitters==0.3.6 -langgraph==0.3.8 -openai==1.66.3 +fastapi==0.115.12 +langchain==0.3.23 +langchain_core==0.3.51 +langchain_openai==0.3.12 +langchain_text_splitters==0.3.8 +langgraph==0.3.29 +openai==1.73.0 pgvector==0.3.6 psycopg2==2.9.10 -pydantic==2.10.6 +pydantic==2.11.3 python-dotenv==1.0.1 python-telegram-bot==21.11.1 python-twitter-v2==0.9.2 Requests==2.32.3 -SQLAlchemy==2.0.39 -starlette==0.46.1 -supabase==2.13.0 +SQLAlchemy==2.0.40 +starlette==0.46.2 +supabase==2.15.0 tiktoken==0.9.0 -uvicorn==0.34.0 -vecs==0.4.5 \ No newline at end of file +uvicorn==0.34.1 +vecs==0.4.5 +langchain_community==0.3.21 \ No newline at end of file diff --git a/services/runner/__init__.py b/services/runner/__init__.py index b2878bbe..e3506e17 100644 --- a/services/runner/__init__.py +++ b/services/runner/__init__.py @@ -3,10 +3,18 @@ from services.runner.base import BaseTask, JobContext, JobType from services.runner.job_manager import JobConfig, JobManager from services.runner.registry import JobRegistry, execute_runner_job +from services.runner.tasks.agent_account_deployer import ( + AgentAccountDeployerTask, + agent_account_deployer, +) from services.runner.tasks.dao_proposal_concluder import ( DAOProposalConcluderTask, dao_proposal_concluder, ) +from services.runner.tasks.dao_proposal_evaluation import ( + DAOProposalEvaluationTask, + dao_proposal_evaluation, +) from services.runner.tasks.dao_proposal_voter import ( DAOProposalVoterTask, dao_proposal_voter, @@ -19,8 +27,10 @@ 
JobRegistry.register(JobType.DAO, DAOTask) JobRegistry.register(JobType.DAO_PROPOSAL_VOTE, DAOProposalVoterTask) JobRegistry.register(JobType.DAO_PROPOSAL_CONCLUDE, DAOProposalConcluderTask) +JobRegistry.register(JobType.DAO_PROPOSAL_EVALUATION, DAOProposalEvaluationTask) JobRegistry.register(JobType.DAO_TWEET, DAOTweetTask) JobRegistry.register(JobType.TWEET, TweetTask) +JobRegistry.register(JobType.AGENT_ACCOUNT_DEPLOY, AgentAccountDeployerTask) __all__ = [ "BaseTask", @@ -40,4 +50,8 @@ "execute_runner_job", "DAOProposalConcluderTask", "dao_proposal_concluder", + "DAOProposalEvaluationTask", + "dao_proposal_evaluation", + "AgentAccountDeployerTask", + "agent_account_deployer", ] diff --git a/services/runner/base.py b/services/runner/base.py index 5eb7f80e..6e072586 100644 --- a/services/runner/base.py +++ b/services/runner/base.py @@ -65,13 +65,15 @@ def from_env(cls) -> "RunnerConfig": class JobType(str, Enum): - """Enum for different types of jobs.""" - - DAO = QueueMessageType.DAO - DAO_TWEET = QueueMessageType.DAO_TWEET - TWEET = QueueMessageType.TWEET - DAO_PROPOSAL_VOTE = QueueMessageType.DAO_PROPOSAL_VOTE - DAO_PROPOSAL_CONCLUDE = QueueMessageType.DAO_PROPOSAL_CONCLUDE + """Types of jobs that can be run.""" + + DAO = "dao" + DAO_PROPOSAL_VOTE = "dao_proposal_vote" + DAO_PROPOSAL_CONCLUDE = "dao_proposal_conclude" + DAO_PROPOSAL_EVALUATION = "dao_proposal_evaluation" + DAO_TWEET = "dao_tweet" + TWEET = "tweet" + AGENT_ACCOUNT_DEPLOY = "agent_account_deploy" def __str__(self): return self.value diff --git a/services/runner/job_manager.py b/services/runner/job_manager.py index 8b6b0c6d..e8ded37a 100644 --- a/services/runner/job_manager.py +++ b/services/runner/job_manager.py @@ -93,6 +93,18 @@ def get_all_jobs() -> List[JobConfig]: config.scheduler.dao_proposal_conclude_runner_interval_seconds, JobType.DAO_PROPOSAL_CONCLUDE.value, ), + ( + "DAO Proposal Evaluation Runner Service", + config.scheduler.dao_proposal_evaluation_runner_enabled, + 
config.scheduler.dao_proposal_evaluation_runner_interval_seconds, + JobType.DAO_PROPOSAL_EVALUATION.value, + ), + ( + "Agent Account Deploy Runner Service", + config.scheduler.agent_account_deploy_runner_enabled, + config.scheduler.agent_account_deploy_runner_interval_seconds, + JobType.AGENT_ACCOUNT_DEPLOY.value, + ), ] # Add all runner jobs with common structure diff --git a/services/runner/tasks/__init__.py b/services/runner/tasks/__init__.py index 1d47dbb8..e1992934 100644 --- a/services/runner/tasks/__init__.py +++ b/services/runner/tasks/__init__.py @@ -1,6 +1,7 @@ """Task runners for scheduled and on-demand jobs.""" from .dao_proposal_concluder import DAOProposalConcluderTask, dao_proposal_concluder +from .dao_proposal_evaluation import DAOProposalEvaluationTask, dao_proposal_evaluation from .dao_proposal_voter import DAOProposalVoterTask, dao_proposal_voter from .dao_task import DAOTask, dao_task from .dao_tweet_task import DAOTweetTask, dao_tweet_task @@ -17,4 +18,6 @@ "tweet_task", "DAOProposalConcluderTask", "dao_proposal_concluder", + "DAOProposalEvaluationTask", + "dao_proposal_evaluation", ] diff --git a/services/runner/tasks/agent_account_deployer.py b/services/runner/tasks/agent_account_deployer.py new file mode 100644 index 00000000..051e4437 --- /dev/null +++ b/services/runner/tasks/agent_account_deployer.py @@ -0,0 +1,173 @@ +"""Agent account deployment task implementation.""" + +from dataclasses import dataclass +from typing import Any, Dict, List + +from backend.factory import backend +from backend.models import ( + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, +) +from config import config +from lib.logger import configure_logger +from services.runner.base import BaseTask, JobContext, RunnerResult +from tools.smartwallet import SmartWalletDeploySmartWalletTool + +logger = configure_logger(__name__) + + +@dataclass +class AgentAccountDeployResult(RunnerResult): + """Result of agent account deployment operation.""" 
+ + accounts_processed: int = 0 + accounts_deployed: int = 0 + errors: List[str] = None + + def __post_init__(self): + self.errors = self.errors or [] + + +class AgentAccountDeployerTask(BaseTask[AgentAccountDeployResult]): + """Task runner for deploying agent accounts.""" + + QUEUE_TYPE = QueueMessageType.AGENT_ACCOUNT_DEPLOY + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate task-specific conditions.""" + try: + # Get pending messages from the queue + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug( + f"Found {message_count} pending agent account deployment messages" + ) + + if message_count == 0: + logger.info("No pending agent account deployment messages found") + return False + + # Validate that at least one message has valid deployment data + for message in pending_messages: + message_data = message.message or {} + if self._validate_message_data(message_data): + logger.info("Found valid agent account deployment message") + return True + + logger.warning("No valid deployment data found in pending messages") + return False + + except Exception as e: + logger.error( + f"Error validating agent account deployment task: {str(e)}", + exc_info=True, + ) + return False + + def _validate_message_data(self, message_data: Dict[str, Any]) -> bool: + """Validate the message data contains required fields.""" + required_fields = [ + "owner_address", + "dao_token_contract", + "dao_token_dex_contract", + ] + return all(field in message_data for field in required_fields) + + async def process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single agent account deployment message.""" + message_id = message.id + message_data = message.message or {} + + logger.debug(f"Processing agent account deployment message {message_id}") + + try: + # Validate message data + if not self._validate_message_data(message_data): + error_msg = f"Invalid message data in message 
{message_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Initialize the SmartWalletDeploySmartWalletTool + logger.debug("Preparing to deploy agent account") + deploy_tool = SmartWalletDeploySmartWalletTool( + wallet_id=config.scheduler.agent_account_deploy_runner_wallet_id + ) + + # Execute the deployment + logger.debug("Executing deployment...") + deployment_result = await deploy_tool._arun( + owner_address=message_data["owner_address"], + dao_token_contract=message_data["dao_token_contract"], + dao_token_dex_contract=message_data["dao_token_dex_contract"], + ) + logger.debug(f"Deployment result: {deployment_result}") + + # Mark the message as processed + update_data = QueueMessageBase(is_processed=True) + backend.update_queue_message(message_id, update_data) + + return {"success": True, "deployed": True, "result": deployment_result} + + except Exception as e: + error_msg = f"Error processing message {message_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + return {"success": False, "error": error_msg} + + async def get_pending_messages(self) -> List[QueueMessage]: + """Get all unprocessed messages from the queue.""" + filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) + return backend.list_queue_messages(filters=filters) + + async def _execute_impl( + self, context: JobContext + ) -> List[AgentAccountDeployResult]: + """Run the agent account deployment task.""" + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending agent account deployment messages") + + if not pending_messages: + return [ + AgentAccountDeployResult( + success=True, + message="No pending messages found", + accounts_processed=0, + accounts_deployed=0, + ) + ] + + # Process each message + processed_count = 0 + deployed_count = 0 + errors = [] + + for message in pending_messages: + result = await self.process_message(message) + processed_count += 1 
+ + if result.get("success"): + if result.get("deployed", False): + deployed_count += 1 + else: + errors.append(result.get("error", "Unknown error")) + + logger.debug( + f"Task metrics - Processed: {processed_count}, " + f"Deployed: {deployed_count}, Errors: {len(errors)}" + ) + + return [ + AgentAccountDeployResult( + success=True, + message=f"Processed {processed_count} account(s), deployed {deployed_count} account(s)", + accounts_processed=processed_count, + accounts_deployed=deployed_count, + errors=errors, + ) + ] + + +# Instantiate the task for use in the registry +agent_account_deployer = AgentAccountDeployerTask() diff --git a/services/runner/tasks/dao_proposal_evaluation.py b/services/runner/tasks/dao_proposal_evaluation.py new file mode 100644 index 00000000..d1f1dd6b --- /dev/null +++ b/services/runner/tasks/dao_proposal_evaluation.py @@ -0,0 +1,255 @@ +"""DAO proposal evaluation task implementation.""" + +from dataclasses import dataclass +from typing import Any, Dict, List + +from backend.factory import backend +from backend.models import ( + QueueMessage, + QueueMessageBase, + QueueMessageCreate, + QueueMessageFilter, + QueueMessageType, + VoteBase, + VoteCreate, +) +from lib.logger import configure_logger +from services.runner.base import BaseTask, JobContext, RunnerResult +from services.workflows.proposal_evaluation import evaluate_and_vote_on_proposal + +logger = configure_logger(__name__) + + +@dataclass +class DAOProposalEvaluationResult(RunnerResult): + """Result of DAO proposal evaluation operation.""" + + proposals_processed: int = 0 + proposals_evaluated: int = 0 + errors: List[str] = None + + def __post_init__(self): + self.errors = self.errors or [] + + +class DAOProposalEvaluationTask(BaseTask[DAOProposalEvaluationResult]): + """Task runner for evaluating DAO proposals.""" + + QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_EVALUATION + DEFAULT_CONFIDENCE_THRESHOLD = 0.7 + DEFAULT_AUTO_VOTE = False + + async def _validate_task_specific(self, 
context: JobContext) -> bool: + """Validate task-specific conditions.""" + try: + # Get pending messages from the queue + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending proposal evaluation messages") + + if message_count == 0: + logger.info("No pending proposal evaluation messages found") + return False + + # Validate that at least one message has a valid proposal + for message in pending_messages: + message_data = message.message or {} + proposal_id = message_data.get("proposal_id") + + if not proposal_id: + logger.warning(f"Message {message.id} missing proposal_id") + continue + + # Check if the proposal exists in the database + proposal = backend.get_proposal(proposal_id) + if proposal: + logger.info(f"Found valid proposal {proposal_id} to process") + return True + else: + logger.warning(f"Proposal {proposal_id} not found in database") + + logger.warning("No valid proposals found in pending messages") + return False + + except Exception as e: + logger.error( + f"Error validating proposal evaluation task: {str(e)}", exc_info=True + ) + return False + + async def process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single DAO proposal evaluation message.""" + message_id = message.id + message_data = message.message or {} + wallet_id = message.wallet_id + dao_id = message.dao_id + + logger.debug( + f"Processing proposal evaluation message {message_id} for wallet {wallet_id}" + ) + + # Get the proposal ID from the message + proposal_id = message_data.get("proposal_id") + if not proposal_id: + error_msg = f"Missing proposal_id in message {message_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + try: + # Get the proposal details from the database + proposal = backend.get_proposal(proposal_id) + if not proposal: + error_msg = f"Proposal {proposal_id} not found in database" + logger.error(error_msg) + return {"success": 
False, "error": error_msg} + + # Get the DAO information + dao = backend.get_dao(dao_id) if dao_id else None + if not dao: + error_msg = f"DAO not found for proposal {proposal_id}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Execute the proposal evaluation workflow + logger.info(f"Evaluating proposal {proposal.id} for DAO {dao.name}") + + result = await evaluate_and_vote_on_proposal( + proposal_id=proposal.id, + wallet_id=wallet_id, + auto_vote=self.DEFAULT_AUTO_VOTE, # Don't auto-vote, just evaluate + confidence_threshold=self.DEFAULT_CONFIDENCE_THRESHOLD, + dao_id=dao_id, + ) + + # Extract evaluation results + evaluation = result.get("evaluation", {}) + approval = evaluation.get("approve", False) + confidence = evaluation.get("confidence_score", 0.0) + reasoning = evaluation.get("reasoning", "No reasoning provided") + formatted_prompt = result.get("formatted_prompt", "No prompt provided") + total_cost = result.get("token_costs", {}).get("total_cost", 0.0) + model = result.get("model_info", {}).get("name", "Unknown") + + logger.info( + f"Proposal {proposal.id} ({dao.name}): Evaluated with result " + f"{'FOR' if approval else 'AGAINST'} with confidence {confidence:.2f}" + ) + + wallet = backend.get_wallet(wallet_id) + + # Create a vote record with the evaluation results + vote_data = VoteCreate( + wallet_id=wallet_id, + dao_id=dao_id, + agent_id=None, # This will be set from the wallet if it exists + proposal_id=proposal_id, + answer=approval, + reasoning=reasoning, + confidence=confidence, + prompt=formatted_prompt, + cost=total_cost, + model=model, + profile_id=wallet.profile_id, + ) + + # Create the vote record + vote = backend.create_vote(vote_data) + if not vote: + logger.error("Failed to create vote record") + return {"success": False, "error": "Failed to create vote record"} + + logger.info(f"Created vote record {vote.id} for proposal {proposal_id}") + + # Create a DAO_PROPOSAL_VOTE message with the vote record ID + 
vote_message_data = {"proposal_id": proposal_id, "vote_id": str(vote.id)} + + vote_message = backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.DAO_PROPOSAL_VOTE, + message=vote_message_data, + dao_id=dao_id, + wallet_id=wallet_id, + ) + ) + + if not vote_message: + logger.error("Failed to create vote queue message") + return { + "success": False, + "error": "Failed to create vote queue message", + } + + logger.info(f"Created vote queue message {vote_message.id}") + + # Mark the evaluation message as processed + update_data = QueueMessageBase(is_processed=True) + backend.update_queue_message(message_id, update_data) + + return { + "success": True, + "vote_id": str(vote.id), + "vote_message_id": str(vote_message.id), + "approve": approval, + "confidence": confidence, + } + + except Exception as e: + error_msg = f"Error processing message {message_id}: {str(e)}" + logger.error(error_msg, exc_info=True) + return {"success": False, "error": error_msg} + + async def get_pending_messages(self) -> List[QueueMessage]: + """Get all unprocessed messages from the queue.""" + filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) + return backend.list_queue_messages(filters=filters) + + async def _execute_impl( + self, context: JobContext + ) -> List[DAOProposalEvaluationResult]: + """Run the DAO proposal evaluation task.""" + pending_messages = await self.get_pending_messages() + message_count = len(pending_messages) + logger.debug(f"Found {message_count} pending proposal evaluation messages") + + if not pending_messages: + return [ + DAOProposalEvaluationResult( + success=True, + message="No pending messages found", + proposals_processed=0, + proposals_evaluated=0, + ) + ] + + # Process each message + processed_count = 0 + evaluated_count = 0 + errors = [] + + for message in pending_messages: + result = await self.process_message(message) + processed_count += 1 + + if result.get("success"): + evaluated_count += 1 + else: + 
errors.append(result.get("error", "Unknown error")) + + logger.debug( + f"Task metrics - Processed: {processed_count}, " + f"Evaluated: {evaluated_count}, Errors: {len(errors)}" + ) + + return [ + DAOProposalEvaluationResult( + success=True, + message=f"Processed {processed_count} proposal(s), evaluated {evaluated_count} proposal(s)", + proposals_processed=processed_count, + proposals_evaluated=evaluated_count, + errors=errors, + ) + ] + + +# Instantiate the task for use in the registry +dao_proposal_evaluation = DAOProposalEvaluationTask() diff --git a/services/runner/tasks/dao_proposal_voter.py b/services/runner/tasks/dao_proposal_voter.py index ab2b5981..21e4b708 100644 --- a/services/runner/tasks/dao_proposal_voter.py +++ b/services/runner/tasks/dao_proposal_voter.py @@ -1,5 +1,6 @@ """DAO proposal voter task implementation.""" +import json from dataclasses import dataclass from typing import Any, Dict, List @@ -9,12 +10,13 @@ QueueMessageBase, QueueMessageFilter, QueueMessageType, - VoteCreate, + VoteBase, + VoteFilter, ) from config import config from lib.logger import configure_logger from services.runner.base import BaseTask, JobContext, RunnerResult -from services.workflows.proposal_evaluation import evaluate_and_vote_on_proposal +from tools.dao_ext_action_proposals import VoteOnActionProposalTool logger = configure_logger(__name__) @@ -35,8 +37,6 @@ class DAOProposalVoterTask(BaseTask[DAOProposalVoteResult]): """Task runner for processing and voting on DAO proposals.""" QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_VOTE - DEFAULT_CONFIDENCE_THRESHOLD = 0.7 - DEFAULT_AUTO_VOTE = True async def _validate_task_specific(self, context: JobContext) -> bool: """Validate task-specific conditions.""" @@ -50,7 +50,7 @@ async def _validate_task_specific(self, context: JobContext) -> bool: logger.info("No pending proposal voting messages found") return False - # Validate that at least one message has a valid proposal + # Validate that at least one message has a valid 
proposal ID for message in pending_messages: message_data = message.message or {} proposal_id = message_data.get("proposal_id") @@ -62,12 +62,29 @@ async def _validate_task_specific(self, context: JobContext) -> bool: # Check if the proposal exists in the database proposal = backend.get_proposal(proposal_id) if proposal: - logger.info(f"Found valid proposal {proposal_id} to process") - return True + # Check if there are any unvoted votes for this proposal + unvoted_votes = backend.list_votes( + VoteFilter( + proposal_id=proposal_id, + voted=False, + ) + ) + + if unvoted_votes: + logger.info( + f"Found valid proposal {proposal_id} with {len(unvoted_votes)} unvoted votes to process" + ) + return True + else: + logger.warning( + f"No unvoted votes found for proposal {proposal_id}" + ) else: logger.warning(f"Proposal {proposal_id} not found in database") - logger.warning("No valid proposals found in pending messages") + logger.warning( + "No valid proposals with unvoted votes found in pending messages" + ) return False except Exception as e: @@ -89,6 +106,7 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: # Get the proposal ID from the message proposal_id = message_data.get("proposal_id") + if not proposal_id: error_msg = f"Missing proposal_id in message {message_id}" logger.error(error_msg) @@ -102,101 +120,150 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: logger.error(error_msg) return {"success": False, "error": error_msg} - # Get the DAO information - dao = backend.get_dao(dao_id) if dao_id else None - if not dao: - error_msg = f"DAO not found for proposal {proposal_id}" + # Get unvoted votes for this proposal and wallet + unvoted_votes = backend.list_votes( + VoteFilter( + proposal_id=proposal_id, + wallet_id=wallet_id, + voted=False, + ) + ) + + if not unvoted_votes: + error_msg = f"No unvoted votes found for proposal {proposal_id} and wallet {wallet_id}" logger.error(error_msg) return {"success": False, 
"error": error_msg} - # Execute the proposal evaluation workflow - logger.info(f"Evaluating proposal {proposal.id} for DAO {dao.name}") + # Initialize the voting tool + voting_tool = VoteOnActionProposalTool(wallet_id=wallet_id) + + # Process each unvoted vote + results = [] + for vote in unvoted_votes: + # Submit the vote + vote_result = await voting_tool._arun( + action_proposals_voting_extension=proposal.contract_principal, + proposal_id=proposal.proposal_id, + vote=vote.answer, + ) - result = await evaluate_and_vote_on_proposal( - proposal_id=proposal.id, - wallet_id=wallet_id, - auto_vote=self.DEFAULT_AUTO_VOTE, - confidence_threshold=self.DEFAULT_CONFIDENCE_THRESHOLD, - dao_id=dao_id, - ) + if not vote_result.get("success", False): + error_msg = f"Failed to submit vote {vote.id}: {vote_result.get('message', 'Unknown error')}" + logger.error(error_msg) + results.append( + {"success": False, "error": error_msg, "vote_id": vote.id} + ) + continue - # Log the results - evaluation = result.get("evaluation", {}) - approval = evaluation.get("approve", False) - confidence = evaluation.get("confidence_score", 0.0) - reasoning = evaluation.get("reasoning", "No reasoning provided") - formatted_prompt = result.get("formatted_prompt", "No prompt provided") - vote_created = False - vote_id = None + try: + # Parse the output JSON string + output_data = ( + json.loads(vote_result["output"]) + if isinstance(vote_result["output"], str) + else vote_result["output"] + ) + # Get the transaction ID from the nested data structure + tx_id = output_data.get("data", {}).get("txid") + + if not tx_id: + logger.warning(f"No txid found in parsed output: {output_data}") + results.append( + { + "success": False, + "error": "No transaction ID found in response", + "vote_id": vote.id, + "vote_result": vote_result, + } + ) + continue + + except (json.JSONDecodeError, KeyError) as e: + logger.error(f"Error parsing vote result output: {str(e)}") + results.append( + { + "success": False, + 
"error": f"Failed to parse vote result: {str(e)}", + "vote_id": vote.id, + "vote_result": vote_result, + } + ) + continue - if result.get("auto_voted", False): - logger.info( - f"Proposal {proposal.id} ({dao.name}): Voted {'FOR' if approval else 'AGAINST'} " - f"with confidence {confidence:.2f}" + # Log the txid for debugging + ## Get the correct address based on network configuration + wallet = backend.get_wallet(wallet_id) + address = ( + wallet.mainnet_address + if config.network.network == "mainnet" + else wallet.testnet_address ) - else: - logger.info( - f"Proposal {proposal.id} ({dao.name}): Evaluated but not auto-voted - " - f"confidence {confidence:.2f} below threshold" + logger.debug(f"Found txid in response: {tx_id}") + vote_data = VoteBase( + tx_id=tx_id, + voted=True, + address=address, + profile_id=wallet.profile_id, ) - - # Get wallet information for the address - wallet = backend.get_wallet(wallet_id) if wallet_id else None - wallet_address = None - - # Select the appropriate wallet address based on network type - if wallet: - network_type = config.network.network.lower() - if network_type == "mainnet": - wallet_address = wallet.mainnet_address - logger.debug(f"Using mainnet address: {wallet_address}") - else: # testnet or other networks - wallet_address = wallet.testnet_address - logger.debug(f"Using testnet address: {wallet_address}") - - if not wallet_address: - logger.warning( - f"No {network_type} address found for wallet {wallet_id}" + logger.debug( + f"Attempting to update vote {vote.id} with data: {vote_data.model_dump()}" + ) + try: + # Log the current vote state before update + current_vote = backend.get_vote(vote.id) + logger.debug( + f"Current vote state before update: {current_vote.model_dump() if current_vote else None}" ) - # Get transaction ID if available (will be None if not auto-voted) - tx_id = result.get("tx_id") - - # Always create a vote record to store the evaluation results - vote_data = VoteCreate( - wallet_id=wallet_id, - 
dao_id=dao_id, - agent_id=wallet.agent_id if wallet and wallet.agent_id else None, - answer=approval, - proposal_id=proposal_id, - prompt=formatted_prompt, - reasoning=reasoning, - tx_id=tx_id, - address=wallet_address, - confidence=confidence, - ) - - try: - vote = backend.create_vote(vote_data) - vote_created = True - vote_id = vote.id - logger.info(f"Created vote record {vote.id} for proposal {proposal_id}") - except Exception as vote_error: - logger.error(f"Failed to create vote record: {str(vote_error)}") - - logger.debug(f"Proposal {proposal.id} reasoning: {reasoning}") - - # Mark the message as processed using QueueMessageBase - update_data = QueueMessageBase(is_processed=True) - backend.update_queue_message(message_id, update_data) + updated_vote = backend.update_vote(vote.id, vote_data) + if updated_vote: + logger.info( + f"Successfully updated vote {vote.id} with transaction ID {tx_id} and marked as voted" + ) + logger.debug(f"Updated vote state: {updated_vote.model_dump()}") + else: + logger.error( + f"Failed to update vote {vote.id} - update_vote returned None" + ) + except Exception as e: + logger.error( + f"Error updating vote {vote.id}: {str(e)}", exc_info=True + ) + results.append( + { + "success": False, + "error": f"Failed to update vote: {str(e)}", + "vote_id": vote.id, + "vote_result": vote_result, + } + ) + continue + results.append( + { + "success": True, + "vote_id": vote.id, + "tx_id": tx_id, + "vote_result": vote_result, + } + ) - return { - "success": True, - "auto_voted": result.get("auto_voted", False), - "approve": approval, - "vote_created": vote_created, - "vote_id": vote_id, - } + # Mark the message as processed if all votes were handled + if all(result["success"] for result in results): + update_data = QueueMessageBase(is_processed=True) + backend.update_queue_message(message_id, update_data) + logger.info( + f"Successfully processed all votes for message {message_id}" + ) + return { + "success": True, + "results": results, + } + 
else: + # Some votes failed + return { + "success": False, + "error": "Some votes failed to process", + "results": results, + } except Exception as e: error_msg = f"Error processing message {message_id}: {str(e)}" @@ -227,7 +294,6 @@ async def _execute_impl(self, context: JobContext) -> List[DAOProposalVoteResult # Process each message processed_count = 0 voted_count = 0 - votes_created = 0 errors = [] for message in pending_messages: @@ -235,22 +301,26 @@ async def _execute_impl(self, context: JobContext) -> List[DAOProposalVoteResult processed_count += 1 if result.get("success"): - if result.get("auto_voted", False): - voted_count += 1 - if result.get("vote_created", False): - votes_created += 1 + # Count successful votes from the results + voted_count += len( + [r for r in result.get("results", []) if r.get("success")] + ) else: errors.append(result.get("error", "Unknown error")) + # Also add any individual vote errors + for vote_result in result.get("results", []): + if not vote_result.get("success"): + errors.append(vote_result.get("error", "Unknown vote error")) logger.debug( f"Task metrics - Processed: {processed_count}, " - f"Voted: {voted_count}, Votes created: {votes_created}, Errors: {len(errors)}" + f"Voted: {voted_count}, Errors: {len(errors)}" ) return [ DAOProposalVoteResult( success=True, - message=f"Processed {processed_count} proposal(s), voted on {voted_count} proposal(s), created {votes_created} vote record(s)", + message=f"Processed {processed_count} proposal(s), voted on {voted_count} proposal(s)", proposals_processed=processed_count, proposals_voted=voted_count, errors=errors, diff --git a/services/runner/tasks/tweet_task.py b/services/runner/tasks/tweet_task.py index fe6f7f23..10ae8fb1 100644 --- a/services/runner/tasks/tweet_task.py +++ b/services/runner/tasks/tweet_task.py @@ -281,4 +281,4 @@ async def _execute_impl(self, context: JobContext) -> List[TweetProcessingResult return results -tweet_task = TweetTask() +tweet_task = TweetTask() \ 
No newline at end of file diff --git a/services/webhooks/chainhook/handler.py b/services/webhooks/chainhook/handler.py index 7f60d3f1..94d08620 100644 --- a/services/webhooks/chainhook/handler.py +++ b/services/webhooks/chainhook/handler.py @@ -37,6 +37,8 @@ def __init__(self): """Initialize the handler with a logger and specialized handlers.""" super().__init__() self.logger = configure_logger(self.__class__.__name__) + # Initialize BlockStateHandler first as it needs to validate block heights + self.block_state_handler = BlockStateHandler() self.handlers = [ ContractMessageHandler(), BuyEventHandler(), @@ -45,7 +47,7 @@ def __init__(self): DAOProposalBurnHeightHandler(), DAOVoteHandler(), DAOProposalConclusionHandler(), - BlockStateHandler(), + self.block_state_handler, # Add to regular handlers list too for post-processing ] async def handle(self, parsed_data: ChainHookData) -> Dict[str, Any]: @@ -62,7 +64,8 @@ async def handle(self, parsed_data: ChainHookData) -> Dict[str, Any]: f"Processing chainhook webhook with {len(parsed_data.apply)} apply blocks" ) - # Set chainhook data for all handlers + # Set chainhook data for all handlers including block state handler + self.block_state_handler.set_chainhook_data(parsed_data) for handler in self.handlers: handler.set_chainhook_data(parsed_data) @@ -73,9 +76,37 @@ async def handle(self, parsed_data: ChainHookData) -> Dict[str, Any]: f"(height: {apply.block_identifier.index})" ) - # Process block-level handlers first + # First, process with BlockStateHandler to update/validate chain state + if self.block_state_handler.can_handle_block(apply): + await self.block_state_handler.handle_block(apply) + + # Check if BlockStateHandler successfully processed *this* block. + # This implies the block was newer than the DB state AND the DB update succeeded. + # We check if the handler's internal state now matches this block's height. 
+ block_processed_by_state_handler = ( + self.block_state_handler.latest_chain_state is not None + and self.block_state_handler.latest_chain_state.block_height + == apply.block_identifier.index + ) + + if not block_processed_by_state_handler: + self.logger.warning( + f"Block {apply.block_identifier.index} was not processed by BlockStateHandler " + f"(likely older than current DB state or failed update). Skipping other handlers for this block." + ) + continue # Skip to the next block in the webhook payload + + # If BlockStateHandler processed it, proceed with other handlers for this block + self.logger.debug( + f"Block {apply.block_identifier.index} validated by BlockStateHandler, proceeding." + ) + + # Process other block-level handlers for handler in self.handlers: - if handler.can_handle_block(apply): + if ( + handler != self.block_state_handler + and handler.can_handle_block(apply) + ): self.logger.debug( f"Using handler {handler.__class__.__name__} for block-level processing" ) diff --git a/services/webhooks/chainhook/handlers/base_vote_handler.py b/services/webhooks/chainhook/handlers/base_vote_handler.py index a14ac630..fce39dc9 100644 --- a/services/webhooks/chainhook/handlers/base_vote_handler.py +++ b/services/webhooks/chainhook/handlers/base_vote_handler.py @@ -1,6 +1,6 @@ """Base handler for DAO proposal votes.""" -from typing import Dict, Optional +from typing import Dict, List, Optional from backend.factory import backend from backend.models import ProposalFilter, VoteBase, VoteCreate, VoteFilter @@ -150,17 +150,34 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: ) return - # Find existing votes for this proposal and voter - votes = backend.list_votes( + # Find existing votes using two different filter criteria + votes: List[VoteBase] = [] + + # Search by proposal_id and voter address + address_votes = backend.list_votes( filters=VoteFilter( proposal_id=proposal.id, address=voter_address, ) ) + 
votes.extend(address_votes) + + # Search by proposal_id and transaction ID + if tx_id: + tx_votes = backend.list_votes( + filters=VoteFilter( + proposal_id=proposal.id, + tx_id=tx_id, + ) + ) + # Add only unique votes that aren't already in the list + for vote in tx_votes: + if vote not in votes: + votes.append(vote) if votes: # Update existing vote with transaction ID and amount - vote = votes[0] + vote = votes[0] # Use the first vote found self.logger.info( f"Updating existing vote {vote.id} with tx_id: {tx_id} and amount: {amount}" ) diff --git a/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py b/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py index b080679a..8aa23241 100644 --- a/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py +++ b/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py @@ -10,6 +10,7 @@ QueueMessageCreate, QueueMessageType, ) +from config import config from lib.logger import configure_logger from services.webhooks.chainhook.handlers.base import ChainhookEventHandler from services.webhooks.chainhook.models import ChainHookData, TransactionWithReceipt @@ -150,14 +151,26 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: and p.parameters is not None # Ensure parameters exist ] - if not start_proposals and not end_proposals: + # Filter proposals that should trigger voting after delay + vote_delay = config.scheduler.dao_proposal_vote_delay_blocks + vote_proposals = [ + p + for p in proposals + if p.start_block is not None + and p.end_block is not None + and p.start_block - vote_delay == burn_height + and p.parameters is not None # Ensure parameters exist + ] + + if not start_proposals and not end_proposals and not vote_proposals: self.logger.info( f"No eligible proposals found for burn height {burn_height}" ) return self.logger.info( - f"Found {len(start_proposals)} proposals to start and {len(end_proposals)} proposals to 
conclude" + f"Found {len(start_proposals)} proposals to start, {len(end_proposals)} proposals to conclude, " + f"and {len(vote_proposals)} proposals ready for voting" ) # Process proposals that are starting @@ -183,7 +196,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_VOTE, + type=QueueMessageType.DAO_PROPOSAL_EVALUATION, message=message_data, dao_id=dao.id, wallet_id=agent["wallet_id"], @@ -191,7 +204,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: ) self.logger.info( - f"Created vote queue message for agent {agent['agent_id']} " + f"Created evaluation queue message for agent {agent['agent_id']} " f"to evaluate proposal {proposal.id}" ) @@ -219,3 +232,37 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: self.logger.info( f"Created conclude queue message for proposal {proposal.id}" ) + + # Process proposals that are ready for voting + for proposal in vote_proposals: + # Get the DAO for this proposal + dao = backend.get_dao(proposal.dao_id) + if not dao: + self.logger.warning(f"No DAO found for proposal {proposal.id}") + continue + + # Get agents holding governance tokens + agents = self._get_agent_token_holders(dao.id) + if not agents: + self.logger.warning(f"No agents found holding tokens for DAO {dao.id}") + continue + + # Create vote queue messages for each agent + for agent in agents: + message_data = { + "proposal_id": proposal.id, + } + + backend.create_queue_message( + QueueMessageCreate( + type=QueueMessageType.DAO_PROPOSAL_VOTE, + message=message_data, + dao_id=dao.id, + wallet_id=agent["wallet_id"], + ) + ) + + self.logger.info( + f"Created vote queue message for agent {agent['agent_id']} " + f"to vote on proposal {proposal.id}" + ) diff --git a/services/workflows/README.md b/services/workflows/README.md deleted file mode 100644 index c43c4c6f..00000000 --- 
a/services/workflows/README.md +++ /dev/null @@ -1,202 +0,0 @@ -# Workflow Architecture - -This module provides a framework for creating and executing LangGraph-based workflows using LangChain and OpenAI. - -## Architecture Overview - -The workflow architecture is organized into several layers: - -1. **Base Components**: Core abstractions and interfaces -2. **Service Layer**: Standardized execution and streaming -3. **Workflow Implementations**: Concrete workflow types -4. **Special Purpose Workflows**: Domain-specific implementations - -## 1. Base Components - -### BaseWorkflow - -The foundation for all workflow implementations. It provides: -- Common initialization logic -- Validation framework -- Graph execution patterns - -```python -workflow = BaseWorkflow() -result = await workflow.execute(initial_state) -``` - -### Capability Mixins - -Capabilities that can be mixed into workflows: - -- `PlanningCapability`: Adds planning before execution -- `VectorRetrievalCapability`: Adds vector database retrieval - -```python -class MyWorkflow(BaseWorkflow, PlanningCapability, VectorRetrievalCapability): - # Implementation that can use both capabilities -``` - -## 2. Service Layer - -### WorkflowService - -Interface for service implementations that provide a standard execution method: - -```python -# Standard interface -async for chunk in service.execute_stream( - history=history, - input_str=input_str, - tools_map=tools_map -): - # Process streaming chunks -``` - -### WorkflowFactory and Builder - -Factory pattern for creating workflow instances: - -```python -# Create a workflow service using the factory -service = WorkflowFactory.create_workflow_service( - workflow_type="vector", - vector_collection="my_collection" -) - -# Build a workflow instance with the builder -workflow = ( - WorkflowBuilder(ReactWorkflow) - .with_callback_handler(callback_handler) - .with_tools(tools) - .build() -) -``` - -## 3. 
Core Workflow Implementations - -### ReactWorkflow - -Basic reasoning + action workflow using the ReAct pattern. - -### VectorReactWorkflow - -ReAct workflow with vector store integration for context retrieval. - -### PreplanReactWorkflow - -ReAct workflow with planning before execution. - -### VectorPreplanReactWorkflow - -ReAct workflow that combines vector retrieval and planning: -1. Retrieves relevant context from vector storage based on the user query -2. Creates a plan using both the query and retrieved context -3. Executes the workflow with both context and plan - -This workflow is ideal for complex tasks that benefit from both: -- External knowledge from vector storage -- Strategic planning before execution - -## 4. Special Purpose Workflows - -Domain-specific implementations: - -- `ProposalEvaluationWorkflow`: Evaluates DAO proposals -- `TweetAnalysisWorkflow`: Analyzes tweets for DAO actions -- `TweetGeneratorWorkflow`: Generates tweets about DAOs - -## Usage Examples - -### Basic ReAct Workflow - -```python -from services.workflows import execute_workflow_stream - -async for chunk in execute_workflow_stream( - workflow_type="react", - history=conversation_history, - input_str="What is the current price of Bitcoin?", - tools_map={"price_check": price_check_tool} -): - yield chunk -``` - -### Vector Retrieval Workflow - -```python -from services.workflows import execute_workflow_stream - -async for chunk in execute_workflow_stream( - workflow_type="vector", - history=conversation_history, - input_str="Tell me about DAO governance", - vector_collections="dao_docs", - tools_map=dao_tools -): - yield chunk -``` - -### Planning Workflow - -```python -from services.workflows import execute_workflow_stream - -async for chunk in execute_workflow_stream( - workflow_type="preplan", - history=conversation_history, - input_str="Create a proposal for the treasury", - tools_map=proposal_tools -): - yield chunk -``` - -### Vector PrePlan Workflow - -```python -from 
services.workflows import execute_workflow_stream - -async for chunk in execute_workflow_stream( - workflow_type="vector_preplan", - history=conversation_history, - input_str="Create a proposal for the treasury using past proposals as references", - vector_collections=["dao_docs", "knowledge_collection"], - tools_map=proposal_tools -): - yield chunk -``` - -## Creating New Workflows - -To create a new workflow: - -1. Define a state type (TypedDict) -2. Create a workflow class extending BaseWorkflow and any capability mixins -3. Implement `_create_graph()` and any required methods -4. Create a service class extending BaseWorkflowService -5. Implement `_execute_stream_impl()` -6. Add to factory mapping if needed - -Example: - -```python -# 1. Define state -class MyState(TypedDict): - messages: Annotated[list, add_messages] - custom_data: str - -# 2. Create workflow class -class MyWorkflow(BaseWorkflow[MyState], PlanningCapability): - def _create_graph(self) -> StateGraph: - # Implement graph creation - ... - -# 3. Create service class -class MyService(BaseWorkflowService): - async def _execute_stream_impl(self, messages, input_str, **kwargs): - # Implement execution - ... - -# 4. 
Add to factory -WorkflowFactory.create_workflow_service.service_map["my_workflow"] = MyService -``` \ No newline at end of file diff --git a/services/workflows/base.py b/services/workflows/base.py index bc26b06f..2259335e 100644 --- a/services/workflows/base.py +++ b/services/workflows/base.py @@ -1,15 +1,16 @@ """Base workflow functionality and shared components for all workflow types.""" -import asyncio import json from abc import ABC, abstractmethod from typing import Any, Dict, Generic, List, Optional, TypeVar, Union from langchain.prompts import PromptTemplate -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage -from langchain_openai import ChatOpenAI +from langchain.schema import Document +from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langgraph.graph import Graph, StateGraph +from openai import OpenAI +from backend.factory import backend from lib.logger import configure_logger logger = configure_logger(__name__) @@ -55,7 +56,7 @@ class BaseWorkflow(Generic[StateType]): def __init__( self, - model_name: str = "gpt-4o", + model_name: str = "gpt-4.1", temperature: Optional[float] = 0.1, streaming: bool = True, callbacks: Optional[List[Any]] = None, @@ -72,6 +73,7 @@ def __init__( temperature=temperature, model=model_name, streaming=streaming, + stream_usage=True, callbacks=callbacks or [], ) self.logger = configure_logger(self.__class__.__name__) @@ -123,6 +125,7 @@ def create_llm_with_callbacks(self, callbacks: List[Any]) -> ChatOpenAI: model=self.model_name, temperature=self.temperature, streaming=True, + stream_usage=True, callbacks=callbacks, ) @@ -273,8 +276,24 @@ def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: class VectorRetrievalCapability(BaseWorkflowMixin): """Mixin that adds vector retrieval capabilities to a workflow.""" - async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Any]: - """Retrieve relevant documents from vector store. 
+ def __init__(self, *args, **kwargs): + """Initialize the vector retrieval capability.""" + # Initialize parent class if it exists + super().__init__(*args, **kwargs) if hasattr(super(), "__init__") else None + # Initialize our attributes + self._init_vector_retrieval() + + def _init_vector_retrieval(self) -> None: + """Initialize vector retrieval attributes if not already initialized.""" + if not hasattr(self, "collection_names"): + self.collection_names = ["knowledge_collection", "dao_collection"] + if not hasattr(self, "embeddings"): + self.embeddings = OpenAIEmbeddings() + if not hasattr(self, "vector_results_cache"): + self.vector_results_cache = {} + + async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Document]: + """Retrieve relevant documents from multiple vector stores. Args: query: The query to search for @@ -283,20 +302,273 @@ async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Any]: Returns: List of retrieved documents """ - raise NotImplementedError( - "VectorRetrievalCapability must implement retrieve_from_vector_store" - ) + try: + # Ensure initialization + self._init_vector_retrieval() + + # Check cache first + if query in self.vector_results_cache: + logger.debug(f"Using cached vector results for query: {query}") + return self.vector_results_cache[query] + + all_documents = [] + limit_per_collection = kwargs.get("limit", 4) + logger.debug( + f"Searching vector store: query={query} | limit_per_collection={limit_per_collection}" + ) + + # Query each collection and gather results + for collection_name in self.collection_names: + try: + # Query vectors using the backend + vector_results = await backend.query_vectors( + collection_name=collection_name, + query_text=query, + limit=limit_per_collection, + embeddings=self.embeddings, + ) + + # Convert to LangChain Documents and add collection source + documents = [ + Document( + page_content=doc.get("page_content", ""), + metadata={ + **doc.get("metadata", {}), 
+ "collection_source": collection_name, + }, + ) + for doc in vector_results + ] + + all_documents.extend(documents) + logger.debug( + f"Retrieved {len(documents)} documents from collection {collection_name}" + ) + except Exception as e: + logger.error( + f"Failed to retrieve from collection {collection_name}: {str(e)}", + exc_info=True, + ) + continue # Continue with other collections if one fails + + logger.debug( + f"Retrieved total of {len(all_documents)} documents from all collections" + ) + + # Cache the results + self.vector_results_cache[query] = all_documents + + return all_documents + except Exception as e: + logger.error(f"Vector store retrieval failed: {str(e)}", exc_info=True) + return [] def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: """Integrate vector retrieval capability with a graph. - This adds the vector retrieval capability to the graph. + This adds the vector retrieval capability to the graph by adding a node + that can perform vector searches when needed. Args: graph: The graph to integrate with - **kwargs: Additional arguments specific to vector retrieval + **kwargs: Additional arguments specific to vector retrieval including: + - collection_names: List of collection names to search + - limit_per_collection: Number of results per collection """ - # Implementation depends on specific graph structure - raise NotImplementedError( - "VectorRetrievalCapability must implement integrate_with_graph" - ) + # Add vector search node + graph.add_node("vector_search", self.retrieve_from_vector_store) + + # Add result processing node if needed + if "process_vector_results" not in graph.nodes: + graph.add_node("process_vector_results", self._process_vector_results) + graph.add_edge("vector_search", "process_vector_results") + + async def _process_vector_results( + self, vector_results: List[Document], **kwargs + ) -> Dict[str, Any]: + """Process vector search results. 
+ + Args: + vector_results: Results from vector search + **kwargs: Additional processing arguments + + Returns: + Processed results with metadata + """ + return { + "results": vector_results, + "metadata": { + "num_vector_results": len(vector_results), + "collection_sources": list( + set( + doc.metadata.get("collection_source", "unknown") + for doc in vector_results + ) + ), + }, + } + + +class WebSearchCapability(BaseWorkflowMixin): + """Mixin that adds web search capabilities to a workflow using OpenAI Responses API.""" + + def __init__(self, *args, **kwargs): + """Initialize the web search capability.""" + # Initialize parent class if it exists + super().__init__(*args, **kwargs) if hasattr(super(), "__init__") else None + # Initialize our attributes + self._init_web_search() + + def _init_web_search(self) -> None: + """Initialize web search attributes if not already initialized.""" + if not hasattr(self, "search_results_cache"): + self.search_results_cache = {} + if not hasattr(self, "client"): + self.client = OpenAI() + + async def search_web(self, query: str, **kwargs) -> List[Dict[str, Any]]: + """Search the web using OpenAI Responses API. 
+ + Args: + query: The search query + **kwargs: Additional search parameters like user_location and search_context_size + + Returns: + List of search results with content and metadata + """ + try: + # Ensure initialization + self._init_web_search() + + # Check cache first + if query in self.search_results_cache: + logger.info(f"Using cached results for query: {query}") + return self.search_results_cache[query] + + # Configure web search tool + tool_config = { + "type": "web_search_preview", + "search_context_size": kwargs.get("search_context_size", "medium"), + } + + # Add user location if provided + if "user_location" in kwargs: + tool_config["user_location"] = kwargs["user_location"] + + # Make the API call + response = self.client.responses.create( + model="gpt-4.1", tools=[tool_config], input=query + ) + + logger.debug(f"Web search response: {response}") + # Process the response into our document format + documents = [] + + # Access the output text directly + if hasattr(response, "output_text"): + text_content = response.output_text + source_urls = [] + + # Try to extract citations if available + if hasattr(response, "citations"): + source_urls = [ + { + "url": citation.url, + "title": getattr(citation, "title", ""), + "start_index": getattr(citation, "start_index", 0), + "end_index": getattr(citation, "end_index", 0), + } + for citation in response.citations + if hasattr(citation, "url") + ] + + # Ensure we always have at least one URL entry + if not source_urls: + source_urls = [ + { + "url": "No source URL available", + "title": "Generated Response", + "start_index": 0, + "end_index": len(text_content), + } + ] + + # Create document with content + doc = { + "page_content": text_content, + "metadata": { + "type": "web_search_result", + "source_urls": source_urls, + "query": query, + "timestamp": None, + }, + } + documents.append(doc) + + # Cache the results + self.search_results_cache[query] = documents + + logger.info(f"Web search completed with 
{len(documents)} results") + return documents + + except Exception as e: + logger.error(f"Web search failed: {str(e)}") + # Return a list with one empty result to prevent downstream errors + return [ + { + "page_content": "Web search failed to return results.", + "metadata": { + "type": "web_search_result", + "source_urls": [ + { + "url": "Error occurred during web search", + "title": "Error", + "start_index": 0, + "end_index": 0, + } + ], + "query": query, + "timestamp": None, + }, + } + ] + + def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: + """Integrate web search capability with a graph. + + This adds the web search capability to the graph by adding a node + that can perform web searches when needed. + + Args: + graph: The graph to integrate with + **kwargs: Additional arguments specific to web search including: + - search_context_size: "low", "medium", or "high" + - user_location: dict with type, country, city, region + """ + # Add web search node + graph.add_node("web_search", self.search_web) + + # Add result processing node if needed + if "process_results" not in graph.nodes: + graph.add_node("process_results", self._process_results) + graph.add_edge("web_search", "process_results") + + async def _process_results( + self, web_results: List[Dict[str, Any]], **kwargs + ) -> Dict[str, Any]: + """Process web search results. 
+ + Args: + web_results: Results from web search + **kwargs: Additional processing arguments + + Returns: + Processed results with metadata + """ + return { + "results": web_results, + "metadata": { + "num_web_results": len(web_results), + "source_types": ["web_search"], + }, + } diff --git a/services/workflows/preplan_react.py b/services/workflows/preplan_react.py index ba0abfbd..8bd1f3e1 100644 --- a/services/workflows/preplan_react.py +++ b/services/workflows/preplan_react.py @@ -64,7 +64,7 @@ def __init__( # Create a separate LLM for planning with streaming enabled self.planning_llm = ChatOpenAI( - model="o3-mini", + model="o4-mini", streaming=True, # Enable streaming for the planning LLM callbacks=[callback_handler], ) diff --git a/services/workflows/proposal_evaluation.py b/services/workflows/proposal_evaluation.py index d4c96259..5e9f8419 100644 --- a/services/workflows/proposal_evaluation.py +++ b/services/workflows/proposal_evaluation.py @@ -3,21 +3,31 @@ import binascii from typing import Dict, List, Optional, TypedDict +from langchain.callbacks.base import BaseCallbackHandler from langchain.prompts import PromptTemplate +from langchain_core.documents import Document from langgraph.graph import END, Graph, StateGraph from pydantic import BaseModel, Field from backend.factory import backend from backend.models import ( UUID, + ExtensionFilter, Profile, + Prompt, PromptFilter, ProposalType, + QueueMessageFilter, + QueueMessageType, ) from lib.hiro import HiroApi from lib.logger import configure_logger -from services.workflows.base import BaseWorkflow -from services.workflows.react import LangGraphService +from services.workflows.base import ( + BaseWorkflow, + VectorRetrievalCapability, + WebSearchCapability, +) +from services.workflows.vector_react import VectorLangGraphService, VectorReactState from tools.dao_ext_action_proposals import VoteOnActionProposalTool from tools.tools_factory import filter_tools_by_names, initialize_tools @@ -53,19 +63,56 @@ 
class EvaluationState(TypedDict): auto_vote: bool formatted_prompt: str agent_prompts: List[Dict] + vector_results: Optional[List[Dict]] + recent_tweets: Optional[List[Dict]] + web_search_results: Optional[List[Dict]] # Add field for web search results + treasury_balance: Optional[float] + token_usage: Optional[Dict] # Add field for token usage tracking + model_info: Optional[Dict] # Add field for model information -class ProposalEvaluationWorkflow(BaseWorkflow[EvaluationState]): +class ProposalEvaluationWorkflow( + BaseWorkflow[EvaluationState], VectorRetrievalCapability, WebSearchCapability +): """Workflow for evaluating DAO proposals and voting automatically.""" + def __init__( + self, + collection_names: Optional[List[str]] = None, + model_name: str = "gpt-4.1", + temperature: Optional[float] = 0.1, + **kwargs, + ): + """Initialize the workflow. + + Args: + collection_names: Optional list of collection names to search + model_name: The model to use for evaluation + temperature: Optional temperature setting for the model + **kwargs: Additional arguments passed to parent + """ + super().__init__(model_name=model_name, temperature=temperature, **kwargs) + self.collection_names = collection_names or [ + "knowledge_collection", + "dao_collection", + ] + self.required_fields = ["proposal_id", "proposal_data"] + self.logger.debug( + f"Initialized workflow: collections={self.collection_names} | model={model_name} | temperature={temperature}" + ) + def _create_prompt(self) -> PromptTemplate: """Create the evaluation prompt template.""" return PromptTemplate( input_variables=[ "proposal_data", "dao_info", + "treasury_balance", "contract_source", "agent_prompts", + "vector_context", + "recent_tweets", + "web_search_results", ], template=""" You are a DAO proposal evaluator. Your task is to analyze the proposal and determine whether to vote FOR or AGAINST it. @@ -82,15 +129,18 @@ def _create_prompt(self) -> PromptTemplate: # 3. DAO CONTEXT {dao_info} - # 4. 
AIBTC CHARTER + # 4. TREASURY INFORMATION + {treasury_balance} + + # 5. AIBTC CHARTER Core Values: Curiosity, Truth Maximizing, Humanity's Best Interests, Transparency, Resilience, Collaboration Mission: Elevate human potential through Autonomous Intelligence on Bitcoin Guardrails: Decentralized Governance, Smart Contract accountability - # 5. CONTRACT SOURCE (for core proposals) + # 6. CONTRACT SOURCE (for core proposals) {contract_source} - # 6. EVALUATION CRITERIA + # 7. EVALUATION CRITERIA For Core Proposals: - Security implications - Mission alignment @@ -103,7 +153,7 @@ def _create_prompt(self) -> PromptTemplate: - Security considerations - Alignment with DAO goals - # 7. CONFIDENCE SCORING RUBRIC + # 8. CONFIDENCE SCORING RUBRIC You MUST choose one of these confidence bands: - 0.0-0.2: Extremely low confidence (major red flags or insufficient information) - 0.3-0.4: Low confidence (significant concerns or unclear implications) @@ -111,7 +161,7 @@ def _create_prompt(self) -> PromptTemplate: - 0.7-0.8: High confidence (minor concerns if any) - 0.9-1.0: Very high confidence (clear positive alignment) - # 8. QUALITY STANDARDS + # 9. QUALITY STANDARDS Your evaluation must uphold clarity, reasoning, and respect for the DAO's voice: • Be clear and specific — avoid vagueness or filler • Use a consistent tone, but reflect the DAO's personality if known @@ -122,6 +172,15 @@ def _create_prompt(self) -> PromptTemplate: • Use terms accurately — don't fake precision • Keep structure clean and easy to follow + # 10. VECTOR CONTEXT + {vector_context} + + # 11. RECENT DAO TWEETS + {recent_tweets} + + # 12. 
WEB SEARCH RESULTS + {web_search_results} + # OUTPUT FORMAT Provide your evaluation in this exact JSON format: {{ @@ -147,6 +206,76 @@ async def evaluate_proposal(state: EvaluationState) -> EvaluationState: try: # Get proposal data from state proposal_data = state["proposal_data"] + dao_id = state.get("dao_info", {}).get("id") + + # Perform web search for relevant context + try: + # Create search query from proposal data + web_search_query = f"DAO proposal {proposal_data.get('type', 'unknown')} - {proposal_data.get('parameters', '')}" + + # Use web search capability + web_search_results = await self.search_web( + query=web_search_query, + search_context_size="medium", # Use medium context size for balanced results + ) + + # Update state with web search results + state["web_search_results"] = web_search_results + self.logger.debug( + f"Web search query: {web_search_query} | Results count: {len(web_search_results)}" + ) + self.logger.debug( + f"Retrieved {len(web_search_results)} web search results" + ) + except Exception as e: + self.logger.error( + f"Failed to perform web search: {str(e)}", exc_info=True + ) + state["web_search_results"] = [] + + # Fetch recent tweets from queue if dao_id exists + recent_tweets = [] + if dao_id: + try: + # Add debug logging for dao_id + self.logger.debug(f"Fetching tweets for DAO ID: {dao_id}") + + queue_messages = backend.list_queue_messages( + QueueMessageFilter( + type=QueueMessageType.TWEET, + dao_id=dao_id, + is_processed=True, + ) + ) + # Log the number of messages found + self.logger.debug(f"Found {len(queue_messages)} queue messages") + + # Sort by created_at and take last 5 + sorted_messages = sorted( + queue_messages, key=lambda x: x.created_at, reverse=True + )[:5] + self.logger.debug(f"After sorting, have {len(sorted_messages)} messages") + + recent_tweets = [ + { + "created_at": msg.created_at, + "message": msg.message.get('message', 'No text available') if isinstance(msg.message, dict) else msg.message, + "tweet_id": 
msg.tweet_id, + } + for msg in sorted_messages + ] + self.logger.debug(f"Retrieved tweets: {recent_tweets}") + self.logger.debug( + f"Found {len(recent_tweets)} recent tweets for DAO {dao_id}" + ) + except Exception as e: + self.logger.error( + f"Failed to fetch recent tweets: {str(e)}", exc_info=True + ) + recent_tweets = [] + + # Update state with recent tweets + state["recent_tweets"] = recent_tweets # If this is a core proposal, fetch the contract source contract_source = "" @@ -167,67 +296,172 @@ async def evaluate_proposal(state: EvaluationState) -> EvaluationState: ) if "source" in result: contract_source = result["source"] + self.logger.debug( + f"Retrieved contract source for {contract_address}.{contract_name}" + ) else: - logger.warning( - f"Could not find source code in API response: {result}" + self.logger.warning( + f"Contract source not found in API response: {result}" ) except Exception as e: - logger.error(f"Error fetching contract source: {str(e)}") + self.logger.error( + f"Failed to fetch contract source: {str(e)}", + exc_info=True, + ) else: - logger.warning( + self.logger.warning( f"Invalid contract address format: {proposal_data['proposal_contract']}" ) + # Retrieve relevant context from vector store + try: + # Create search query from proposal data + search_query = f"Proposal type: {proposal_data.get('type')} - {proposal_data.get('parameters', '')}" + + # Use vector retrieval capability + vector_results = await self.retrieve_from_vector_store( + query=search_query, limit=5 # Get top 5 most relevant documents + ) + + # Update state with vector results + state["vector_results"] = vector_results + self.logger.debug( + f"Searching vector store with query: {search_query} | Collection count: {len(self.collection_names)}" + ) + self.logger.debug(f"Vector search results: {vector_results}") + self.logger.debug( + f"Retrieved {len(vector_results)} relevant documents from vector store" + ) + + # Format vector context for prompt + vector_context = 
"\n\n".join( + [ + f"Related Context {i+1}:\n{doc.page_content}" + for i, doc in enumerate(vector_results) + ] + ) + except Exception as e: + self.logger.error( + f"Failed to retrieve from vector store: {str(e)}", exc_info=True + ) + vector_context = ( + "No additional context available from vector store." + ) + # Format prompt with state - self.logger.debug("Formatting evaluation prompt...") + self.logger.debug("Preparing evaluation prompt...") # Format agent prompts as a string agent_prompts_str = "No agent-specific instructions available." if state.get("agent_prompts"): - logger.debug( - f"Raw agent prompts from state: {state['agent_prompts']}" - ) + self.logger.debug(f"Raw agent prompts: {state['agent_prompts']}") if ( isinstance(state["agent_prompts"], list) and state["agent_prompts"] ): # Just use the prompt text directly since that's what we're storing agent_prompts_str = "\n\n".join(state["agent_prompts"]) - logger.debug( - f"Formatted agent prompts string: {agent_prompts_str}" + self.logger.debug( + f"Formatted agent prompts: {agent_prompts_str}" ) else: - logger.warning( - f"Invalid agent prompts format in state: {type(state['agent_prompts'])}" + self.logger.warning( + f"Invalid agent prompts format: {type(state['agent_prompts'])}" ) else: - logger.warning("No agent prompts found in state") + self.logger.debug("No agent prompts found in state") + + # Format web search results for prompt + web_search_content = "No relevant web search results found." 
+ if state.get("web_search_results"): + web_search_content = "\n\n".join( + [ + f"Web Result {i+1}:\n{result['page_content']}\nSource: {result['metadata']['source_urls'][0]['url'] if result['metadata']['source_urls'] else 'Unknown'}" + for i, result in enumerate(state["web_search_results"]) + ] + ) - formatted_prompt = prompt.format( + # Update formatted prompt with web search results + formatted_prompt = self._create_prompt().format( proposal_data=proposal_data, dao_info=state.get( "dao_info", "No additional DAO information available." ), + treasury_balance=state.get("treasury_balance"), contract_source=contract_source, agent_prompts=agent_prompts_str, + vector_context=vector_context, + recent_tweets=( + "\n".join( + [ + f"Tweet {i+1} ({tweet['created_at']}): {tweet['message']}" + for i, tweet in enumerate(recent_tweets) + ] + ) + if recent_tweets + else "No recent tweets available." + ), + web_search_results=web_search_content, ) # Get evaluation from LLM - self.logger.debug("Invoking LLM for evaluation...") + self.logger.debug("Starting LLM evaluation...") structured_output = self.llm.with_structured_output( ProposalEvaluationOutput, + include_raw=True, # Include raw response to get token usage ) + + # Invoke LLM with formatted prompt result = structured_output.invoke(formatted_prompt) - self.logger.debug(f"LLM evaluation result: {result}") + + # Extract the parsed result and token usage from raw response + self.logger.debug( + f"Raw LLM result structure: {type(result).__name__} | Has parsed: {'parsed' in result if isinstance(result, dict) else False}" + ) + parsed_result = result["parsed"] if isinstance(result, dict) else result + model_info = {"name": self.model_name, "temperature": self.temperature} + + if isinstance(result, dict) and "raw" in result: + raw_msg = result["raw"] + # Extract token usage + if hasattr(raw_msg, "usage_metadata"): + token_usage = raw_msg.usage_metadata + self.logger.debug( + f"Token usage details: 
input={token_usage.get('input_tokens', 0)} | output={token_usage.get('output_tokens', 0)} | total={token_usage.get('total_tokens', 0)}" + ) + else: + self.logger.warning("No usage_metadata found in raw response") + token_usage = { + "input_tokens": 0, + "output_tokens": 0, + "total_tokens": 0, + } + else: + self.logger.warning("No raw response available") + token_usage = { + "input_tokens": 0, + "output_tokens": 0, + "total_tokens": 0, + } + + self.logger.debug(f"Parsed evaluation result: {parsed_result}") # Update state state["formatted_prompt"] = formatted_prompt - state["approve"] = result.approve - state["confidence_score"] = result.confidence_score - state["reasoning"] = result.reasoning - self.logger.info( - f"Evaluation complete: approve={result.approve}, confidence={result.confidence_score}" + state["approve"] = parsed_result.approve + state["confidence_score"] = parsed_result.confidence_score + state["reasoning"] = parsed_result.reasoning + state["token_usage"] = token_usage + state["model_info"] = model_info + + # Calculate token costs + token_costs = calculate_token_cost(token_usage, model_info["name"]) + + # Log final evaluation summary + self.logger.debug( + f"Evaluation complete: Decision={'APPROVE' if parsed_result.approve else 'REJECT'} | Confidence={parsed_result.confidence_score:.2f} | Model={model_info['name']} (temp={model_info['temperature']}) | Tokens={token_usage} | Cost=${token_costs['total_cost']:.4f}" ) + self.logger.debug(f"Full reasoning: {parsed_result.reasoning}") return state except Exception as e: @@ -244,20 +478,20 @@ async def should_vote(state: EvaluationState) -> str: """Decide whether to vote based on confidence threshold.""" try: self.logger.debug( - f"Deciding whether to vote: auto_vote={state['auto_vote']}, confidence={state['confidence_score']}, threshold={state['confidence_threshold']}" + f"Deciding vote: auto_vote={state['auto_vote']} | confidence={state['confidence_score']} | threshold={state['confidence_threshold']}" ) 
if not state["auto_vote"]: - self.logger.info("Auto-vote is disabled, skipping vote") + self.logger.debug("Auto-vote is disabled, skipping vote") return "skip_vote" if state["confidence_score"] >= state["confidence_threshold"]: - self.logger.info( + self.logger.debug( f"Confidence score {state['confidence_score']} meets threshold {state['confidence_threshold']}, proceeding to vote" ) return "vote" else: - self.logger.info( + self.logger.debug( f"Confidence score {state['confidence_score']} below threshold {state['confidence_threshold']}, skipping vote" ) return "skip_vote" @@ -265,12 +499,12 @@ async def should_vote(state: EvaluationState) -> str: self.logger.error(f"Error in should_vote: {str(e)}", exc_info=True) return "skip_vote" - # Create voting node using ReAct pattern + # Create voting node using VectorReact workflow async def vote_on_proposal(state: EvaluationState) -> EvaluationState: - """Vote on the proposal using ReAct workflow.""" + """Vote on the proposal using VectorReact workflow.""" try: self.logger.debug( - f"Setting up ReAct workflow to vote on proposal {state['proposal_id']}, vote={state['approve']}" + f"Setting up VectorReact workflow: proposal_id={state['proposal_id']} | vote={state['approve']}" ) # Set up the voting tool @@ -278,16 +512,12 @@ async def vote_on_proposal(state: EvaluationState) -> EvaluationState: tools_map = {"dao_action_vote_on_proposal": vote_tool} # Create a user input message that instructs the LLM what to do - vote_instruction = f""" - I need you to vote on a DAO proposal with ID {state['proposal_id']} in the contract {state['action_proposals_contract']}. - - Please vote {"FOR" if state['approve'] else "AGAINST"} the proposal. - - Use the dao_action_vote_on_proposal tool to submit the vote. - """ + vote_instruction = f"I need you to vote on a DAO proposal with ID {state['proposal_id']} in the contract {state['action_proposals_contract']}. Please vote {'FOR' if state['approve'] else 'AGAINST'} the proposal. 
Use the dao_action_vote_on_proposal tool to submit the vote." - # Create LangGraph service - service = LangGraphService() + # Create VectorLangGraph service with collections + service = VectorLangGraphService( + collection_names=self.collection_names, + ) # History with system message only history = [ @@ -297,20 +527,20 @@ async def vote_on_proposal(state: EvaluationState) -> EvaluationState: } ] - self.logger.debug("Executing ReAct workflow for voting...") + self.logger.debug("Executing VectorReact workflow for voting...") # Collect response chunks response_chunks = [] vote_result = None - # Execute the ReAct workflow - async for chunk in service.execute_react_stream( + # Execute the VectorReact workflow + async for chunk in service.execute_stream( history=history, input_str=vote_instruction, tools_map=tools_map, ): response_chunks.append(chunk) - self.logger.debug(f"ReAct chunk: {chunk}") + self.logger.debug(f"VectorReact chunk: {chunk}") # Extract tool results if ( @@ -319,13 +549,18 @@ async def vote_on_proposal(state: EvaluationState) -> EvaluationState: ): if "output" in chunk: vote_result = chunk.get("output") - self.logger.info(f"Vote result: {vote_result}") + self.logger.debug(f"Vote result: {vote_result}") - # Update state with vote result + # Update state with vote result and vector results state["vote_result"] = { "success": vote_result is not None, "output": vote_result, } + state["vector_results"] = [ + chunk.get("vector_results", []) + for chunk in response_chunks + if chunk.get("vector_results") + ] return state except Exception as e: @@ -340,13 +575,12 @@ async def vote_on_proposal(state: EvaluationState) -> EvaluationState: async def skip_voting(state: EvaluationState) -> EvaluationState: """Skip voting and just return the evaluation.""" try: - self.logger.debug("Skipping voting step") + self.logger.debug("Vote skipped: reason=threshold_or_setting") state["vote_result"] = { "success": True, "message": "Voting skipped due to confidence threshold or 
auto_vote setting", "data": None, } - self.logger.info("Vote skipped as requested") return state except Exception as e: self.logger.error(f"Error in skip_voting: {str(e)}", exc_info=True) @@ -385,7 +619,9 @@ def _validate_state(self, state: EvaluationState) -> bool: required_fields = ["proposal_id", "proposal_data"] # Log the state for debugging - self.logger.debug(f"Validating state: {state}") + self.logger.debug( + f"Validating state: proposal_id={state.get('proposal_id')} | proposal_type={state.get('proposal_data', {}).get('type', 'unknown')}" + ) # Check all fields and log problems for field in required_fields: @@ -419,6 +655,7 @@ def _validate_state(self, state: EvaluationState) -> bool: self.logger.error(f"Invalid proposal type: {proposal_type}") return False + self.logger.debug("State validation successful") return True @@ -436,9 +673,7 @@ def get_proposal_evaluation_tools( """ # Initialize all tools all_tools = initialize_tools(profile=profile, agent_id=agent_id) - - # Log all available tools for debugging - logger.debug(f"All available tools: {', '.join(all_tools.keys())}") + logger.debug(f"Available tools: {', '.join(all_tools.keys())}") # Filter to only include the tools we need required_tools = [ @@ -451,7 +686,7 @@ def get_proposal_evaluation_tools( ] filtered_tools = filter_tools_by_names(required_tools, all_tools) - logger.debug(f"Filtered tools: {', '.join(filtered_tools.keys())}") + logger.debug(f"Using tools: {', '.join(filtered_tools.keys())}") return filtered_tools @@ -467,11 +702,97 @@ def decode_hex_parameters(hex_string: Optional[str]) -> Optional[str]: decoded_string = decoded_bytes.decode( "utf-8", errors="ignore" ) # Decode as UTF-8 + logger.debug(f"Successfully decoded hex string: {hex_string[:20]}...") return decoded_string - except (binascii.Error, UnicodeDecodeError): + except (binascii.Error, UnicodeDecodeError) as e: + logger.warning(f"Failed to decode hex string: {str(e)}") return None # Return None if decoding fails +def 
calculate_token_cost( + token_usage: Dict[str, int], model_name: str +) -> Dict[str, float]: + """Calculate the cost of token usage based on current pricing. + + Args: + token_usage: Dictionary containing input_tokens and output_tokens + model_name: Name of the model used + + Returns: + Dictionary containing cost breakdown and total cost + """ + # Current pricing per million tokens (as of August 2024) + MODEL_PRICES = { + "gpt-4o": { + "input": 2.50, # $2.50 per million input tokens + "output": 10.00, # $10.00 per million output tokens + }, + "gpt-4.1": { + "input": 2.00, # $2.00 per million input tokens + "output": 8.00, # $8.00 per million output tokens + }, + "gpt-4.1-mini": { + "input": 0.40, # $0.40 per million input tokens + "output": 1.60, # $1.60 per million output tokens + }, + "gpt-4.1-nano": { + "input": 0.10, # $0.10 per million input tokens + "output": 0.40, # $0.40 per million output tokens + }, + # Default to gpt-4.1 pricing if model not found + "default": { + "input": 2.00, + "output": 8.00, + }, + } + + # Get pricing for the model, default to gpt-4.1 pricing if not found + model_prices = MODEL_PRICES.get(model_name.lower(), MODEL_PRICES["default"]) + + # Extract token counts, ensuring we get integers and handle None values + try: + input_tokens = int(token_usage.get("input_tokens", 0)) + output_tokens = int(token_usage.get("output_tokens", 0)) + except (TypeError, ValueError) as e: + logger.error(f"Error converting token counts to integers: {str(e)}") + input_tokens = 0 + output_tokens = 0 + + # Calculate costs with more precision + input_cost = (input_tokens / 1_000_000.0) * model_prices["input"] + output_cost = (output_tokens / 1_000_000.0) * model_prices["output"] + total_cost = input_cost + output_cost + + # Create detailed token usage breakdown + token_details = { + "input_tokens": input_tokens, + "output_tokens": output_tokens, + "total_tokens": input_tokens + output_tokens, + "model_name": model_name, + "input_price_per_million": 
model_prices["input"], + "output_price_per_million": model_prices["output"], + } + + # Add token details if available + if "input_token_details" in token_usage: + token_details["input_token_details"] = token_usage["input_token_details"] + if "output_token_details" in token_usage: + token_details["output_token_details"] = token_usage["output_token_details"] + + # Debug logging with more detail + logger.debug( + f"Cost calculation details: Model={model_name} | Input={input_tokens} tokens * ${model_prices['input']}/1M = ${input_cost:.6f} | Output={output_tokens} tokens * ${model_prices['output']}/1M = ${output_cost:.6f} | Total=${total_cost:.6f} | Token details={token_details}" + ) + + return { + "input_cost": round(input_cost, 6), + "output_cost": round(output_cost, 6), + "total_cost": round(total_cost, 6), + "currency": "USD", + "details": token_details, + } + + async def evaluate_and_vote_on_proposal( proposal_id: UUID, wallet_id: Optional[UUID] = None, @@ -491,7 +812,9 @@ async def evaluate_and_vote_on_proposal( Returns: Dictionary containing the evaluation results and voting outcome """ - logger.info(f"Starting proposal evaluation for proposal {proposal_id}") + logger.debug( + f"Starting proposal evaluation: proposal_id={proposal_id} | auto_vote={auto_vote} | confidence_threshold={confidence_threshold}" + ) try: # Get proposal data directly from the database @@ -504,7 +827,9 @@ async def evaluate_and_vote_on_proposal( # Decode parameters if they exist decoded_parameters = decode_hex_parameters(proposal_data.parameters) if decoded_parameters: - logger.debug(f"Decoded parameters: {decoded_parameters}") + logger.debug( + f"Decoded proposal parameters: length={len(decoded_parameters) if decoded_parameters else 0}" + ) # Convert proposal data to dictionary and ensure parameters exist proposal_dict = { @@ -542,7 +867,9 @@ async def evaluate_and_vote_on_proposal( # Get DAO info based on provided dao_id or from proposal dao_info = None if dao_id: - logger.debug(f"Using 
provided DAO ID: {dao_id}") + logger.debug( + f"Using provided DAO ID: {dao_id} | Found={dao_info is not None}" + ) dao_info = backend.get_dao(dao_id) if not dao_info: logger.warning( @@ -551,7 +878,9 @@ async def evaluate_and_vote_on_proposal( # If dao_info is still None, try to get it from proposal's dao_id if not dao_info and proposal_data.dao_id: - logger.debug(f"Using proposal's DAO ID: {proposal_data.dao_id}") + logger.debug( + f"Using proposal's DAO ID: {proposal_data.dao_id} | Found={dao_info is not None}" + ) dao_info = backend.get_dao(proposal_data.dao_id) if not dao_info: @@ -559,7 +888,34 @@ async def evaluate_and_vote_on_proposal( logger.error(error_msg) return {"success": False, "error": error_msg} - logger.debug(f"Using DAO: {dao_info.name} (ID: {dao_info.id})") + # Get the treasury extension for the DAO + treasury_extension = None + try: + treasury_extensions = backend.list_extensions( + ExtensionFilter(dao_id=dao_info.id, type="EXTENSIONS_TREASURY") + ) + if treasury_extensions: + treasury_extension = treasury_extensions[0] + logger.debug( + f"Found treasury extension: contract_principal={treasury_extension.contract_principal}" + ) + + # Get treasury balance from Hiro API + hiro_api = HiroApi() + treasury_balance = hiro_api.get_address_balance( + treasury_extension.contract_principal + ) + logger.debug(f"Treasury balance retrieved: balance={treasury_balance}") + else: + logger.warning(f"No treasury extension found for DAO {dao_info.id}") + treasury_balance = None + except Exception as e: + logger.error(f"Failed to get treasury balance: {str(e)}", exc_info=True) + treasury_balance = None + + logger.debug( + f"Processing proposal for DAO: {dao_info.name} (ID: {dao_info.id})" + ) # Get the wallet and agent information if available agent_id = None @@ -567,10 +923,12 @@ async def evaluate_and_vote_on_proposal( wallet = backend.get_wallet(wallet_id) if wallet and wallet.agent_id: agent_id = wallet.agent_id - logger.debug(f"Found agent ID {agent_id} for 
wallet {wallet_id}") + logger.debug(f"Using agent ID {agent_id} for wallet {wallet_id}") # Get agent prompts agent_prompts = [] + model_name = "gpt-4.1" # Default model + temperature = 0.1 # Default temperature try: logger.debug( f"Fetching prompts for agent_id={agent_id}, dao_id={proposal_data.dao_id}" @@ -582,21 +940,27 @@ async def evaluate_and_vote_on_proposal( is_active=True, ) ) - logger.debug(f"Raw prompts from database: {prompts}") - - # Extract prompt texts - agent_prompts = [p.prompt_text for p in prompts if p.prompt_text] - logger.debug(f"Extracted agent prompts: {agent_prompts}") - - logger.info( - f"Found {len(agent_prompts)} active prompts for agent {agent_id}" - ) - if not agent_prompts: + logger.debug(f"Retrieved prompts: {prompts}") + + # Store the full Prompt objects and get model settings from first prompt + agent_prompts = prompts + if agent_prompts: + first_prompt = agent_prompts[0] + model_name = first_prompt.model or model_name + temperature = ( + first_prompt.temperature + if first_prompt.temperature is not None + else temperature + ) + logger.debug( + f"Using model configuration: {model_name} (temperature={temperature})" + ) + else: logger.warning( f"No active prompts found for agent_id={agent_id}, dao_id={proposal_data.dao_id}" ) except Exception as e: - logger.error(f"Error getting agent prompts: {e}", exc_info=True) + logger.error(f"Failed to get agent prompts: {str(e)}", exc_info=True) # Initialize state state = { @@ -605,7 +969,10 @@ async def evaluate_and_vote_on_proposal( "proposal_id": proposal_dict["proposal_id"], "proposal_data": proposal_dict, "dao_info": dao_info.model_dump() if dao_info else {}, - "agent_prompts": agent_prompts, # Add agent prompts to state + "treasury_balance": treasury_balance, + "agent_prompts": ( + [p.prompt_text for p in agent_prompts] if agent_prompts else [] + ), "approve": False, "confidence_score": 0.0, "reasoning": "", @@ -613,19 +980,35 @@ async def evaluate_and_vote_on_proposal( "wallet_id": 
wallet_id, "confidence_threshold": confidence_threshold, "auto_vote": auto_vote, + "vector_results": None, + "recent_tweets": None, + "web_search_results": None, + "token_usage": None, + "model_info": { + "name": "unknown", + "temperature": None, + }, } - logger.debug(f"State agent_prompts: {state['agent_prompts']}") + logger.debug( + f"Agent prompts count: {len(state['agent_prompts'] or [])} | Has prompts: {bool(state['agent_prompts'])}" + ) - # Create and run workflow - workflow = ProposalEvaluationWorkflow() + # Create and run workflow with model settings from prompt + workflow = ProposalEvaluationWorkflow( + model_name=model_name, temperature=temperature + ) if not workflow._validate_state(state): + error_msg = "Invalid workflow state" + logger.error(error_msg) return { "success": False, - "error": "Invalid workflow state", + "error": error_msg, } + logger.debug("Starting workflow execution...") result = await workflow.execute(state) + logger.debug("Workflow execution completed") # Extract transaction ID from vote result if available tx_id = None @@ -639,9 +1022,11 @@ async def evaluate_and_vote_on_proposal( parts = line.split(":") if len(parts) > 1: tx_id = parts[1].strip() + logger.debug(f"Transaction ID extracted: {tx_id}") break - return { + # Prepare final result + final_result = { "success": True, "evaluation": { "approve": result["approve"], @@ -653,12 +1038,42 @@ async def evaluate_and_vote_on_proposal( and result["confidence_score"] >= confidence_threshold, "tx_id": tx_id, "formatted_prompt": result["formatted_prompt"], + "vector_results": result["vector_results"], + "recent_tweets": result["recent_tweets"], + "web_search_results": result["web_search_results"], + "treasury_balance": result.get("treasury_balance"), + "token_usage": result.get( + "token_usage", + {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}, + ), + "model_info": result.get( + "model_info", {"name": "unknown", "temperature": None} + ), } + + # Calculate token costs + 
token_costs = calculate_token_cost( + final_result["token_usage"], final_result["model_info"]["name"] + ) + final_result["token_costs"] = token_costs + + # For the example token usage shown: + # Input: 7425 tokens * ($2.50/1M) = $0.0186 + # Output: 312 tokens * ($10.00/1M) = $0.0031 + # Total: $0.0217 + + logger.debug( + f"Proposal evaluation completed: Success={final_result['success']} | Decision={'APPROVE' if final_result['evaluation']['approve'] else 'REJECT'} | Confidence={final_result['evaluation']['confidence_score']:.2f} | Auto-voted={final_result['auto_voted']} | Transaction={tx_id or 'None'} | Model={final_result['model_info']['name']} | Token Usage={final_result['token_usage']} | Cost (USD)=${token_costs['total_cost']:.4f} (Input=${token_costs['input_cost']:.4f} for {token_costs['details']['input_tokens']} tokens, Output=${token_costs['output_cost']:.4f} for {token_costs['details']['output_tokens']} tokens)" + ) + logger.debug(f"Full evaluation result: {final_result}") + + return final_result except Exception as e: - logger.error(f"Error in evaluate_and_vote_on_proposal: {str(e)}", exc_info=True) + error_msg = f"Unexpected error in evaluate_and_vote_on_proposal: {str(e)}" + logger.error(error_msg, exc_info=True) return { "success": False, - "error": f"Unexpected error: {str(e)}", + "error": error_msg, } @@ -675,52 +1090,22 @@ async def evaluate_proposal_only( Returns: Dictionary containing the evaluation results """ + logger.debug(f"Starting proposal-only evaluation: proposal_id={proposal_id}") + result = await evaluate_and_vote_on_proposal( proposal_id=proposal_id, wallet_id=wallet_id, - auto_vote=True, + auto_vote=False, ) - # Remove vote_result from the response + # Remove vote-related fields from the response + logger.debug("Removing vote-related fields from response") if "vote_result" in result: del result["vote_result"] if "auto_voted" in result: del result["auto_voted"] + if "tx_id" in result: + del result["tx_id"] + logger.debug("Proposal-only 
evaluation completed") return result - - -async def debug_proposal_evaluation_workflow(): - """Debug function to test the workflow with a mock state.""" - logger.setLevel("DEBUG") - - # Create a mock state with valid required fields - mock_state = { - "action_proposals_contract": "test-contract", - "action_proposals_voting_extension": "test-voting-extension", - "proposal_id": 1, - "proposal_data": {"title": "Test Proposal", "description": "Test Description"}, - "dao_info": {"name": "Test DAO"}, - "approve": False, - "confidence_score": 0.0, - "reasoning": "", - "vote_result": None, - "wallet_id": None, - "confidence_threshold": 0.7, - "auto_vote": False, - } - - # Create the workflow and validate the state - workflow = ProposalEvaluationWorkflow() - is_valid = workflow._validate_state(mock_state) - logger.info(f"Mock state validation result: {is_valid}") - - # Try to execute with the mock state - if is_valid: - try: - result = await workflow.execute(mock_state) - logger.info(f"Workflow execution result: {result}") - except Exception as e: - logger.error(f"Workflow execution failed: {str(e)}", exc_info=True) - - return is_valid diff --git a/services/workflows/vector_preplan_react.py b/services/workflows/vector_preplan_react.py index ae2811cc..20067308 100644 --- a/services/workflows/vector_preplan_react.py +++ b/services/workflows/vector_preplan_react.py @@ -31,6 +31,7 @@ ExecutionError, PlanningCapability, VectorRetrievalCapability, + WebSearchCapability, ) from services.workflows.react import StreamingCallbackHandler @@ -45,11 +46,15 @@ class VectorPreplanState(TypedDict): messages: Annotated[list, add_messages] vector_results: Optional[List[Document]] + web_search_results: Optional[List[Document]] # Add web search results plan: Optional[str] class VectorPreplanReactWorkflow( - BaseWorkflow[VectorPreplanState], VectorRetrievalCapability, PlanningCapability + BaseWorkflow[VectorPreplanState], + VectorRetrievalCapability, + PlanningCapability, + WebSearchCapability, 
): """Workflow that combines vector retrieval and planning capabilities. @@ -89,7 +94,7 @@ def __init__( # Create a separate LLM for planning with streaming enabled self.planning_llm = ChatOpenAI( - model="o3-mini", + model="o4-mini", streaming=True, callbacks=[callback_handler], ) @@ -339,18 +344,9 @@ def should_continue(state: VectorPreplanState) -> str: logger.debug(f"Continue decision: {result}") return result - async def retrieve_from_vector_store(state: VectorPreplanState) -> Dict: - """Retrieve relevant documents from vector store if not already present.""" - # If vector_results are already in the state, use them - if state.get("vector_results") and len(state.get("vector_results")) > 0: - logger.info( - f"Using {len(state.get('vector_results'))} documents already in state" - ) - return {"vector_results": state.get("vector_results")} - - # Otherwise, retrieve documents from the vector store + async def retrieve_context(state: VectorPreplanState) -> Dict: + """Retrieve context from both vector store and web search.""" messages = state["messages"] - # Get the last user message last_user_message = None for message in reversed(messages): if isinstance(message, HumanMessage): @@ -358,18 +354,30 @@ async def retrieve_from_vector_store(state: VectorPreplanState) -> Dict: break if not last_user_message: - logger.warning("No user message found for vector retrieval") - return {"vector_results": []} + logger.warning("No user message found for context retrieval") + return {"vector_results": [], "web_search_results": []} - logger.info(f"Retrieving documents for query: {last_user_message[:50]}...") - documents = await self.retrieve_from_vector_store(query=last_user_message) - logger.info(f"Retrieved {len(documents)} documents from vector store") - return {"vector_results": documents} + # Get vector results + vector_results = await self.retrieve_from_vector_store( + query=last_user_message + ) + logger.info(f"Retrieved {len(vector_results)} documents from vector store") + + 
# Get web search results + try: + web_results = await self.search_web(last_user_message) + logger.info(f"Retrieved {len(web_results)} web search results") + except Exception as e: + logger.error(f"Web search failed: {str(e)}") + web_results = [] + + return {"vector_results": vector_results, "web_search_results": web_results} def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: - """Call model with both context and plan.""" + """Call model with context, plan, and web search results.""" messages = state["messages"] vector_results = state.get("vector_results", []) + web_results = state.get("web_search_results", []) plan = state.get("plan") # Add vector context to the system message if available @@ -381,6 +389,20 @@ def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: ) messages = [context_message] + messages + # Add web search results if available + if web_results: + web_context = "\n\n".join( + [ + f"Web Search Result {i+1}:\n{result['page_content']}\nSource: {result['metadata'].get('source_urls', ['Unknown'])[0]}" + for i, result in enumerate(web_results) + ] + ) + web_message = SystemMessage( + content=f"Here are relevant web search results:\n\n{web_context}\n\n" + "Consider this information in your response if relevant." + ) + messages = [web_message] + messages + # Add the plan as a system message if it exists and hasn't been added yet if plan is not None and not any( isinstance(msg, SystemMessage) and "thought" in msg.content.lower() @@ -397,11 +419,7 @@ def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: Be decisive and action-oriented in your responses. 
""" ) - # Add plan message after context message (if it exists) - if vector_results: - messages = [messages[0], plan_message] + messages[1:] - else: - messages = [plan_message] + messages + messages = [plan_message] + messages # If decisive behavior is enabled and there's no plan-related system message, # add a decisive behavior system message @@ -417,7 +435,8 @@ def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: logger.debug( f"Calling model with {len(messages)} messages, " - f"{len(vector_results)} vector results, and " + f"{len(vector_results)} vector results, " + f"{len(web_results)} web results, and " f"{'a plan' if plan else 'no plan'}" ) @@ -427,13 +446,13 @@ def call_model_with_context_and_plan(state: VectorPreplanState) -> Dict: workflow = StateGraph(VectorPreplanState) # Add nodes - workflow.add_node("vector_retrieval", retrieve_from_vector_store) + workflow.add_node("context_retrieval", retrieve_context) workflow.add_node("agent", call_model_with_context_and_plan) workflow.add_node("tools", tool_node) # Set up the execution flow - workflow.add_edge(START, "vector_retrieval") - workflow.add_edge("vector_retrieval", "agent") + workflow.add_edge(START, "context_retrieval") + workflow.add_edge("context_retrieval", "agent") workflow.add_conditional_edges("agent", should_continue) workflow.add_edge("tools", "agent") diff --git a/services/workflows/vector_react.py b/services/workflows/vector_react.py index 1088ab63..aa55f95d 100644 --- a/services/workflows/vector_react.py +++ b/services/workflows/vector_react.py @@ -49,14 +49,21 @@ def __init__( self, callback_handler: StreamingCallbackHandler, tools: List[Any], - collection_name: str, + collection_names: Union[ + str, List[str] + ], # Modified to accept single or multiple collections embeddings: Optional[Embeddings] = None, **kwargs, ): super().__init__(**kwargs) self.callback_handler = callback_handler self.tools = tools - self.collection_name = collection_name + # Convert single 
collection to list for consistency + self.collection_names = ( + [collection_names] + if isinstance(collection_names, str) + else collection_names + ) self.embeddings = embeddings or OpenAIEmbeddings() self.required_fields = ["messages"] @@ -68,7 +75,7 @@ def _create_prompt(self) -> None: pass async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Document]: - """Retrieve relevant documents from vector store. + """Retrieve relevant documents from multiple vector stores. Args: query: The query to search for @@ -78,25 +85,48 @@ async def retrieve_from_vector_store(self, query: str, **kwargs) -> List[Documen List of retrieved documents """ try: - # Query vectors using the backend - vector_results = await backend.query_vectors( - collection_name=self.collection_name, - query_text=query, - limit=kwargs.get("limit", 4), - embeddings=self.embeddings, + all_documents = [] + limit_per_collection = kwargs.get( + "limit", 4 + ) # Get 4 results from each collection + + # Query each collection and gather results + for collection_name in self.collection_names: + try: + # Query vectors using the backend + vector_results = await backend.query_vectors( + collection_name=collection_name, + query_text=query, + limit=limit_per_collection, + embeddings=self.embeddings, + ) + + # Convert to LangChain Documents and add collection source + documents = [ + Document( + page_content=doc.get("page_content", ""), + metadata={ + **doc.get("metadata", {}), + "collection_source": collection_name, + }, + ) + for doc in vector_results + ] + + all_documents.extend(documents) + logger.info( + f"Retrieved {len(documents)} documents from collection {collection_name}" + ) + except Exception as e: + logger.error( + f"Failed to retrieve from collection {collection_name}: {str(e)}" + ) + continue # Continue with other collections if one fails + + logger.info( + f"Retrieved total of {len(all_documents)} documents from all collections" ) - - # Convert to LangChain Documents - documents = [ 
- Document( - page_content=doc.get("page_content", ""), - metadata=doc.get("metadata", {}), - ) - for doc in vector_results - ] - - logger.info(f"Retrieved {len(documents)} documents from vector store") - return documents + return all_documents except Exception as e: logger.error(f"Vector store retrieval failed: {str(e)}") return [] @@ -182,11 +212,17 @@ def call_model_with_context(state: VectorReactState) -> Dict: class VectorLangGraphService: """Service for executing VectorReact LangGraph operations""" - def __init__(self, collection_name: str, embeddings: Optional[Embeddings] = None): + def __init__( + self, + collection_names: Union[ + str, List[str] + ], # Modified to accept single or multiple collections + embeddings: Optional[Embeddings] = None, + ): # Import here to avoid circular imports from services.workflows.react import MessageProcessor - self.collection_name = collection_name + self.collection_names = collection_names self.embeddings = embeddings or OpenAIEmbeddings() self.message_processor = MessageProcessor() @@ -244,7 +280,7 @@ async def _execute_stream_impl( .with_callback_handler(callback_handler) .with_tools(list(tools_map.values()) if tools_map else []) .build( - collection_name=self.collection_name, + collection_names=self.collection_names, embeddings=self.embeddings, ) ) @@ -316,10 +352,10 @@ async def execute_vector_react_stream( # Helper function for adding documents to vector store async def add_documents_to_vectors( - collection_name: str, + collection_name: str, # Modified to only accept a single collection documents: List[Document], embeddings: Optional[Embeddings] = None, -) -> List[str]: +) -> Dict[str, List[str]]: """Add documents to vector collection. 
Args: @@ -328,7 +364,7 @@ async def add_documents_to_vectors( embeddings: Optional embeddings model to use Returns: - List of document IDs + Dictionary mapping collection name to list of document IDs """ # Ensure embeddings model is provided if embeddings is None: @@ -336,44 +372,59 @@ async def add_documents_to_vectors( "Embeddings model must be provided to add documents to vector store" ) - # Ensure collection exists + # Store document IDs for the collection + collection_doc_ids = {} + try: - backend.get_vector_collection(collection_name) - except Exception: - # Create collection if it doesn't exist - embed_dim = 1536 # Default for OpenAI embeddings - if hasattr(embeddings, "embedding_dim"): - embed_dim = embeddings.embedding_dim - backend.create_vector_collection(collection_name, dimensions=embed_dim) - - # Extract texts for embedding - texts = [doc.page_content for doc in documents] - - # Generate embeddings for the texts - embedding_vectors = embeddings.embed_documents(texts) - - # Prepare documents for storage with embeddings - docs_for_storage = [ - {"page_content": doc.page_content, "embedding": embedding_vectors[i]} - for i, doc in enumerate(documents) - ] - - # Prepare metadata - metadata_list = [doc.metadata for doc in documents] - - # Add to vector store - ids = await backend.add_vectors( - collection_name=collection_name, - documents=docs_for_storage, - metadata=metadata_list, - ) + # Ensure collection exists + try: + backend.get_vector_collection(collection_name) + except Exception: + # Create collection if it doesn't exist + embed_dim = 1536 # Default for OpenAI embeddings + if hasattr(embeddings, "embedding_dim"): + embed_dim = embeddings.embedding_dim + backend.create_vector_collection(collection_name, dimensions=embed_dim) + + # Extract texts for embedding + texts = [doc.page_content for doc in documents] + + # Generate embeddings for the texts + embedding_vectors = embeddings.embed_documents(texts) + + # Prepare documents for storage with 
embeddings + docs_for_storage = [ + {"page_content": doc.page_content, "embedding": embedding_vectors[i]} + for i, doc in enumerate(documents) + ] + + # Prepare metadata + metadata_list = [doc.metadata for doc in documents] + + # Add to vector store + ids = await backend.add_vectors( + collection_name=collection_name, + documents=docs_for_storage, + metadata=metadata_list, + ) + + collection_doc_ids[collection_name] = ids + logger.info(f"Added {len(ids)} documents to collection {collection_name}") + + except Exception as e: + logger.error( + f"Failed to add documents to collection {collection_name}: {str(e)}" + ) + collection_doc_ids[collection_name] = [] - return ids + return collection_doc_ids # Facade function for backward compatibility async def execute_vector_langgraph_stream( - collection_name: str, + collection_names: Union[ + str, List[str] + ], # Modified to accept single or multiple collections history: List[Dict], input_str: str, persona: Optional[str] = None, @@ -384,7 +435,7 @@ async def execute_vector_langgraph_stream( # Initialize service and run stream embeddings = embeddings or OpenAIEmbeddings() service = VectorLangGraphService( - collection_name=collection_name, + collection_names=collection_names, embeddings=embeddings, ) diff --git a/services/workflows/web_search.py b/services/workflows/web_search.py new file mode 100644 index 00000000..e7a3155f --- /dev/null +++ b/services/workflows/web_search.py @@ -0,0 +1,238 @@ +"""Web search workflow implementation using OpenAI Assistant API.""" + +import asyncio +import json +from typing import Any, Dict, List, Optional + +from langchain_core.messages import AIMessage, HumanMessage +from langgraph.graph import StateGraph +from openai import OpenAI +from openai.types.beta.assistant import Assistant +from openai.types.beta.thread import Thread +from openai.types.beta.threads.thread_message import ThreadMessage + +from lib.logger import configure_logger +from services.workflows.base import BaseWorkflow, 
WebSearchCapability +from services.workflows.vector import VectorRetrievalCapability + +logger = configure_logger(__name__) + + +class WebSearchWorkflow(BaseWorkflow, WebSearchCapability, VectorRetrievalCapability): + """Workflow that combines web search with vector retrieval capabilities using OpenAI Assistant.""" + + def __init__(self, **kwargs): + """Initialize the workflow. + + Args: + **kwargs: Additional arguments passed to parent classes + """ + super().__init__(**kwargs) + self.search_results_cache = {} + self.client = OpenAI() + # Create an assistant with web browsing capability + self.assistant: Assistant = self.client.beta.assistants.create( + name="Web Search Assistant", + description="Assistant that helps with web searches", + model="gpt-4-turbo-preview", + tools=[{"type": "retrieval"}, {"type": "web_browser"}], + instructions="""You are a web search assistant. Your primary task is to: + 1. Search the web for relevant information + 2. Extract key information from web pages + 3. Provide detailed, accurate responses with source URLs + 4. Format responses as structured data with content and metadata + Always include source URLs in your responses.""", + ) + + async def search_web(self, query: str, **kwargs) -> List[Dict[str, Any]]: + """Search the web using OpenAI Assistant API. + + Args: + query: The search query + **kwargs: Additional search parameters + + Returns: + List of search results with content and metadata + """ + try: + # Check cache first + if query in self.search_results_cache: + logger.info(f"Using cached results for query: {query}") + return self.search_results_cache[query] + + # Create a new thread for this search + thread: Thread = self.client.beta.threads.create() + + # Add the user's message to the thread + self.client.beta.threads.messages.create( + thread_id=thread.id, + role="user", + content=f"Search the web for: {query}. 
Please provide detailed information with source URLs.", + ) + + # Run the assistant + run = self.client.beta.threads.runs.create( + thread_id=thread.id, assistant_id=self.assistant.id + ) + + # Wait for completion + while True: + run_status = self.client.beta.threads.runs.retrieve( + thread_id=thread.id, run_id=run.id + ) + if run_status.status == "completed": + break + elif run_status.status in ["failed", "cancelled", "expired"]: + raise Exception( + f"Assistant run failed with status: {run_status.status}" + ) + await asyncio.sleep(1) # Wait before checking again + + # Get the assistant's response + messages: List[ThreadMessage] = self.client.beta.threads.messages.list( + thread_id=thread.id + ) + + # Process the response into our document format + documents = [] + for message in messages: + if message.role == "assistant": + for content in message.content: + if content.type == "text": + # Extract URLs from annotations if available + urls = [] + if message.metadata and "citations" in message.metadata: + urls = [ + cite["url"] + for cite in message.metadata["citations"] + ] + + # Create document with content and metadata + doc = { + "page_content": content.text, + "metadata": { + "type": "web_search_result", + "source_urls": urls, + "query": query, + "timestamp": message.created_at, + }, + } + documents.append(doc) + + # Cache the results + self.search_results_cache[query] = documents + + logger.info(f"Web search completed with {len(documents)} results") + return documents + + except Exception as e: + logger.error(f"Web search failed: {str(e)}") + return [] + + async def execute(self, query: str, **kwargs) -> Dict[str, Any]: + """Execute the web search workflow. + + This workflow: + 1. Searches the web for relevant information + 2. Processes and stores the results + 3. 
Combines with vector retrieval if available + + Args: + query: The search query + **kwargs: Additional execution arguments + + Returns: + Dict containing search results and any additional data + """ + try: + # Perform web search + web_results = await self.search_web(query, **kwargs) + + # Cache results + self.search_results_cache[query] = web_results + + # Combine with vector retrieval if available + combined_results = web_results + try: + vector_results = await self.retrieve_from_vectorstore(query, **kwargs) + combined_results.extend(vector_results) + except Exception as e: + logger.warning( + f"Vector retrieval failed, using only web results: {str(e)}" + ) + + return { + "query": query, + "results": combined_results, + "source": "web_search_workflow", + "metadata": { + "num_web_results": len(web_results), + "has_vector_results": ( + bool(vector_results) if "vector_results" in locals() else False + ), + }, + } + + except Exception as e: + logger.error(f"Web search workflow execution failed: {str(e)}") + raise + + def integrate_with_graph(self, graph: StateGraph, **kwargs) -> None: + """Integrate web search workflow with a graph. + + Args: + graph: The graph to integrate with + **kwargs: Additional integration arguments + """ + # Add web search node + graph.add_node("web_search", self.search_web) + + # Add vector retrieval node if available + try: + graph.add_node("vector_retrieval", self.retrieve_from_vectorstore) + + # Connect nodes + graph.add_edge("web_search", "vector_retrieval") + except Exception as e: + logger.warning(f"Vector retrieval integration failed: {str(e)}") + + # Add result processing node + graph.add_node("process_results", self._process_results) + graph.add_edge("vector_retrieval", "process_results") + + async def _process_results( + self, + web_results: List[Dict[str, Any]], + vector_results: Optional[List[Dict[str, Any]]] = None, + ) -> Dict[str, Any]: + """Process and combine search results. 
+ + Args: + web_results: Results from web search + vector_results: Optional results from vector retrieval + + Returns: + Processed and combined results + """ + combined_results = web_results.copy() + if vector_results: + combined_results.extend(vector_results) + + # Deduplicate results based on content similarity + seen_contents = set() + unique_results = [] + for result in combined_results: + content = result.get("page_content", "") + content_hash = hash(content) + if content_hash not in seen_contents: + seen_contents.add(content_hash) + unique_results.append(result) + + return { + "results": unique_results, + "metadata": { + "num_web_results": len(web_results), + "num_vector_results": len(vector_results) if vector_results else 0, + "num_unique_results": len(unique_results), + }, + } diff --git a/services/workflows/workflow_service.py b/services/workflows/workflow_service.py index 05cc4733..a4ecbdf8 100644 --- a/services/workflows/workflow_service.py +++ b/services/workflows/workflow_service.py @@ -428,7 +428,7 @@ def __init__(self, workflow_class: Type, **kwargs): self.kwargs = kwargs self.callback_handler = None self.tools = [] - self.model_name = kwargs.get("model_name", "gpt-4o") + self.model_name = kwargs.get("model_name", "gpt-4.1") self.temperature = kwargs.get("temperature", 0.1) def with_callback_handler( diff --git a/tests/lib/test_tokenizer.py b/tests/lib/test_tokenizer.py index 0e7c5960..17bea5c9 100644 --- a/tests/lib/test_tokenizer.py +++ b/tests/lib/test_tokenizer.py @@ -22,7 +22,7 @@ def sample_messages() -> List[Dict[str, Any]]: def test_trimmer_initialization() -> None: """Test Trimmer initialization with default and custom parameters.""" default_trimmer = Trimmer() - assert default_trimmer.token_model == "gpt-4o" + assert default_trimmer.token_model == "gpt-4.1" assert default_trimmer.maxsize == 50000 assert default_trimmer.margin == 500 diff --git a/tools/dao_ext_action_proposals.py b/tools/dao_ext_action_proposals.py index 6f47bb45..47976187 
100644 --- a/tools/dao_ext_action_proposals.py +++ b/tools/dao_ext_action_proposals.py @@ -48,6 +48,11 @@ class ProposeActionAddResourceInput(BaseModel): description="Optional URL associated with the resource", examples=["https://www.example.com/resource"], ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal", + examples=["Adding a new consultation resource for the DAO"], + ) class ProposeActionAddResourceTool(BaseTool): @@ -73,6 +78,7 @@ def _deploy( resource_description: str, resource_price: int, resource_url: Optional[str] = None, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose adding a resource.""" @@ -90,6 +96,11 @@ def _deploy( if resource_url: args.append(resource_url) + + if memo: + if not resource_url: + args.append("") # Add empty URL if not provided but memo is + args.append(memo) return BunScriptRunner.bun_run( self.wallet_id, @@ -107,6 +118,7 @@ def _run( resource_description: str, resource_price: int, resource_url: Optional[str] = None, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose adding a resource.""" @@ -118,6 +130,7 @@ def _run( resource_description, resource_price, resource_url, + memo, **kwargs, ) @@ -130,6 +143,7 @@ async def _arun( resource_description: str, resource_price: int, resource_url: Optional[str] = None, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" @@ -141,6 +155,7 @@ async def _arun( resource_description, resource_price, resource_url, + memo, **kwargs, ) @@ -180,6 +195,11 @@ class ProposeActionAllowAssetInput(BaseModel): "ST3YT0XW92E6T2FE59B2G5N2WNNFSBZ6MZKQS5D18.t3st-faktory", ], ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal", + examples=["Allow new token for DAO treasury operations"], + ) class ProposeActionAllowAssetTool(BaseTool): @@ -202,6 +222,7 @@ def _deploy( 
action_proposal_contract_to_execute: str, dao_token_contract_address: str, dao_token_contract_address_to_allow: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose allowing an asset.""" @@ -215,6 +236,9 @@ def _deploy( dao_token_contract_address_to_allow, ] + if memo: + args.append(memo) + return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/extensions/action-proposals/public", @@ -228,6 +252,7 @@ def _run( action_proposal_contract_to_execute: str, dao_token_contract_address: str, dao_token_contract_address_to_allow: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose allowing an asset.""" @@ -236,6 +261,7 @@ def _run( action_proposal_contract_to_execute, dao_token_contract_address, dao_token_contract_address_to_allow, + memo, **kwargs, ) @@ -245,6 +271,7 @@ async def _arun( action_proposal_contract_to_execute: str, dao_token_contract_address: str, dao_token_contract_address_to_allow: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" @@ -253,6 +280,7 @@ async def _arun( action_proposal_contract_to_execute, dao_token_contract_address, dao_token_contract_address_to_allow, + memo, **kwargs, ) @@ -289,6 +317,11 @@ class ProposeActionSendMessageInput(BaseModel): description="Message to be sent through the DAO proposal system, verified to be from the DAO and posted to Twitter/X automatically if successful.", examples=["gm gm from the $FACES DAO!"], ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal", + examples=["Sending a greeting message from our DAO"], + ) class ProposeActionSendMessageTool(BaseTool): @@ -311,6 +344,7 @@ def _deploy( action_proposal_contract_to_execute: str, dao_token_contract_address: str, message: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose sending a message.""" @@ -324,6 +358,9 @@ def _deploy( message, ] + 
if memo: + args.append(memo) + return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/extensions/action-proposals/public", @@ -337,6 +374,7 @@ def _run( action_proposal_contract_to_execute: str, dao_token_contract_address: str, message: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose sending a message.""" @@ -345,6 +383,7 @@ def _run( action_proposal_contract_to_execute, dao_token_contract_address, message, + memo, **kwargs, ) @@ -354,6 +393,7 @@ async def _arun( action_proposal_contract_to_execute: str, dao_token_contract_address: str, message: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" @@ -362,6 +402,7 @@ async def _arun( action_proposal_contract_to_execute, dao_token_contract_address, message, + memo, **kwargs, ) @@ -402,6 +443,11 @@ class ProposeActionSetAccountHolderInput(BaseModel): "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.contract", ], ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal", + examples=["Setting new account holder for DAO vault"], + ) class ProposeActionSetAccountHolderTool(BaseTool): @@ -424,6 +470,7 @@ def _deploy( action_proposal_contract_to_execute: str, dao_token_contract_address: str, account_holder: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose setting a new account holder.""" @@ -437,6 +484,9 @@ def _deploy( account_holder, ] + if memo: + args.append(memo) + return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/extensions/action-proposals/public", @@ -450,6 +500,7 @@ def _run( action_proposal_contract_to_execute: str, dao_token_contract_address: str, account_holder: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose setting a new account holder.""" @@ -458,6 +509,7 @@ def _run( action_proposal_contract_to_execute, dao_token_contract_address, account_holder, + memo, **kwargs, ) @@ 
-467,6 +519,7 @@ async def _arun( action_proposal_contract_to_execute: str, dao_token_contract_address: str, account_holder: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" @@ -475,6 +528,7 @@ async def _arun( action_proposal_contract_to_execute, dao_token_contract_address, account_holder, + memo, **kwargs, ) @@ -511,6 +565,11 @@ class ProposeActionSetWithdrawalAmountInput(BaseModel): description="New withdrawal amount to set in microSTX", examples=["50000000"], # 50 STX ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal", + examples=["Updating withdrawal amount to 50 STX"], + ) class ProposeActionSetWithdrawalAmountTool(BaseTool): @@ -533,6 +592,7 @@ def _deploy( action_proposal_contract_to_execute: str, dao_token_contract_address: str, withdrawal_amount: int, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose setting a new withdrawal amount.""" @@ -546,6 +606,9 @@ def _deploy( str(withdrawal_amount), ] + if memo: + args.append(memo) + return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/extensions/action-proposals/public", @@ -559,6 +622,7 @@ def _run( action_proposal_contract_to_execute: str, dao_token_contract_address: str, withdrawal_amount: int, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose setting a new withdrawal amount.""" @@ -567,6 +631,7 @@ def _run( action_proposal_contract_to_execute, dao_token_contract_address, withdrawal_amount, + memo, **kwargs, ) @@ -576,6 +641,7 @@ async def _arun( action_proposal_contract_to_execute: str, dao_token_contract_address: str, withdrawal_amount: int, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" @@ -584,6 +650,7 @@ async def _arun( action_proposal_contract_to_execute, dao_token_contract_address, withdrawal_amount, + memo, **kwargs, ) @@ -620,6 +687,11 @@ class 
ProposeActionSetWithdrawalPeriodInput(BaseModel): description="New withdrawal period to set in Bitcoin blocks", examples=["144"], # 1 day in BTC blocks ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal", + examples=["Updating withdrawal period to 1 day (144 blocks)"], + ) class ProposeActionSetWithdrawalPeriodTool(BaseTool): @@ -642,6 +714,7 @@ def _deploy( action_proposal_contract_to_execute: str, dao_token_contract_address: str, withdrawal_period: int, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose setting a new withdrawal period.""" @@ -655,6 +728,9 @@ def _deploy( str(withdrawal_period), ] + if memo: + args.append(memo) + return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/extensions/action-proposals/public", @@ -668,6 +744,7 @@ def _run( action_proposal_contract_to_execute: str, dao_token_contract_address: str, withdrawal_period: int, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose setting a new withdrawal period.""" @@ -676,6 +753,7 @@ def _run( action_proposal_contract_to_execute, dao_token_contract_address, withdrawal_period, + memo, **kwargs, ) @@ -685,6 +763,7 @@ async def _arun( action_proposal_contract_to_execute: str, dao_token_contract_address: str, withdrawal_period: int, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" @@ -693,6 +772,7 @@ async def _arun( action_proposal_contract_to_execute, dao_token_contract_address, withdrawal_period, + memo, **kwargs, ) @@ -914,6 +994,11 @@ class ProposeActionToggleResourceInput(BaseModel): description="Name of the resource to toggle", examples=["apiv1", "protected-content", "1hr consulting"], ) + memo: Optional[str] = Field( + None, + description="Optional memo to include with the proposal", + examples=["Toggling availability of consulting resource"], + ) class ProposeActionToggleResourceTool(BaseTool): @@ -936,6 +1021,7 @@ 
def _deploy( action_proposal_contract_to_execute: str, dao_token_contract_address: str, resource_name: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose toggling a resource.""" @@ -949,6 +1035,9 @@ def _deploy( resource_name, ] + if memo: + args.append(memo) + return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/extensions/action-proposals/public", @@ -962,6 +1051,7 @@ def _run( action_proposal_contract_to_execute: str, dao_token_contract_address: str, resource_name: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Execute the tool to propose toggling a resource.""" @@ -970,6 +1060,7 @@ def _run( action_proposal_contract_to_execute, dao_token_contract_address, resource_name, + memo, **kwargs, ) @@ -979,6 +1070,7 @@ async def _arun( action_proposal_contract_to_execute: str, dao_token_contract_address: str, resource_name: str, + memo: Optional[str] = None, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" @@ -987,6 +1079,7 @@ async def _arun( action_proposal_contract_to_execute, dao_token_contract_address, resource_name, + memo, **kwargs, ) diff --git a/tools/dao_ext_core_proposals.py b/tools/dao_ext_core_proposals.py new file mode 100644 index 00000000..7fc11821 --- /dev/null +++ b/tools/dao_ext_core_proposals.py @@ -0,0 +1,236 @@ +from typing import Any, Dict, Optional, Type +from uuid import UUID + +from langchain.tools import BaseTool +from pydantic import BaseModel, Field + +from tools.bun import BunScriptRunner + + +class GenerateCoreProposalInput(BaseModel): + """Input schema for generating a core proposal.""" + + dao_deployer_address: str = Field( + ..., + description="The address of the DAO deployer", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) + dao_token_symbol: str = Field( + ..., + description="The token symbol for the DAO", + example="aibtc", + ) + proposal_contract_name: str = Field( + ..., + description="The name of the proposal contract", + 
example="aibtc-treasury-withdraw-stx", + ) + proposal_args: Dict[str, str] = Field( + ..., + description="Arguments for the proposal in key-value format", + example={ + "stx_amount": "1000000", + "recipient_address": "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + }, + ) + generate_files: bool = Field( + False, + description="Whether to generate and save proposal files", + ) + + +class GenerateCoreProposalTool(BaseTool): + name: str = "dao_generate_core_proposal" + description: str = ( + "Generate a core proposal for the DAO. " + "This will create the proposal contract but not deploy it. " + "Returns the generated proposal details if successful." + ) + args_schema: Type[BaseModel] = GenerateCoreProposalInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + + def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + super().__init__(**kwargs) + self.wallet_id = wallet_id + + def _deploy( + self, + dao_deployer_address: str, + dao_token_symbol: str, + proposal_contract_name: str, + proposal_args: Dict[str, str], + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to generate a core proposal.""" + if self.wallet_id is None: + return {"success": False, "message": "Wallet ID is required", "data": None} + + args = [ + dao_deployer_address, + dao_token_symbol, + proposal_contract_name, + str(proposal_args).replace("'", '"'), # Convert Python dict to JSON string + str(generate_files).lower(), + ] + + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-dao/extensions/core-proposals", + "generate-core-proposal.ts", + *args, + ) + + def _run( + self, + dao_deployer_address: str, + dao_token_symbol: str, + proposal_contract_name: str, + proposal_args: Dict[str, str], + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to generate a core proposal.""" + return self._deploy( + dao_deployer_address, + dao_token_symbol, + proposal_contract_name, + proposal_args, + 
generate_files, + **kwargs, + ) + + async def _arun( + self, + dao_deployer_address: str, + dao_token_symbol: str, + proposal_contract_name: str, + proposal_args: Dict[str, str], + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + dao_deployer_address, + dao_token_symbol, + proposal_contract_name, + proposal_args, + generate_files, + **kwargs, + ) + + +class DeployCoreProposalInput(BaseModel): + """Input schema for deploying a core proposal.""" + + dao_deployer_address: str = Field( + ..., + description="The address of the DAO deployer", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) + dao_token_symbol: str = Field( + ..., + description="The token symbol for the DAO", + example="aibtc", + ) + proposal_contract_name: str = Field( + ..., + description="The name of the proposal contract", + example="aibtc-treasury-withdraw-stx", + ) + proposal_args: Dict[str, str] = Field( + ..., + description="Arguments for the proposal in key-value format", + example={ + "stx_amount": "1000000", + "recipient_address": "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + }, + ) + generate_files: bool = Field( + False, + description="Whether to generate and save proposal files", + ) + + +class DeployCoreProposalTool(BaseTool): + name: str = "dao_deploy_core_proposal" + description: str = ( + "Deploy a core proposal for the DAO. " + "This will generate and deploy the proposal contract. " + "This is a required step before proposing. " + "Returns the deployment details if successful." 
+ ) + args_schema: Type[BaseModel] = DeployCoreProposalInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + + def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + super().__init__(**kwargs) + self.wallet_id = wallet_id + + def _deploy( + self, + dao_deployer_address: str, + dao_token_symbol: str, + proposal_contract_name: str, + proposal_args: Dict[str, str], + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy a core proposal.""" + if self.wallet_id is None: + return {"success": False, "message": "Wallet ID is required", "data": None} + + args = [ + dao_deployer_address, + dao_token_symbol, + proposal_contract_name, + str(proposal_args).replace("'", '"'), # Convert Python dict to JSON string + str(generate_files).lower(), + ] + + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-dao/extensions/core-proposals", + "deploy-core-proposal.ts", + *args, + ) + + def _run( + self, + dao_deployer_address: str, + dao_token_symbol: str, + proposal_contract_name: str, + proposal_args: Dict[str, str], + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy a core proposal.""" + return self._deploy( + dao_deployer_address, + dao_token_symbol, + proposal_contract_name, + proposal_args, + generate_files, + **kwargs, + ) + + async def _arun( + self, + dao_deployer_address: str, + dao_token_symbol: str, + proposal_contract_name: str, + proposal_args: Dict[str, str], + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + dao_deployer_address, + dao_token_symbol, + proposal_contract_name, + proposal_args, + generate_files, + **kwargs, + ) diff --git a/tools/smartwallet.py b/tools/smartwallet.py index a21098d9..94f73b85 100644 --- a/tools/smartwallet.py +++ b/tools/smartwallet.py @@ -7,24 +7,1838 @@ from tools.bun import BunScriptRunner -class DepositSTXInput(BaseModel): +class 
SmartWalletGenerateSmartWalletInput(BaseModel): + """Input schema for generating a smart wallet contract.""" + + owner_address: str = Field( + ..., + description="Stacks address of the wallet owner", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) + agent_address: str = Field( + ..., + description="Stacks address of the agent", + example="ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG", + ) + dao_token_contract: str = Field( + ..., + description="Contract principal of the DAO token", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + dao_token_dex_contract: str = Field( + ..., + description="Contract principal of the DAO token DEX", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", + ) + generate_files: bool = Field( + False, + description="Whether to generate contract files", + ) + + +class SmartWalletGenerateSmartWalletTool(BaseTool): + name: str = "smartwallet_generate_smart_wallet" + description: str = ( + "Generate a new smart wallet contract with specified owner and agent addresses. " + "Returns the contract name, hash, and source code." 
+ ) + args_schema: Type[BaseModel] = SmartWalletGenerateSmartWalletInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + + def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + super().__init__(**kwargs) + self.wallet_id = wallet_id + + def _deploy( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to generate smart wallet.""" + if self.wallet_id is None: + return {"success": False, "message": "Wallet ID is required", "data": None} + + args = [ + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + str(generate_files).lower(), + ] + + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-dao/smart-wallet", + "generate-smart-wallet.ts", + *args, + ) + + def _run( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to generate smart wallet.""" + return self._deploy( + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + generate_files, + **kwargs, + ) + + async def _arun( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + generate_files, + **kwargs, + ) + + +class SmartWalletGenerateMySmartWalletInput(BaseModel): + """Input schema for generating a smart wallet contract using the current agent as the agent address.""" + + owner_address: str = Field( + ..., + description="Stacks address of the wallet owner", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) + dao_token_contract: str = Field( + ..., + 
description="Contract principal of the DAO token", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + dao_token_dex_contract: str = Field( + ..., + description="Contract principal of the DAO token DEX", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", + ) + generate_files: bool = Field( + False, + description="Whether to generate contract files", + ) + + +class SmartWalletGenerateMySmartWalletTool(BaseTool): + name: str = "smartwallet_generate_my_smart_wallet" + description: str = ( + "Generate a new smart wallet contract using the current agent as the agent address. " + "Returns the contract name, hash, and source code." + ) + args_schema: Type[BaseModel] = SmartWalletGenerateMySmartWalletInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + + def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + super().__init__(**kwargs) + self.wallet_id = wallet_id + + def _deploy( + self, + owner_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to generate smart wallet.""" + if self.wallet_id is None: + return {"success": False, "message": "Wallet ID is required", "data": None} + + args = [ + owner_address, + dao_token_contract, + dao_token_dex_contract, + str(generate_files).lower(), + ] + + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-dao/smart-wallet", + "generate-my-smart-wallet.ts", + *args, + ) + + def _run( + self, + owner_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + generate_files: bool = False, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to generate smart wallet.""" + return self._deploy( + owner_address, + dao_token_contract, + dao_token_dex_contract, + generate_files, + **kwargs, + ) + + async def _arun( + self, + owner_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + generate_files: bool = False, + 
**kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + owner_address, + dao_token_contract, + dao_token_dex_contract, + generate_files, + **kwargs, + ) + + +class SmartWalletDeploySmartWalletInput(BaseModel): + """Input schema for deploying a smart wallet contract.""" + + owner_address: str = Field( + ..., + description="Stacks address of the wallet owner", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) + dao_token_contract: str = Field( + ..., + description="Contract principal of the DAO token", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + dao_token_dex_contract: str = Field( + ..., + description="Contract principal of the DAO token DEX", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", + ) + + +class SmartWalletDeploySmartWalletTool(BaseTool): + name: str = "smartwallet_deploy_smart_wallet" + description: str = ( + "Deploy a new smart wallet contract with specified owner and agent addresses. " + "Returns the deployed contract address and transaction ID." 
+ ) + args_schema: Type[BaseModel] = SmartWalletDeploySmartWalletInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + + def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + super().__init__(**kwargs) + self.wallet_id = wallet_id + + def _deploy( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy smart wallet.""" + if self.wallet_id is None: + return {"success": False, "message": "Wallet ID is required", "data": None} + + args = [ + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + ] + + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-dao/smart-wallet", + "deploy-smart-wallet.ts", + *args, + ) + + def _run( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy smart wallet.""" + return self._deploy( + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + **kwargs, + ) + + async def _arun( + self, + owner_address: str, + agent_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + owner_address, + agent_address, + dao_token_contract, + dao_token_dex_contract, + **kwargs, + ) + + +class SmartWalletDeployMySmartWalletInput(BaseModel): + """Input schema for deploying a smart wallet contract using the current agent as the agent address.""" + + owner_address: str = Field( + ..., + description="Stacks address of the wallet owner", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) + dao_token_contract: str = Field( + ..., + description="Contract principal of the DAO token", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + dao_token_dex_contract: str = Field( + ..., + 
description="Contract principal of the DAO token DEX", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", + ) + + +class SmartWalletDeployMySmartWalletTool(BaseTool): + name: str = "smartwallet_deploy_my_smart_wallet" + description: str = ( + "Deploy a new smart wallet contract using the current agent as the agent address. " + "Returns the deployed contract address and transaction ID." + ) + args_schema: Type[BaseModel] = SmartWalletDeployMySmartWalletInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + + def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + super().__init__(**kwargs) + self.wallet_id = wallet_id + + def _deploy( + self, + owner_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy smart wallet.""" + if self.wallet_id is None: + return {"success": False, "message": "Wallet ID is required", "data": None} + + args = [ + owner_address, + dao_token_contract, + dao_token_dex_contract, + ] + + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-dao/smart-wallet", + "deploy-my-smart-wallet.ts", + *args, + ) + + def _run( + self, + owner_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to deploy smart wallet.""" + return self._deploy( + owner_address, + dao_token_contract, + dao_token_dex_contract, + **kwargs, + ) + + async def _arun( + self, + owner_address: str, + dao_token_contract: str, + dao_token_dex_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + owner_address, + dao_token_contract, + dao_token_dex_contract, + **kwargs, + ) + + +class SmartWalletIsApprovedAssetInput(BaseModel): + """Input schema for checking if an asset is approved in a smart wallet.""" + + smart_wallet_contract: str = Field( + ..., + description="Contract principal of the smart wallet", + 
example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", + ) + asset_contract: str = Field( + ..., + description="Contract principal of the asset", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + + +class SmartWalletIsApprovedAssetTool(BaseTool): + name: str = "smartwallet_is_approved_asset" + description: str = ( + "Check if an asset is approved for use with a smart wallet. " + "Returns true if the asset is approved, false otherwise." + ) + args_schema: Type[BaseModel] = SmartWalletIsApprovedAssetInput + return_direct: bool = False + wallet_id: Optional[UUID] = None + + def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + super().__init__(**kwargs) + self.wallet_id = wallet_id + + def _deploy( + self, + smart_wallet_contract: str, + asset_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to check asset approval.""" + if self.wallet_id is None: + return {"success": False, "message": "Wallet ID is required", "data": None} + + args = [ + smart_wallet_contract, + asset_contract, + ] + + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-dao/smart-wallet/read-only", + "is-approved-asset.ts", + *args, + ) + + def _run( + self, + smart_wallet_contract: str, + asset_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Execute the tool to check asset approval.""" + return self._deploy( + smart_wallet_contract, + asset_contract, + **kwargs, + ) + + async def _arun( + self, + smart_wallet_contract: str, + asset_contract: str, + **kwargs, + ) -> Dict[str, Any]: + """Async version of the tool.""" + return self._deploy( + smart_wallet_contract, + asset_contract, + **kwargs, + ) + + +class SmartWalletGetBalanceStxInput(BaseModel): + """Input schema for getting STX balance from a smart wallet.""" + + smart_wallet_contract: str = Field( + ..., + description="Contract principal of the smart wallet", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", + ) 
class SmartWalletGetBalanceStxTool(BaseTool):
    """Read-only tool reporting a smart wallet's STX balance in microSTX."""

    name: str = "smartwallet_get_balance_stx"
    description: str = (
        "Get the STX balance from a smart wallet. "
        "Returns the balance in microSTX."
    )
    args_schema: Type[BaseModel] = SmartWalletGetBalanceStxInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet executes the script."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, **kwargs) -> Dict[str, Any]:
        """Invoke the get-balance-stx.ts read-only script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/read-only",
            "get-balance-stx.ts",
            smart_wallet_contract,
        )

    def _run(self, smart_wallet_contract: str, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, **kwargs)

    async def _arun(self, smart_wallet_contract: str, **kwargs) -> Dict[str, Any]:
        """Async entry point; the work is synchronous, so delegate to _deploy."""
        return self._deploy(smart_wallet_contract, **kwargs)


class SmartWalletGetConfigurationInput(BaseModel):
    """Arguments for reading a smart wallet's configuration."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )


class SmartWalletGetConfigurationTool(BaseTool):
    """Read-only tool returning a smart wallet's configuration record."""

    name: str = "smartwallet_get_configuration"
    description: str = (
        "Get the configuration of a smart wallet. "
        "Returns owner, agent, and other configuration details."
    )
    args_schema: Type[BaseModel] = SmartWalletGetConfigurationInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet executes the script."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, **kwargs) -> Dict[str, Any]:
        """Invoke the get-configuration.ts read-only script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/read-only",
            "get-configuration.ts",
            smart_wallet_contract,
        )

    def _run(self, smart_wallet_contract: str, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, **kwargs)

    async def _arun(self, smart_wallet_contract: str, **kwargs) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(smart_wallet_contract, **kwargs)


class SmartWalletApproveAssetInput(BaseModel):
    """Arguments for approving an asset on a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    asset_contract: str = Field(
        ...,
        description="Contract principal of the asset to approve",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )


class SmartWalletApproveAssetTool(BaseTool):
    """Public tool that whitelists an asset contract for a smart wallet."""

    name: str = "smartwallet_approve_asset"
    description: str = (
        "Approve an asset for use with a smart wallet. "
        "Returns the transaction ID of the approval transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletApproveAssetInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, asset_contract: str, **kwargs) -> Dict[str, Any]:
        """Invoke the approve-asset.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "approve-asset.ts",
            smart_wallet_contract,
            asset_contract,
        )

    def _run(self, smart_wallet_contract: str, asset_contract: str, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, asset_contract, **kwargs)

    async def _arun(self, smart_wallet_contract: str, asset_contract: str, **kwargs) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(smart_wallet_contract, asset_contract, **kwargs)


class SmartWalletRevokeAssetInput(BaseModel):
    """Arguments for revoking an asset from a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    asset_contract: str = Field(
        ...,
        description="Contract principal of the asset to revoke",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )


class SmartWalletRevokeAssetTool(BaseTool):
    """Public tool that removes an asset contract from a smart wallet's whitelist."""

    name: str = "smartwallet_revoke_asset"
    description: str = (
        "Revoke an asset from a smart wallet. "
        "Returns the transaction ID of the revocation transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletRevokeAssetInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, asset_contract: str, **kwargs) -> Dict[str, Any]:
        """Invoke the revoke-asset.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "revoke-asset.ts",
            smart_wallet_contract,
            asset_contract,
        )

    def _run(self, smart_wallet_contract: str, asset_contract: str, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, asset_contract, **kwargs)

    async def _arun(self, smart_wallet_contract: str, asset_contract: str, **kwargs) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(smart_wallet_contract, asset_contract, **kwargs)


class SmartWalletDepositStxInput(BaseModel):
    """Input schema for depositing STX to a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    # gt=0 rejects zero/negative deposits at validation time.
    amount: int = Field(
        ...,
        description="Amount of STX to deposit in microSTX",
        example=1000000,
        gt=0,
    )


class SmartWalletDepositStxTool(BaseTool):
    """Public tool that moves STX from the backend wallet into a smart wallet."""

    name: str = "smartwallet_deposit_stx"
    description: str = (
        "Deposit STX to a smart wallet. "
        "Returns the transaction ID of the deposit transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletDepositStxInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Invoke the deposit-stx.ts script; the amount is passed as a string CLI arg."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "deposit-stx.ts",
            smart_wallet_contract,
            str(amount),
        )

    def _run(self, smart_wallet_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, amount, **kwargs)

    async def _arun(self, smart_wallet_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(smart_wallet_contract, amount, **kwargs)


class SmartWalletWithdrawStxInput(BaseModel):
    """Input schema for withdrawing STX from a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    # gt=0 rejects zero/negative withdrawals at validation time.
    amount: int = Field(
        ...,
        description="Amount of STX to withdraw in microSTX",
        example=1000000,
        gt=0,
    )


class SmartWalletWithdrawSTXTool(BaseTool):
    """Public tool that withdraws STX from a smart wallet."""

    name: str = "smartwallet_withdraw_stx"
    description: str = (
        "Withdraw STX from a smart wallet. "
        "Returns the transaction ID of the withdrawal transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletWithdrawStxInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Invoke the withdraw-stx.ts script; the amount is passed as a string CLI arg."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "withdraw-stx.ts",
            smart_wallet_contract,
            str(amount),
        )

    def _run(self, smart_wallet_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, amount, **kwargs)

    async def _arun(self, smart_wallet_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(smart_wallet_contract, amount, **kwargs)


class SmartWalletDepositFtInput(BaseModel):
    """Input schema for depositing fungible tokens to a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    ft_contract: str = Field(
        ...,
        description="Contract principal of the fungible token",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )
    amount: int = Field(
        ...,
        description="Amount of tokens to deposit",
        example=1000,
        gt=0,
    )


class SmartWalletDepositFtTool(BaseTool):
    """Public tool that deposits SIP-010 tokens into a smart wallet."""

    name: str = "smartwallet_deposit_ft"
    description: str = (
        "Deposit fungible tokens to a smart wallet. "
        "Returns the transaction ID of the deposit transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletDepositFtInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, ft_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Invoke the deposit-ft.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "deposit-ft.ts",
            smart_wallet_contract,
            ft_contract,
            str(amount),
        )

    def _run(self, smart_wallet_contract: str, ft_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs)

    async def _arun(self, smart_wallet_contract: str, ft_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs)


class SmartWalletWithdrawFtInput(BaseModel):
    """Input schema for withdrawing fungible tokens from a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    ft_contract: str = Field(
        ...,
        description="Contract principal of the fungible token",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )
    amount: int = Field(
        ...,
        description="Amount of tokens to withdraw",
        example=1000,
        gt=0,
    )


class SmartWalletWithdrawFTTool(BaseTool):
    """Public tool that withdraws SIP-010 tokens from a smart wallet."""

    name: str = "smartwallet_withdraw_ft"
    description: str = (
        "Withdraw fungible tokens from a smart wallet. "
        "Returns the transaction ID of the withdrawal transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletWithdrawFtInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(self, smart_wallet_contract: str, ft_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Invoke the withdraw-ft.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "withdraw-ft.ts",
            smart_wallet_contract,
            ft_contract,
            str(amount),
        )

    def _run(self, smart_wallet_contract: str, ft_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs)

    async def _arun(self, smart_wallet_contract: str, ft_contract: str, amount: int, **kwargs) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs)


class SmartWalletProxyCreateProposalInput(BaseModel):
    """Input schema for creating a core proposal through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    dao_core_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO core proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2",
    )
    dao_proposal_contract: str = Field(
        ...,
        description="Contract principal of the DAO proposal",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.proposal-add-extension",
    )
    dao_token_contract: str = Field(
        ...,
        description="Contract principal of the DAO token",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )


class SmartWalletProxyCreateProposalTool(BaseTool):
    """Public tool that proxies a core proposal creation through a smart wallet."""

    name: str = "smartwallet_proxy_create_proposal"
    description: str = (
        "Create a core proposal through a smart wallet. "
        "Returns the transaction ID of the proposal creation transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletProxyCreateProposalInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        dao_token_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke the proxy-create-proposal.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "proxy-create-proposal.ts",
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            dao_token_contract,
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        dao_token_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            dao_token_contract,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        dao_token_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            dao_token_contract,
            **kwargs,
        )


class SmartWalletProxyProposeActionSendMessageInput(BaseModel):
    """Input schema for proposing a send message action through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    )
    dao_action_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    )
    dao_action_proposal_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposal",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-send-message",
    )
    dao_token_contract: str = Field(
        ...,
        description="Contract principal of the DAO token",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )
    message: str = Field(
        ...,
        description="Message to send",
        example="hello world",
    )


class SmartWalletProxyProposeActionSendMessageTool(BaseTool):
    """Public tool that proposes an on-chain send-message action via a smart wallet."""

    name: str = "smartwallet_proxy_propose_action_send_message"
    description: str = (
        "Propose a send message action through a smart wallet. "
        "Returns the transaction ID of the action proposal transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSendMessageInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        message: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke the proxy-propose-action-send-message.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "proxy-propose-action-send-message.ts",
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            message,
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        message: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            message,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        message: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            message,
            **kwargs,
        )


class SmartWalletVoteOnActionProposalInput(BaseModel):
    """Input schema for voting on an action proposal through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    )
    dao_action_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    )
    proposal_id: int = Field(
        ...,
        description="ID of the action proposal",
        example=1,
        gt=0,
    )
    vote: bool = Field(
        ...,
        description="True to vote in favor, False to vote against",
        example=True,
    )


class SmartWalletVoteOnActionProposalTool(BaseTool):
    """Public tool that casts a smart wallet's vote on an action proposal."""

    name: str = "smartwallet_vote_on_action_proposal"
    description: str = (
        "Vote on an action proposal through a smart wallet. "
        "Returns the transaction ID of the vote transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletVoteOnActionProposalInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        proposal_id: int,
        vote: bool,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke vote-on-action-proposal.ts; bool is serialized as 'true'/'false'."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "vote-on-action-proposal.ts",
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            str(proposal_id),
            str(vote).lower(),
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        proposal_id: int,
        vote: bool,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            proposal_id,
            vote,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        proposal_id: int,
        vote: bool,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            proposal_id,
            vote,
            **kwargs,
        )


class SmartWalletVoteOnCoreProposalInput(BaseModel):
    """Input schema for voting on a core proposal through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    )
    dao_core_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO core proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2",
    )
    dao_proposal_contract: str = Field(
        ...,
        description="Contract principal of the DAO proposal",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send",
    )
    vote: bool = Field(
        ...,
        description="True to vote in favor, False to vote against",
        example=True,
    )


class SmartWalletVoteOnCoreProposalTool(BaseTool):
    """Public tool that casts a smart wallet's vote on a core proposal."""

    name: str = "smartwallet_vote_on_core_proposal"
    description: str = (
        "Vote on a core proposal through a smart wallet. "
        "Returns the transaction ID of the vote transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletVoteOnCoreProposalInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        vote: bool,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke vote-on-core-proposal.ts; bool is serialized as 'true'/'false'."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "vote-on-core-proposal.ts",
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            str(vote).lower(),
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        vote: bool,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            vote,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        vote: bool,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            vote,
            **kwargs,
        )


class SmartWalletConcludeActionProposalInput(BaseModel):
    """Input schema for concluding an action proposal through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    )
    dao_action_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    )
    proposal_id: int = Field(
        ...,
        description="ID of the action proposal",
        example=1,
        gt=0,
    )


class SmartWalletConcludeActionProposalTool(BaseTool):
    """Public tool that concludes an action proposal via a smart wallet."""

    name: str = "smartwallet_conclude_action_proposal"
    description: str = (
        "Conclude an action proposal through a smart wallet. "
        "Returns the transaction ID of the conclusion transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletConcludeActionProposalInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        proposal_id: int,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke the conclude-action-proposal.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "conclude-action-proposal.ts",
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            str(proposal_id),
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        proposal_id: int,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            proposal_id,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        proposal_id: int,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            proposal_id,
            **kwargs,
        )
class SmartWalletConcludeCoreProposalInput(BaseModel):
    """Input schema for concluding a core proposal through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    )
    dao_core_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO core proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2",
    )
    dao_proposal_contract: str = Field(
        ...,
        description="Contract principal of the DAO proposal",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send",
    )


class SmartWalletConcludeCoreProposalTool(BaseTool):
    """Public tool that concludes a core proposal via a smart wallet."""

    name: str = "smartwallet_conclude_core_proposal"
    description: str = (
        "Conclude a core proposal through a smart wallet. "
        "Returns the transaction ID of the conclusion transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletConcludeCoreProposalInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke the conclude-core-proposal.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "conclude-core-proposal.ts",
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_core_proposals_extension_contract: str,
        dao_proposal_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_core_proposals_extension_contract,
            dao_proposal_contract,
            **kwargs,
        )


class SmartWalletProxyProposeActionAddResourceInput(BaseModel):
    """Input schema for proposing an action to add a resource through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    )
    dao_action_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    )
    dao_action_proposal_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposal",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send",
    )
    dao_token_contract: str = Field(
        ...,
        description="Contract principal of the DAO token",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )
    resource_name: str = Field(
        ...,
        description="Name of the resource to add",
        example="my-resource",
    )
    resource_contract: str = Field(
        ...,
        description="Contract principal of the resource",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.my-resource",
    )


class SmartWalletProxyProposeActionAddResourceTool(BaseTool):
    """Public tool that proposes an add-resource action via a smart wallet."""

    name: str = "smartwallet_proxy_propose_action_add_resource"
    description: str = (
        "Propose an action to add a resource through a smart wallet. "
        "Returns the transaction ID of the proposal transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletProxyProposeActionAddResourceInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        resource_name: str,
        resource_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke the proxy-propose-action-add-resource.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "proxy-propose-action-add-resource.ts",
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            resource_name,
            resource_contract,
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        resource_name: str,
        resource_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            resource_name,
            resource_contract,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        resource_name: str,
        resource_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            resource_name,
            resource_contract,
            **kwargs,
        )


class SmartWalletProxyProposeActionAllowAssetInput(BaseModel):
    """Input schema for proposing an action to allow an asset through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    )
    dao_action_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    )
    dao_action_proposal_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposal",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send",
    )
    dao_token_contract: str = Field(
        ...,
        description="Contract principal of the DAO token",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )
    asset_contract: str = Field(
        ...,
        description="Contract principal of the asset",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.my-asset",
    )


class SmartWalletProxyProposeActionAllowAssetTool(BaseTool):
    """Public tool that proposes an allow-asset action via a smart wallet."""

    name: str = "smartwallet_proxy_propose_action_allow_asset"
    description: str = (
        "Propose an action to allow an asset through a smart wallet. "
        "Returns the transaction ID of the proposal transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletProxyProposeActionAllowAssetInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        asset_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke the proxy-propose-action-allow-asset.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "proxy-propose-action-allow-asset.ts",
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            asset_contract,
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        asset_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            asset_contract,
            **kwargs,
        )

    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        asset_contract: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            asset_contract,
            **kwargs,
        )


class SmartWalletProxyProposeActionToggleResourceByNameInput(BaseModel):
    """Input schema for proposing an action to toggle a resource by name through a smart wallet."""

    smart_wallet_contract: str = Field(
        ...,
        description="Contract principal of the smart wallet",
        example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    )
    dao_action_proposals_extension_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposals extension",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    )
    dao_action_proposal_contract: str = Field(
        ...,
        description="Contract principal of the DAO action proposal",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send",
    )
    dao_token_contract: str = Field(
        ...,
        description="Contract principal of the DAO token",
        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    )
    resource_name: str = Field(
        ...,
        description="Name of the resource to toggle",
        example="my-resource",
    )


class SmartWalletProxyProposeActionToggleResourceByNameTool(BaseTool):
    """Public tool that proposes a toggle-resource-by-name action via a smart wallet."""

    name: str = "smartwallet_proxy_propose_action_toggle_resource_by_name"
    description: str = (
        "Propose an action to toggle a resource by name through a smart wallet. "
        "Returns the transaction ID of the proposal transaction."
    )
    args_schema: Type[BaseModel] = SmartWalletProxyProposeActionToggleResourceByNameInput
    return_direct: bool = False
    wallet_id: Optional[UUID] = None

    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
        """Remember which backend wallet signs the transaction."""
        super().__init__(**kwargs)
        self.wallet_id = wallet_id

    def _deploy(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        resource_name: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Invoke the proxy-propose-action-toggle-resource-by-name.ts script."""
        if self.wallet_id is None:
            return {"success": False, "message": "Wallet ID is required", "data": None}
        return BunScriptRunner.bun_run(
            self.wallet_id,
            "aibtc-dao/smart-wallet/public",
            "proxy-propose-action-toggle-resource-by-name.ts",
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            resource_name,
        )

    def _run(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        resource_name: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Synchronous entry point; delegates to _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            resource_name,
            **kwargs,
        )

    # NOTE(review): the original _arun body is cut off at the end of the visible
    # diff; reconstructed from the identical pattern used by every sibling tool
    # in this file — confirm against the applied patch.
    async def _arun(
        self,
        smart_wallet_contract: str,
        dao_action_proposals_extension_contract: str,
        dao_action_proposal_contract: str,
        dao_token_contract: str,
        resource_name: str,
        **kwargs,
    ) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous _deploy."""
        return self._deploy(
            smart_wallet_contract,
            dao_action_proposals_extension_contract,
            dao_action_proposal_contract,
            dao_token_contract,
            resource_name,
            **kwargs,
        )
self, smart_wallet_contract: str, - ft_contract: str, - amount: int, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + resource_name: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs) + return self._deploy( + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + resource_name, + **kwargs, + ) -class ApproveAssetInput(BaseModel): - """Input schema for approving an asset in a smart wallet.""" +class SmartWalletProxyProposeActionSetAccountHolderInput(BaseModel): + """Input schema for proposing an action to set the account holder through a smart wallet.""" smart_wallet_contract: str = Field( ..., description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", ) - asset_contract: str = Field( + dao_action_proposals_extension_contract: str = Field( ..., - description="Contract principal of the asset to approve", + description="Contract principal of the DAO action proposals extension", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", + ) + dao_action_proposal_contract: str = Field( + ..., + description="Contract principal of the DAO action proposal", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", + ) + dao_token_contract: str = Field( + ..., + description="Contract principal of the DAO token", example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", ) + account_holder: str = Field( + ..., + description="Principal of the new account holder", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", + ) -class ApproveAssetTool(BaseTool): - name: str = "smartwallet_approve_asset" +class 
SmartWalletProxyProposeActionSetAccountHolderTool(BaseTool): + name: str = "smartwallet_proxy_propose_action_set_account_holder" description: str = ( - "Approve an asset for use with the smart wallet. " - "This allows the smart wallet to interact with the specified asset contract." + "Propose an action to set the account holder through a smart wallet. " + "Returns the transaction ID of the proposal transaction." ) - args_schema: Type[BaseModel] = ApproveAssetInput + args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetAccountHolderInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -173,63 +2073,108 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, smart_wallet_contract: str, - asset_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + account_holder: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to approve an asset.""" + """Execute the tool to propose an action to set the account holder.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} - args = [smart_wallet_contract, asset_contract] + args = [ + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + account_holder, + ] return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/smart-wallet/public", - "approve-asset.ts", + "proxy-propose-action-set-account-holder.ts", *args, ) def _run( self, smart_wallet_contract: str, - asset_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + account_holder: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to approve an asset.""" - return self._deploy(smart_wallet_contract, asset_contract, **kwargs) + """Execute the tool to propose an action to set the account holder.""" + return self._deploy( + smart_wallet_contract, + 
dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + account_holder, + **kwargs, + ) async def _arun( self, smart_wallet_contract: str, - asset_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + account_holder: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy(smart_wallet_contract, asset_contract, **kwargs) + return self._deploy( + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + account_holder, + **kwargs, + ) -class RevokeAssetInput(BaseModel): - """Input schema for revoking an asset from a smart wallet.""" +class SmartWalletProxyProposeActionSetWithdrawalAmountInput(BaseModel): + """Input schema for proposing an action to set the withdrawal amount through a smart wallet.""" smart_wallet_contract: str = Field( ..., description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", ) - asset_contract: str = Field( + dao_action_proposals_extension_contract: str = Field( ..., - description="Contract principal of the asset to revoke", + description="Contract principal of the DAO action proposals extension", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", + ) + dao_action_proposal_contract: str = Field( + ..., + description="Contract principal of the DAO action proposal", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", + ) + dao_token_contract: str = Field( + ..., + description="Contract principal of the DAO token", example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", ) + withdrawal_amount: int = Field( + ..., + description="New withdrawal amount in micro-STX", + example=1000000, + gt=0, + ) -class 
RevokeAssetTool(BaseTool): - name: str = "smartwallet_revoke_asset" +class SmartWalletProxyProposeActionSetWithdrawalAmountTool(BaseTool): + name: str = "smartwallet_proxy_propose_action_set_withdrawal_amount" description: str = ( - "Revoke an asset from the smart wallet. " - "This prevents the smart wallet from interacting with the specified asset contract." + "Propose an action to set the withdrawal amount through a smart wallet. " + "Returns the transaction ID of the proposal transaction." ) - args_schema: Type[BaseModel] = RevokeAssetInput + args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetWithdrawalAmountInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -240,55 +2185,108 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, smart_wallet_contract: str, - asset_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + withdrawal_amount: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to revoke an asset.""" + """Execute the tool to propose an action to set the withdrawal amount.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} - args = [smart_wallet_contract, asset_contract] + args = [ + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + str(withdrawal_amount), + ] return BunScriptRunner.bun_run( self.wallet_id, "aibtc-dao/smart-wallet/public", - "revoke-asset.ts", + "proxy-propose-action-set-withdrawal-amount.ts", *args, ) def _run( self, smart_wallet_contract: str, - asset_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + withdrawal_amount: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to revoke an asset.""" - return self._deploy(smart_wallet_contract, asset_contract, **kwargs) + """Execute the tool to 
propose an action to set the withdrawal amount.""" + return self._deploy( + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + withdrawal_amount, + **kwargs, + ) async def _arun( self, smart_wallet_contract: str, - asset_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + withdrawal_amount: int, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy(smart_wallet_contract, asset_contract, **kwargs) + return self._deploy( + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + withdrawal_amount, + **kwargs, + ) -class GetBalanceSTXInput(BaseModel): - """Input schema for getting STX balance from a smart wallet.""" +class SmartWalletProxyProposeActionSetWithdrawalPeriodInput(BaseModel): + """Input schema for proposing an action to set the withdrawal period through a smart wallet.""" smart_wallet_contract: str = Field( ..., description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", + example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", + ) + dao_action_proposals_extension_contract: str = Field( + ..., + description="Contract principal of the DAO action proposals extension", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", + ) + dao_action_proposal_contract: str = Field( + ..., + description="Contract principal of the DAO action proposal", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", + ) + dao_token_contract: str = Field( + ..., + description="Contract principal of the DAO token", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + withdrawal_period: int = Field( + ..., + description="New withdrawal period in blocks", + 
example=144, + gt=0, ) -class GetBalanceSTXTool(BaseTool): - name: str = "smartwallet_get_balance_stx" - description: str = "Get the STX balance from a smart wallet. Returns the current STX balance as a number." - args_schema: Type[BaseModel] = GetBalanceSTXInput +class SmartWalletProxyProposeActionSetWithdrawalPeriodTool(BaseTool): + name: str = "smartwallet_proxy_propose_action_set_withdrawal_period" + description: str = ( + "Propose an action to set the withdrawal period through a smart wallet. " + "Returns the transaction ID of the proposal transaction." + ) + args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetWithdrawalPeriodInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -299,60 +2297,88 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, smart_wallet_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + withdrawal_period: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" + """Execute the tool to propose an action to set the withdrawal period.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} - args = [smart_wallet_contract] + args = [ + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + str(withdrawal_period), + ] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "get-balance-stx.ts", + "aibtc-dao/smart-wallet/public", + "proxy-propose-action-set-withdrawal-period.ts", *args, ) def _run( self, smart_wallet_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + withdrawal_period: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" - return self._deploy(smart_wallet_contract, **kwargs) + """Execute the tool to propose an 
action to set the withdrawal period.""" + return self._deploy( + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + withdrawal_period, + **kwargs, + ) async def _arun( self, smart_wallet_contract: str, + dao_action_proposals_extension_contract: str, + dao_action_proposal_contract: str, + dao_token_contract: str, + withdrawal_period: int, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy(smart_wallet_contract, **kwargs) + return self._deploy( + smart_wallet_contract, + dao_action_proposals_extension_contract, + dao_action_proposal_contract, + dao_token_contract, + withdrawal_period, + **kwargs, + ) -class IsApprovedAssetInput(BaseModel): - """Input schema for checking if an asset is approved in a smart wallet.""" +class SmartWalletDepositSTXInput(BaseModel): + """Input schema for depositing STX to a smart wallet.""" smart_wallet_contract: str = Field( ..., description="Contract principal of the smart wallet", example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset to check", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-faktory", - ) + amount: int = Field(..., description="Amount of STX to deposit in microstacks") -class IsApprovedAssetTool(BaseTool): - name: str = "smartwallet_is_approved_asset" +class SmartWalletDepositSTXTool(BaseTool): + name: str = "smartwallet_deposit_stx" description: str = ( - "Check if a specific asset is approved in the smart wallet. " - "Returns true if the asset is approved, false if not." + "Deposit STX into a smart wallet. " + "The amount should be specified in microstacks (1 STX = 1,000,000 microstacks)." 
) - args_schema: Type[BaseModel] = IsApprovedAssetInput + args_schema: Type[BaseModel] = SmartWalletDepositSTXInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -363,58 +2389,64 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, smart_wallet_contract: str, - asset_contract: str, + amount: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to check if asset is approved.""" + """Execute the tool to deposit STX to a smart wallet.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} - args = [smart_wallet_contract, asset_contract] + args = [smart_wallet_contract, str(amount)] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "is-approved-asset.ts", + "aibtc-dao/smart-wallet/public", + "deposit-stx.ts", *args, ) def _run( self, smart_wallet_contract: str, - asset_contract: str, + amount: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to check if asset is approved.""" - return self._deploy(smart_wallet_contract, asset_contract, **kwargs) + """Execute the tool to deposit STX to a smart wallet.""" + return self._deploy(smart_wallet_contract, amount, **kwargs) async def _arun( self, smart_wallet_contract: str, - asset_contract: str, + amount: int, **kwargs, ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(smart_wallet_contract, asset_contract, **kwargs) + """Async version of the tool to deposit STX to a smart wallet.""" + return self._deploy(smart_wallet_contract, amount, **kwargs) -class GetConfigurationInput(BaseModel): - """Input schema for getting smart wallet configuration.""" +class SmartWalletDepositFTInput(BaseModel): + """Input schema for depositing fungible tokens to a smart wallet.""" smart_wallet_contract: str = Field( ..., description="Contract principal of the smart wallet", example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", ) + 
ft_contract: str = Field( + ..., + description="Contract principal of the fungible token", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", + ) + amount: int = Field(..., description="Amount of tokens to deposit") -class GetConfigurationTool(BaseTool): - name: str = "smartwallet_get_configuration" +class SmartWalletDepositFTTool(BaseTool): + name: str = "smartwallet_deposit_ft" description: str = ( - "Get the configuration of a smart wallet. " - "Returns information about the agent, user, smart wallet, DAO token, and sBTC token." + "Deposit fungible tokens into a smart wallet. " + "Requires the token contract principal and amount to deposit." ) - args_schema: Type[BaseModel] = GetConfigurationInput + args_schema: Type[BaseModel] = SmartWalletDepositFTInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -425,66 +2457,60 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, smart_wallet_contract: str, + ft_contract: str, + amount: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get smart wallet configuration.""" + """Execute the tool to deposit fungible tokens.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} - args = [smart_wallet_contract] + args = [smart_wallet_contract, ft_contract, str(amount)] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "get-configuration.ts", + "aibtc-dao/smart-wallet/public", + "deposit-ft.ts", *args, ) def _run( self, smart_wallet_contract: str, + ft_contract: str, + amount: int, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to get smart wallet configuration.""" - return self._deploy(smart_wallet_contract, **kwargs) + """Execute the tool to deposit fungible tokens.""" + return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs) async def _arun( self, smart_wallet_contract: str, + ft_contract: str, + amount: int, **kwargs, ) -> Dict[str, Any]: 
"""Async version of the tool.""" - return self._deploy(smart_wallet_contract, **kwargs) + return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs) -class DeploySmartWalletInput(BaseModel): - """Input schema for deploying a smart wallet.""" +class SmartWalletGetBalanceSTXInput(BaseModel): + """Input schema for getting STX balance from a smart wallet.""" - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( + smart_wallet_contract: str = Field( ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", + description="Contract principal of the smart wallet", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", ) -class DeploySmartWalletTool(BaseTool): - name: str = "smartwallet_deploy" +class SmartWalletGetBalanceSTXTool(BaseTool): + name: str = "smartwallet_get_balance_stx" description: str = ( - "Deploy a new smart wallet for a user. " - "The smart wallet will be owned by the specified address and linked to the DAO token. " - "Returns the deployed smart wallet contract address and transaction ID." + "Get the STX balance from a smart wallet. Returns the current STX balance as a number." 
) - args_schema: Type[BaseModel] = DeploySmartWalletInput + args_schema: Type[BaseModel] = SmartWalletGetBalanceSTXInput return_direct: bool = False wallet_id: Optional[UUID] = None @@ -494,37 +2520,49 @@ def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): def _deploy( self, - owner_address: str, - dao_token_contract: str, + smart_wallet_contract: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to deploy a smart wallet.""" + """Execute the tool to get STX balance.""" if self.wallet_id is None: return {"success": False, "message": "Wallet ID is required", "data": None} - args = [owner_address, dao_token_contract] + args = [smart_wallet_contract] return BunScriptRunner.bun_run( self.wallet_id, - "aibtc-dao", - "deploy-smart-wallet.ts", + "aibtc-dao/smart-wallet/read-only", + "get-balance-stx.ts", *args, ) def _run( self, - owner_address: str, - dao_token_contract: str, + smart_wallet_contract: str, **kwargs, ) -> Dict[str, Any]: - """Execute the tool to deploy a smart wallet.""" - return self._deploy(owner_address, dao_token_contract, **kwargs) + """Execute the tool to get STX balance.""" + return self._deploy(smart_wallet_contract, **kwargs) async def _arun( self, - owner_address: str, - dao_token_contract: str, + smart_wallet_contract: str, **kwargs, ) -> Dict[str, Any]: """Async version of the tool.""" - return self._deploy(owner_address, dao_token_contract, **kwargs) + return self._deploy(smart_wallet_contract, **kwargs) + + +class SmartWalletIsApprovedAssetInput(BaseModel): + """Input schema for checking if an asset is approved in a smart wallet.""" + + smart_wallet_contract: str = Field( + ..., + description="Contract principal of the smart wallet", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18", + ) + asset_contract: str = Field( + ..., + description="Contract principal of the asset to check", + example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-faktory", + ) diff --git a/tools/tools_factory.py 
b/tools/tools_factory.py index 52720d70..b98f3fb4 100644 --- a/tools/tools_factory.py +++ b/tools/tools_factory.py @@ -27,13 +27,15 @@ ProposeActionToggleResourceTool, VoteOnActionProposalTool, ) -from .dao_ext_timed_vault import DepositSTXTool as TimedVaultDepositSTXTool -from .dao_ext_timed_vault import GetAccountTermsTool, WithdrawSTXTool from .dao_ext_charter import ( GetCurrentDaoCharterTool, GetCurrentDaoCharterVersionTool, GetDaoCharterTool, ) +from .dao_ext_core_proposals import ( + DeployCoreProposalTool, + GenerateCoreProposalTool, +) from .dao_ext_payments_invoices import ( GetInvoiceTool, GetResourceByNameTool, @@ -41,6 +43,8 @@ PayInvoiceByResourceNameTool, PayInvoiceTool, ) +from .dao_ext_timed_vault import DepositSTXTool as TimedVaultDepositSTXTool +from .dao_ext_timed_vault import GetAccountTermsTool, WithdrawSTXTool from .dao_ext_treasury import GetAllowedAssetTool, IsAllowedAssetTool from .database import ( AddScheduledTaskTool, @@ -62,13 +66,32 @@ LunarCrushTokenMetricsTool, SearchLunarCrushTool, ) -from .smartwallet import ApproveAssetTool, DeploySmartWalletTool, DepositFTTool -from .smartwallet import DepositSTXTool as SmartWalletDepositSTXTool from .smartwallet import ( - GetBalanceSTXTool, - GetConfigurationTool, - IsApprovedAssetTool, - RevokeAssetTool, + SmartWalletApproveAssetTool, + SmartWalletConcludeActionProposalTool, + SmartWalletConcludeCoreProposalTool, + SmartWalletDeployMySmartWalletTool, + SmartWalletDeploySmartWalletTool, + SmartWalletDepositFTTool, + SmartWalletDepositSTXTool, + SmartWalletGenerateMySmartWalletTool, + SmartWalletGenerateSmartWalletTool, + SmartWalletGetBalanceSTXTool, + SmartWalletGetConfigurationTool, + SmartWalletIsApprovedAssetTool, + SmartWalletProxyCreateProposalTool, + SmartWalletProxyProposeActionAddResourceTool, + SmartWalletProxyProposeActionAllowAssetTool, + SmartWalletProxyProposeActionSendMessageTool, + SmartWalletProxyProposeActionSetAccountHolderTool, + 
SmartWalletProxyProposeActionSetWithdrawalAmountTool, + SmartWalletProxyProposeActionSetWithdrawalPeriodTool, + SmartWalletProxyProposeActionToggleResourceByNameTool, + SmartWalletRevokeAssetTool, + SmartWalletVoteOnActionProposalTool, + SmartWalletVoteOnCoreProposalTool, + SmartWalletWithdrawFTTool, + SmartWalletWithdrawSTXTool, ) from .telegram import SendTelegramNotificationTool from .transactions import ( @@ -131,6 +154,8 @@ def initialize_tools( "contracts_get_sip10_info": ContractSIP10InfoTool(wallet_id), "contracts_deploy_dao": ContractDAODeployTool(wallet_id), "contracts_fetch_source_code": FetchContractSourceTool(wallet_id), + "dao_coreproposals_generate_proposal": GenerateCoreProposalTool(wallet_id), + "dao_coreproposals_deploy_proposal": DeployCoreProposalTool(wallet_id), "dao_actionproposals_conclude_proposal": ConcludeActionProposalTool(wallet_id), "dao_actionproposals_get_liquid_supply": GetLiquidSupplyTool(wallet_id), "dao_actionproposals_get_proposal": GetProposalTool(wallet_id), @@ -201,14 +226,61 @@ def initialize_tools( "wallet_get_my_transactions": WalletGetMyTransactions(wallet_id), "wallet_send_sip10": WalletSIP10SendTool(wallet_id), "x_credentials": CollectXCredentialsTool(profile_id), - "smartwallet_deploy_smart_wallet": DeploySmartWalletTool(wallet_id), + "smartwallet_deploy_smart_wallet": SmartWalletDeploySmartWalletTool(wallet_id), + "smartwallet_deploy_my_smart_wallet": SmartWalletDeployMySmartWalletTool( + wallet_id + ), "smartwallet_deposit_stx": SmartWalletDepositSTXTool(wallet_id), - "smartwallet_deposit_ft": DepositFTTool(wallet_id), - "smartwallet_approve_asset": ApproveAssetTool(wallet_id), - "smartwallet_revoke_asset": RevokeAssetTool(wallet_id), - "smartwallet_get_balance_stx": GetBalanceSTXTool(wallet_id), - "smartwallet_is_approved_asset": IsApprovedAssetTool(wallet_id), - "smartwallet_get_configuration": GetConfigurationTool(wallet_id), + "smartwallet_deposit_ft": SmartWalletDepositFTTool(wallet_id), + 
"smartwallet_approve_asset": SmartWalletApproveAssetTool(wallet_id), + "smartwallet_revoke_asset": SmartWalletRevokeAssetTool(wallet_id), + "smartwallet_get_balance_stx": SmartWalletGetBalanceSTXTool(wallet_id), + "smartwallet_is_approved_asset": SmartWalletIsApprovedAssetTool(wallet_id), + "smartwallet_get_configuration": SmartWalletGetConfigurationTool(wallet_id), + "smartwallet_generate_smart_wallet": SmartWalletGenerateSmartWalletTool( + wallet_id + ), + "smartwallet_generate_my_smart_wallet": SmartWalletGenerateMySmartWalletTool( + wallet_id + ), + "smartwallet_withdraw_stx": SmartWalletWithdrawSTXTool(wallet_id), + "smartwallet_withdraw_ft": SmartWalletWithdrawFTTool(wallet_id), + "smartwallet_proxy_create_proposal": SmartWalletProxyCreateProposalTool( + wallet_id + ), + "smartwallet_proxy_propose_action_send_message": SmartWalletProxyProposeActionSendMessageTool( + wallet_id + ), + "smartwallet_proxy_propose_action_add_resource": SmartWalletProxyProposeActionAddResourceTool( + wallet_id + ), + "smartwallet_proxy_propose_action_allow_asset": SmartWalletProxyProposeActionAllowAssetTool( + wallet_id + ), + "smartwallet_proxy_propose_action_toggle_resource_by_name": SmartWalletProxyProposeActionToggleResourceByNameTool( + wallet_id + ), + "smartwallet_proxy_propose_action_set_account_holder": SmartWalletProxyProposeActionSetAccountHolderTool( + wallet_id + ), + "smartwallet_proxy_propose_action_set_withdrawal_amount": SmartWalletProxyProposeActionSetWithdrawalAmountTool( + wallet_id + ), + "smartwallet_proxy_propose_action_set_withdrawal_period": SmartWalletProxyProposeActionSetWithdrawalPeriodTool( + wallet_id + ), + "smartwallet_vote_on_action_proposal": SmartWalletVoteOnActionProposalTool( + wallet_id + ), + "smartwallet_vote_on_core_proposal": SmartWalletVoteOnCoreProposalTool( + wallet_id + ), + "smartwallet_conclude_action_proposal": SmartWalletConcludeActionProposalTool( + wallet_id + ), + "smartwallet_conclude_core_proposal": 
SmartWalletConcludeCoreProposalTool( + wallet_id + ), } return tools