diff --git a/.env.example b/.env.example index a25e35f7..242599ef 100644 --- a/.env.example +++ b/.env.example @@ -3,133 +3,123 @@ # ============================================================================= # Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) LOG_LEVEL=INFO - -# Backend type (supabase, cloudflare) -AIBTC_BACKEND="supabase" - -# Network configuration (mainnet, testnet) -NETWORK="testnet" - -# WebSocket settings -WEBSOCKETS_MAX_LINE_LENGTH=31928 +# Network Configuration +NETWORK=testnet # ============================================================================= # Database Configuration (Supabase) # ============================================================================= -AIBTC_SUPABASE_URL="https://hellowowld.supabase.co" -AIBTC_SUPABASE_SERVICE_KEY="your-service-key" -AIBTC_SUPABASE_BUCKET_NAME="your-bucket-name" -# Database connection details -AIBTC_SUPABASE_USER="user" -AIBTC_SUPABASE_PASSWORD="user" -AIBTC_SUPABASE_HOST="localhost" -AIBTC_SUPABASE_PORT="5432" -AIBTC_SUPABASE_DBNAME="postgres" +AIBTC_BACKEND=supabase +AIBTC_SUPABASE_USER=your_supabase_user +AIBTC_SUPABASE_PASSWORD=your_supabase_password +AIBTC_SUPABASE_HOST=your_supabase_host +AIBTC_SUPABASE_PORT=5432 +AIBTC_SUPABASE_DBNAME=your_database_name +AIBTC_SUPABASE_URL=https://your-project.supabase.co +AIBTC_SUPABASE_SERVICE_KEY=your_supabase_service_key +AIBTC_SUPABASE_BUCKET_NAME=your_bucket_name # ============================================================================= -# External API Endpoints & Keys +# Backend Wallet Configuration # ============================================================================= -# Webhook Configuration -AIBTC_WEBHOOK_URL="https://core-staging.aibtc.dev/webhooks/chainhook" -AIBTC_WEBHOOK_AUTH_TOKEN="Bearer your-webhook-auth-token" +AIBTC_BACKEND_WALLET_SEED_PHRASE=your_wallet_seed_phrase +AIBTC_BACKEND_WALLET_PRIVATE_KEY=your_wallet_private_key +AIBTC_BACKEND_WALLET_PUBLIC_KEY=your_wallet_public_key +AIBTC_BACKEND_WALLET_ADDRESS=your_wallet_address -# Platform APIs -AIBTC_PLATFORM_API_URL="https://api.platform.hiro.so" -AIBTC_HIRO_API_URL=https://api.hiro.so -HIRO_API_KEY="your-hiro-api-key" -AIBTC_ALEX_BASE_URL=https://api.alexgo.io/ -AIBTC_VELAR_BASE_URL="https://gateway.velar.network/" +# ============================================================================= +# Twitter Configuration +# ============================================================================= +AIBTC_TWITTER_ENABLED=false +AIBTC_TWITTER_INTERVAL_SECONDS=120 +AIBTC_TWITTER_CONSUMER_KEY=your_twitter_consumer_key +AIBTC_TWITTER_CONSUMER_SECRET=your_twitter_consumer_secret +AIBTC_TWITTER_CLIENT_ID=your_twitter_client_id +AIBTC_TWITTER_CLIENT_SECRET=your_twitter_client_secret +AIBTC_TWITTER_ACCESS_TOKEN=your_twitter_access_token +AIBTC_TWITTER_ACCESS_SECRET=your_twitter_access_secret +AIBTC_TWITTER_USERNAME=your_twitter_username +AIBTC_TWITTER_AUTOMATED_USER_ID=your_automated_user_id +AIBTC_TWITTER_WHITELISTED=user1,user2,user3 -# AI Services -OPENAI_MODEL_NAME="gpt-4.1" -OPENAI_API_KEY="sk-proj-your-api-key-here" -# For local model deployment -# OPENAI_API_BASE="http://localhost:5000" +# ============================================================================= +# Telegram Configuration +# ============================================================================= +AIBTC_TELEGRAM_BOT_TOKEN=your_telegram_bot_token +AIBTC_TELEGRAM_BOT_ENABLED=false -# Market Data APIs -SERPER_API_KEY="your-serper-api-key" -AIBTC_CMC_API_KEY='cmc-api-key' 
-AIBTC_LUNARCRUSH_API_KEY="lunarcrush-api-key" -AIBTC_LUNARCRUSH_BASE_URL="https://lunarcrush.com/api/v2" +# ============================================================================= +# Discord Configuration +# ============================================================================= +AIBTC_DISCORD_WEBHOOK_URL_PASSED=https://discord.com/api/webhooks/your_passed_webhook +AIBTC_DISCORD_WEBHOOK_URL_FAILED=https://discord.com/api/webhooks/your_failed_webhook # ============================================================================= -# Task Scheduling Configuration +# Job Scheduler Configuration (NEW NAMING - matches job types exactly) # ============================================================================= -# Schedule Sync + +# General Scheduler Settings AIBTC_SCHEDULE_SYNC_ENABLED=false AIBTC_SCHEDULE_SYNC_INTERVAL_SECONDS=60 -# DAO Processing Pipeline -# Step 1: Process DAO deployments -AIBTC_DAO_RUNNER_ENABLED=false -AIBTC_DAO_RUNNER_INTERVAL_SECONDS=30 +# Agent Account Deployer Job +AIBTC_AGENT_ACCOUNT_DEPLOYER_ENABLED=false +AIBTC_AGENT_ACCOUNT_DEPLOYER_INTERVAL_SECONDS=60 -# Step 2: Generate tweets for completed DAOs -AIBTC_DAO_TWEET_RUNNER_ENABLED=false -AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS=30 +# Chain State Monitor Job +AIBTC_CHAIN_STATE_MONITOR_ENABLED=true +AIBTC_CHAIN_STATE_MONITOR_INTERVAL_SECONDS=300 -# Step 3: Post generated tweets -AIBTC_TWEET_RUNNER_ENABLED=false -AIBTC_TWEET_RUNNER_INTERVAL_SECONDS=30 +# DAO Deployment Job +AIBTC_DAO_DEPLOYMENT_ENABLED=false +AIBTC_DAO_DEPLOYMENT_INTERVAL_SECONDS=60 -# Step 4: Process DAO proposal votes -AIBTC_DAO_PROPOSAL_VOTE_RUNNER_ENABLED=false -AIBTC_DAO_PROPOSAL_VOTE_RUNNER_INTERVAL_SECONDS=60 +# DAO Deployment Tweet Job +AIBTC_DAO_DEPLOYMENT_TWEET_ENABLED=false +AIBTC_DAO_DEPLOYMENT_TWEET_INTERVAL_SECONDS=60 -# Step 5: Process DAO proposal conclusions -AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_ENABLED=false -AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_INTERVAL_SECONDS=60 -AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID="your-wallet-id" +# DAO Proposal Conclude Job +AIBTC_DAO_PROPOSAL_CONCLUDE_ENABLED=false +AIBTC_DAO_PROPOSAL_CONCLUDE_INTERVAL_SECONDS=60 -# Step 6: -AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_ENABLED=false -AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_INTERVAL_SECONDS=60 +# DAO Proposal Embedder Job +AIBTC_DAO_PROPOSAL_EMBEDDER_ENABLED=false +AIBTC_DAO_PROPOSAL_EMBEDDER_INTERVAL_SECONDS=300 -# Step 6: Process agent account deployments -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED=false -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS=60 -AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID="your-wallet-id" +# DAO Proposal Evaluation Job +AIBTC_DAO_PROPOSAL_EVALUATION_ENABLED=false +AIBTC_DAO_PROPOSAL_EVALUATION_INTERVAL_SECONDS=60 -# ============================================================================= -# Social Media Integration -# ============================================================================= -# Twitter Configuration -AIBTC_TWITTER_ENABLED=false -AIBTC_TWITTER_INTERVAL_SECONDS=120 +# DAO Proposal Vote Job +AIBTC_DAO_PROPOSAL_VOTE_ENABLED=false +AIBTC_DAO_PROPOSAL_VOTE_INTERVAL_SECONDS=60 -# Twitter API Credentials -AIBTC_TWITTER_CONSUMER_KEY="your-twitter-consumer-key" -AIBTC_TWITTER_CONSUMER_SECRET="your-twitter-consumer-secret" -AIBTC_TWITTER_ACCESS_TOKEN="your-twitter-access-token" -AIBTC_TWITTER_ACCESS_SECRET="your-twitter-access-secret" -AIBTC_TWITTER_CLIENT_ID="your-twitter-client-id" -AIBTC_TWITTER_CLIENT_SECRET="your-twitter-client-secret" - -# Twitter User Configuration 
-AIBTC_TWITTER_AUTOMATED_USER_ID="your-twitter-automated-user-id" -AIBTC_TWITTER_PROFILE_ID="your-twitter-profile-id" -AIBTC_TWITTER_AGENT_ID="your-twitter-agent-id" -# Comma-separated list of whitelisted Twitter user IDs -AIBTC_TWITTER_WHITELISTED="your-twitter-whitelisted" -AIBTC_TWITTER_WHITELIST_ENABLED=false +# Discord Job +AIBTC_DISCORD_ENABLED=false +AIBTC_DISCORD_INTERVAL_SECONDS=30 + +# Tweet Job +AIBTC_TWEET_ENABLED=false +AIBTC_TWEET_INTERVAL_SECONDS=30 -# Telegram Configuration -AIBTC_TELEGRAM_BOT_TOKEN="your-telegram-bot-token" -AIBTC_TELEGRAM_BOT_ENABLED=false -#Discrod -# For successful proposals (celebrations, announcements) -AIBTC_DISCORD_WEBHOOK_URL_PASSED="https://discord.com/api/webhooks/YOUR_SUCCESS_WEBHOOK" -# For failed proposals (notifications, discussions) -AIBTC_DISCORD_WEBHOOK_URL_FAILED="https://discord.com/api/webhooks/YOUR_FAILURE_WEBHOOK" # ============================================================================= -# Additional Tools & Services +# API Configuration # ============================================================================= -AIBTC_FAKTORY_API_KEY="your-faktory-api-key" +AIBTC_BASEURL=https://app-staging.aibtc.dev +AIBTC_ALEX_BASE_URL=https://api.alexgo.io/ +AIBTC_HIRO_API_URL=https://api.hiro.so +AIBTC_PLATFORM_API_URL=https://api.platform.hiro.so +AIBTC_VELAR_BASE_URL=https://gateway.velar.network/ +AIBTC_LUNARCRUSH_BASE_URL=https://lunarcrush.com/api/v2 + +# API Keys +HIRO_API_KEY=your_hiro_api_key +AIBTC_LUNARCRUSH_API_KEY=your_lunarcrush_api_key +AIBTC_CMC_API_KEY=your_coinmarketcap_api_key +OPENAI_API_KEY=your_openai_api_key -# Bitflow Configuration -BITFLOW_API_HOST=https://bitflowapihost.hiro.so -BITFLOW_API_KEY="your-bitflow-api-key" -BITFLOW_STACKS_API_HOST=https://api.hiro.so/ -BITFLOW_READONLY_CALL_API_HOST=https://readonly-call-api.hiro.so +# Webhook Configuration +AIBTC_WEBHOOK_URL=https://your-webhook-url.com +AIBTC_WEBHOOK_AUTH_TOKEN=Bearer your_webhook_auth_token \ No newline at end of file diff --git a/backend/models.py b/backend/models.py index d3f7b16d..f5215f90 100644 --- a/backend/models.py +++ b/backend/models.py @@ -8,7 +8,8 @@ class CustomBaseModel(BaseModel): model_config = ConfigDict( - json_encoders={UUID: str, datetime: lambda v: v.isoformat()} + json_encoders={UUID: str, datetime: lambda v: v.isoformat()}, + arbitrary_types_allowed=True, ) @@ -63,44 +64,69 @@ def __str__(self): return self.value -class QueueMessageType(str, Enum): - TWEET = "tweet" - DAO = "dao" - DAO_TWEET = "dao_tweet" - DAO_PROPOSAL_VOTE = "dao_proposal_vote" - DAO_PROPOSAL_CONCLUDE = "dao_proposal_conclude" - DAO_PROPOSAL_EVALUATION = ( - "dao_proposal_evaluation" # New type for proposal evaluation - ) - AGENT_ACCOUNT_DEPLOY = ( - "agent_account_deploy" # New type for agent account deployment - ) - DISCORD = "discord" # New type for Discord queue messages +class QueueMessageType: + """Dynamic queue message types that are registered at runtime. - def __str__(self): - return self.value + This system is compatible with the runner's dynamic JobType system. + Queue message types are registered dynamically as job tasks are discovered. 
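As a quick orientation to the dynamic type system introduced here, a minimal sketch of how `QueueMessageType` might be used once this class is in place; the `"tweet"` name is purely illustrative, since types are only registered as task modules are discovered.

```python
from backend.models import QueueMessageType

# get_or_create() is idempotent: the same normalized value always
# returns the same instance, so repeated registration is harmless.
tweet_type = QueueMessageType.get_or_create("tweet")
assert QueueMessageType.register("TWEET") is tweet_type

# Instances compare equal to plain strings, case-insensitively.
assert tweet_type == "tweet"
assert tweet_type == "Tweet"
assert str(tweet_type) == "tweet"

# Registered types can be listed for diagnostics.
print(QueueMessageType.get_all_message_types())  # {'tweet': 'tweet'}
```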
+ """ + _message_types: Dict[str, "QueueMessageType"] = {} -# -# SECRETS -# -class SecretBase(CustomBaseModel): - name: Optional[str] = None - description: Optional[str] = None - secret: Optional[str] = None - decrypted_secret: Optional[str] = None - key_id: Optional[str] = None - nonce: Optional[str] = None + def __init__(self, value: str): + self._value = value.lower() + self._name = value.upper() + @property + def value(self) -> str: + return self._value -class SecretCreate(SecretBase): - pass + @property + def name(self) -> str: + return self._name + def __str__(self) -> str: + return self._value -class Secret(SecretBase): - id: UUID - created_at: datetime - updated_at: datetime + def __repr__(self) -> str: + return f"QueueMessageType({self._value})" + + def __eq__(self, other) -> bool: + if isinstance(other, QueueMessageType): + return self._value == other._value + if isinstance(other, str): + return self._value == other.lower() + return False + + def __hash__(self) -> int: + return hash(self._value) + + @classmethod + def get_or_create(cls, message_type: str) -> "QueueMessageType": + """Get existing message type or create new one.""" + normalized = message_type.lower() + if normalized not in cls._message_types: + cls._message_types[normalized] = cls(normalized) + return cls._message_types[normalized] + + @classmethod + def register(cls, message_type: str) -> "QueueMessageType": + """Register a new message type and return the instance.""" + return cls.get_or_create(message_type) + + @classmethod + def get_all_message_types(cls) -> Dict[str, str]: + """Get all registered message types.""" + return {mt._value: mt._value for mt in cls._message_types.values()} + + @classmethod + def list_all(cls) -> List["QueueMessageType"]: + """Get all registered message type instances.""" + return list(cls._message_types.values()) + + +# Types are registered dynamically by the runner system +# No need to pre-register common types # @@ -114,6 +140,7 @@ class QueueMessageBase(CustomBaseModel): conversation_id: Optional[str] = None dao_id: Optional[UUID] = None wallet_id: Optional[UUID] = None + result: Optional[dict] = None class QueueMessageCreate(QueueMessageBase): @@ -125,6 +152,28 @@ class QueueMessage(QueueMessageBase): created_at: datetime +# +# SECRETS +# +class SecretBase(CustomBaseModel): + name: Optional[str] = None + description: Optional[str] = None + secret: Optional[str] = None + decrypted_secret: Optional[str] = None + key_id: Optional[str] = None + nonce: Optional[str] = None + + +class SecretCreate(SecretBase): + pass + + +class Secret(SecretBase): + id: UUID + created_at: datetime + updated_at: datetime + + # # WALLETS # diff --git a/config.py b/config.py index febe867c..c1b0a84a 100644 --- a/config.py +++ b/config.py @@ -41,6 +41,13 @@ class TwitterConfig: ) +@dataclass +class BackendWalletConfig: + """Configuration for backend wallet operations.""" + + seed_phrase: str = os.getenv("AIBTC_BACKEND_WALLET_SEED_PHRASE", "") + + @dataclass class TelegramConfig: token: str = os.getenv("AIBTC_TELEGRAM_BOT_TOKEN", "") @@ -61,83 +68,85 @@ class SchedulerConfig: sync_interval_seconds: int = int( os.getenv("AIBTC_SCHEDULE_SYNC_INTERVAL_SECONDS", "60") ) - dao_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_RUNNER_ENABLED", "false").lower() == "true" - ) - dao_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_RUNNER_INTERVAL_SECONDS", "30") - ) - dao_tweet_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_TWEET_RUNNER_ENABLED", "false").lower() == "true" - ) - 
dao_tweet_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_TWEET_RUNNER_INTERVAL_SECONDS", "30") - ) - tweet_runner_enabled: bool = ( - os.getenv("AIBTC_TWEET_RUNNER_ENABLED", "false").lower() == "true" - ) - tweet_runner_interval_seconds: int = int( - os.getenv("AIBTC_TWEET_RUNNER_INTERVAL_SECONDS", "30") + + # Job-specific configurations matching job_type names exactly + + # agent_account_deployer job + agent_account_deployer_enabled: bool = ( + os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOYER_ENABLED", "false").lower() == "true" ) - discord_runner_enabled: bool = ( - os.getenv("AIBTC_DISCORD_RUNNER_ENABLED", "false").lower() == "true" + agent_account_deployer_interval_seconds: int = int( + os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOYER_INTERVAL_SECONDS", "60") ) - discord_runner_interval_seconds: int = int( - os.getenv("AIBTC_DISCORD_RUNNER_INTERVAL_SECONDS", "30") + + # chain_state_monitor job + chain_state_monitor_enabled: bool = ( + os.getenv("AIBTC_CHAIN_STATE_MONITOR_ENABLED", "true").lower() == "true" ) - dao_proposal_vote_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_PROPOSAL_VOTE_RUNNER_ENABLED", "false").lower() == "true" + chain_state_monitor_interval_seconds: int = int( + os.getenv("AIBTC_CHAIN_STATE_MONITOR_INTERVAL_SECONDS", "300") ) - dao_proposal_vote_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_VOTE_RUNNER_INTERVAL_SECONDS", "60") + + # dao_deployment job + dao_deployment_enabled: bool = ( + os.getenv("AIBTC_DAO_DEPLOYMENT_ENABLED", "false").lower() == "true" ) - dao_proposal_conclude_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_ENABLED", "false").lower() - == "true" + dao_deployment_interval_seconds: int = int( + os.getenv("AIBTC_DAO_DEPLOYMENT_INTERVAL_SECONDS", "60") ) - dao_proposal_conclude_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_INTERVAL_SECONDS", "60") + + # dao_deployment_tweet job + dao_deployment_tweet_enabled: bool = ( + os.getenv("AIBTC_DAO_DEPLOYMENT_TWEET_ENABLED", "false").lower() == "true" ) - dao_proposal_conclude_runner_wallet_id: str = os.getenv( - "AIBTC_DAO_PROPOSAL_CONCLUDE_RUNNER_WALLET_ID", "" + dao_deployment_tweet_interval_seconds: int = int( + os.getenv("AIBTC_DAO_DEPLOYMENT_TWEET_INTERVAL_SECONDS", "60") ) - dao_proposal_evaluation_runner_enabled: bool = ( - os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_ENABLED", "false").lower() - == "true" + + # dao_proposal_conclude job + dao_proposal_conclude_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_ENABLED", "false").lower() == "true" ) - dao_proposal_evaluation_runner_interval_seconds: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_RUNNER_INTERVAL_SECONDS", "60") + dao_proposal_conclude_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_CONCLUDE_INTERVAL_SECONDS", "60") ) - agent_account_deploy_runner_enabled: bool = ( - os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_ENABLED", "false").lower() - == "true" + + # dao_proposal_embedder job + dao_proposal_embedder_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_EMBEDDER_ENABLED", "false").lower() == "true" ) - agent_account_deploy_runner_interval_seconds: int = int( - os.getenv("AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_INTERVAL_SECONDS", "60") + dao_proposal_embedder_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_EMBEDDER_INTERVAL_SECONDS", "300") ) - agent_account_deploy_runner_wallet_id: str = os.getenv( - "AIBTC_AGENT_ACCOUNT_DEPLOY_RUNNER_WALLET_ID", "" + + # dao_proposal_evaluation job + dao_proposal_evaluation_enabled: 
bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_ENABLED", "false").lower() == "true" ) - dao_proposal_vote_delay_blocks: int = int( - os.getenv("AIBTC_DAO_PROPOSAL_VOTE_DELAY_BLOCKS", "2") + dao_proposal_evaluation_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_EVALUATION_INTERVAL_SECONDS", "60") ) - proposal_embedder_enabled: bool = ( - os.getenv("AIBTC_PROPOSAL_EMBEDDER_ENABLED", "false").lower() == "true" + + # dao_proposal_vote job + dao_proposal_vote_enabled: bool = ( + os.getenv("AIBTC_DAO_PROPOSAL_VOTE_ENABLED", "false").lower() == "true" ) - proposal_embedder_interval_seconds: int = int( - os.getenv( - "AIBTC_PROPOSAL_EMBEDDER_INTERVAL_SECONDS", "300" - ) # Default to 5 mins + dao_proposal_vote_interval_seconds: int = int( + os.getenv("AIBTC_DAO_PROPOSAL_VOTE_INTERVAL_SECONDS", "60") ) - chain_state_monitor_enabled: bool = ( - os.getenv("AIBTC_CHAIN_STATE_MONITOR_ENABLED", "true").lower() == "true" + + # discord job + discord_enabled: bool = ( + os.getenv("AIBTC_DISCORD_ENABLED", "false").lower() == "true" ) - chain_state_monitor_interval_seconds: int = int( - os.getenv( - "AIBTC_CHAIN_STATE_MONITOR_INTERVAL_SECONDS", "300" - ) # Default to 5 mins + discord_interval_seconds: int = int( + os.getenv("AIBTC_DISCORD_INTERVAL_SECONDS", "30") ) + # tweet job + tweet_enabled: bool = os.getenv("AIBTC_TWEET_ENABLED", "false").lower() == "true" + tweet_interval_seconds: int = int(os.getenv("AIBTC_TWEET_INTERVAL_SECONDS", "30")) + @dataclass class APIConfig: @@ -175,6 +184,7 @@ class Config: api: APIConfig = field(default_factory=APIConfig) network: NetworkConfig = field(default_factory=NetworkConfig) discord: DiscordConfig = field(default_factory=DiscordConfig) + backend_wallet: BackendWalletConfig = field(default_factory=BackendWalletConfig) @classmethod def load(cls) -> "Config": diff --git a/services/llm/__init__.py b/services/llm/__init__.py new file mode 100644 index 00000000..7e3787cf --- /dev/null +++ b/services/llm/__init__.py @@ -0,0 +1,5 @@ +"""LLM services module.""" + +from .embed import EmbedService + +__all__ = ["EmbedService"] diff --git a/services/llm/embed.py b/services/llm/embed.py new file mode 100644 index 00000000..1f3682cc --- /dev/null +++ b/services/llm/embed.py @@ -0,0 +1,112 @@ +"""Embedding service implementation.""" + +from typing import List, Optional + +from langchain_openai import OpenAIEmbeddings + +from config import config +from lib.logger import configure_logger + +logger = configure_logger(__name__) + +EMBEDDING_MODEL = "text-embedding-ada-002" + + +class EmbedService: + """Service for generating text embeddings using OpenAI.""" + + def __init__(self, model_name: str = EMBEDDING_MODEL): + """Initialize the embedding service. + + Args: + model_name: The OpenAI embedding model to use + """ + self.model_name = model_name + self._embeddings_client: Optional[OpenAIEmbeddings] = None + + @property + def embeddings_client(self) -> OpenAIEmbeddings: + """Get or create the OpenAI embeddings client.""" + if self._embeddings_client is None: + if not config.api.openai_api_key: + raise ValueError("OpenAI API key not configured") + + self._embeddings_client = OpenAIEmbeddings( + model=self.model_name, openai_api_key=config.api.openai_api_key + ) + return self._embeddings_client + + async def embed_text(self, text: str) -> Optional[List[float]]: + """Generate embedding for a single text. 
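For context on how this service is meant to be consumed, a hedged usage sketch; it assumes `OPENAI_API_KEY` is set (per the updated `.env.example`) and relies only on the methods defined in this file, which return `None` on failure rather than raising.

```python
import asyncio

from services.llm import EmbedService


async def main() -> None:
    embedder = EmbedService()  # defaults to text-embedding-ada-002
    if not embedder.is_available():
        print("OpenAI API key not configured; skipping embeddings")
        return

    # Single text -> Optional[List[float]]
    vector = await embedder.embed_text("Proposal: fund the treasury audit")
    if vector is not None:
        print(f"embedding dimension: {len(vector)}")

    # Batch of texts -> Optional[List[List[float]]]
    vectors = await embedder.embed_documents(["first proposal", "second proposal"])
    print(f"generated {len(vectors or [])} embeddings")


if __name__ == "__main__":
    asyncio.run(main())
```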
+ + Args: + text: The text to embed + + Returns: + List of floats representing the embedding, or None if failed + """ + if not text or not text.strip(): + logger.warning("Empty text provided for embedding") + return None + + try: + logger.debug(f"Generating embedding for text (length: {len(text)})") + embedding = await self.embeddings_client.aembed_query(text) + logger.debug(f"Generated embedding with dimension: {len(embedding)}") + return embedding + except Exception as e: + logger.error(f"Failed to generate embedding: {str(e)}", exc_info=True) + return None + + async def embed_documents(self, texts: List[str]) -> Optional[List[List[float]]]: + """Generate embeddings for multiple texts. + + Args: + texts: List of texts to embed + + Returns: + List of embeddings, or None if failed + """ + if not texts: + logger.warning("Empty text list provided for embedding") + return None + + # Filter out empty texts + valid_texts = [text for text in texts if text and text.strip()] + if not valid_texts: + logger.warning("No valid texts found for embedding") + return None + + try: + logger.debug(f"Generating embeddings for {len(valid_texts)} texts") + embeddings = await self.embeddings_client.aembed_documents(valid_texts) + logger.debug(f"Generated {len(embeddings)} embeddings") + return embeddings + except Exception as e: + logger.error(f"Failed to generate embeddings: {str(e)}", exc_info=True) + return None + + def is_available(self) -> bool: + """Check if the embedding service is available. + + Returns: + True if the service is properly configured and available + """ + try: + return bool(config.api.openai_api_key) + except Exception as e: + logger.error(f"Error checking embedding service availability: {str(e)}") + return False + + async def test_connection(self) -> bool: + """Test the embedding service connection. + + Returns: + True if the service is working correctly + """ + try: + test_embedding = await self.embed_text("test") + return test_embedding is not None and len(test_embedding) > 0 + except Exception as e: + logger.error(f"Embedding service test failed: {str(e)}") + return False diff --git a/services/runner/README.md b/services/runner/README.md new file mode 100644 index 00000000..452e8421 --- /dev/null +++ b/services/runner/README.md @@ -0,0 +1,167 @@ +# Job Runner System - Auto-Discovery + +The job runner system uses **auto-discovery** to make adding new jobs incredibly simple. All job types are dynamically registered - there are no hardcoded job types! + +## How It Works + +The system automatically: +1. 🔍 **Discovers** all task files in `services/runner/tasks/` +2. 📝 **Registers** jobs decorated with `@job` +3. 🏗️ **Creates** JobType enums dynamically +4. ⚙️ **Configures** scheduling and execution + +**No hardcoded job types!** Everything is discovered at runtime through the `@job` decorator. + +## Adding a New Job (Super Easy!) + +### Step 1: Create Your Task File +Create a new `.py` file in `services/runner/tasks/`. That's it for file creation! 
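For reference, the resulting layout might look like the sketch below; `my_awesome_job.py` is a hypothetical file name used in the next step, and anything starting with `__` is skipped by auto-discovery.

```text
services/runner/tasks/
├── __init__.py          # ignored by discovery
├── dao_task.py          # existing tasks are picked up automatically
└── my_awesome_job.py    # your new task file, nothing else to wire up
```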
+ +### Step 2: Use the @job Decorator +```python +from dataclasses import dataclass +from typing import List + +from ..base import BaseTask, JobContext, RunnerResult +from ..decorators import JobPriority, job + +@dataclass +class MyJobResult(RunnerResult): + """Result of my job processing.""" + items_processed: int = 0 + +@job( + "my_awesome_job", # ✨ Job type - automatically creates JobType.MY_AWESOME_JOB + name="My Awesome Job", + description="Does awesome things", + interval_seconds=120, + priority=JobPriority.NORMAL, + max_concurrent=2, + requires_twitter=True, # Optional: specify requirements + enabled=True, # Optional: enable/disable +) +class MyAwesomeJobTask(BaseTask[MyJobResult]): + """My awesome job task.""" + + async def _execute_impl(self, context: JobContext) -> List[MyJobResult]: + # Your job logic here + return [MyJobResult(success=True, message="Done!", items_processed=10)] +``` + +### Step 3: That's It! +Your job is automatically: +- ✅ Discovered and registered +- ✅ JobType enum created dynamically +- ✅ Available in the job manager +- ✅ Schedulable and executable +- ✅ Configurable via environment/config + +## Dynamic Job Types + +🚀 **All job types are dynamic!** No more hardcoded enums or manual registration. + +- Job types are created automatically when you use `@job("job_type_name")` +- The system supports any job type name you want +- JobType enums are generated at runtime +- No conflicts or duplicates - each job type is unique + +## Configuration + +Jobs can be configured via environment variables or config files: + +```bash +# Enable/disable a job +MY_AWESOME_JOB_ENABLED=true + +# Override interval +MY_AWESOME_JOB_INTERVAL_SECONDS=300 + +# Alternative naming pattern (backwards compatibility) +MY_AWESOME_JOB_RUNNER_ENABLED=true +MY_AWESOME_JOB_RUNNER_INTERVAL_SECONDS=300 +``` + +## Job Decorator Options + +The `@job` decorator supports many options: + +```python +@job( + "job_type", # Required: unique job identifier + name="Human Readable Name", # Optional: display name + description="What it does", # Optional: description + + # Scheduling + interval_seconds=60, # How often to run + enabled=True, # Enable/disable + + # Execution + priority=JobPriority.NORMAL, # LOW, NORMAL, HIGH, CRITICAL + max_retries=3, # Retry attempts + retry_delay_seconds=30, # Delay between retries + timeout_seconds=300, # Execution timeout + + # Concurrency + max_concurrent=1, # Max parallel executions + batch_size=10, # Items per batch + + # Requirements + requires_wallet=True, # Needs wallet access + requires_twitter=True, # Needs Twitter API + requires_discord=True, # Needs Discord API + + # Advanced + dependencies=["other_job"], # Job dependencies + preserve_order=False, # Order sensitive? + idempotent=True, # Safe to retry? +) +``` + +## Migration from Old System + +### Before (Manual Registration Required) +1. Add job type to hardcoded `JobType` enum in `base.py` +2. Add config mapping in `job_manager.py` +3. Import and register in `__init__.py` +4. Export in `tasks/__init__.py` +5. Create the task class + +### After (Auto-Discovery) +1. Create task file with `@job` decorator +2. Done! 
🎉 + +## Benefits + +- 🚀 **Faster development**: No manual registration steps +- 🛡️ **Less error-prone**: No forgetting to register +- 🔧 **Self-documenting**: All config in one place +- 🌟 **Consistent**: Same pattern for all jobs +- 🎯 **Dynamic**: Job types created automatically +- 🔄 **No hardcoded types**: Everything discovered at runtime + +## Examples + +Check out existing task files for patterns: +- `dao_task.py` - Complex workflow-based task +- `tweet_task.py` - Media handling and chunking +- `discord_task.py` - Webhook integration +- `proposal_embedder.py` - AI service integration + +## Troubleshooting + +### Job Not Appearing? +1. Check file is in `services/runner/tasks/` +2. Check `@job` decorator is present +3. Check no syntax errors in task file +4. Check logs for import errors + +### Configuration Not Working? +1. Use naming pattern: `{job_type}_enabled` or `{job_type}_interval_seconds` +2. Check environment variables +3. Check config file settings + +### Need Help? +- Look at existing task examples +- Check the auto-discovery logs +- Use `JobRegistry.list_jobs()` to see registered jobs +- Check dynamic job types with `JobType.__class__.get_all_job_types()` \ No newline at end of file diff --git a/services/runner/__init__.py b/services/runner/__init__.py index addea28a..a5be6bb3 100644 --- a/services/runner/__init__.py +++ b/services/runner/__init__.py @@ -1,75 +1,20 @@ """Runner module for executing tasks such as DAO processing and Twitter interactions.""" +# Auto-discovery will handle task registration +from services.runner.auto_discovery import discover_and_register_tasks from services.runner.base import BaseTask, JobContext, JobType -from services.runner.job_manager import JobConfig, JobManager +from services.runner.job_manager import JobManager, JobScheduleConfig from services.runner.registry import JobRegistry, execute_runner_job -from services.runner.tasks.agent_account_deployer import ( - AgentAccountDeployerTask, - agent_account_deployer, -) -from services.runner.tasks.chain_state_monitor import ( - ChainStateMonitorTask, - chain_state_monitor, -) -from services.runner.tasks.dao_proposal_concluder import ( - DAOProposalConcluderTask, - dao_proposal_concluder, -) -from services.runner.tasks.dao_proposal_evaluation import ( - DAOProposalEvaluationTask, - dao_proposal_evaluation, -) -from services.runner.tasks.dao_proposal_voter import ( - DAOProposalVoterTask, - dao_proposal_voter, -) -from services.runner.tasks.dao_task import DAOTask, dao_task -from services.runner.tasks.dao_tweet_task import DAOTweetTask, dao_tweet_task -from services.runner.tasks.discord_task import DiscordTask, discord_task -from services.runner.tasks.proposal_embedder import ( - ProposalEmbedderTask, - proposal_embedder, -) -from services.runner.tasks.tweet_task import TweetTask, tweet_task -# Register tasks with the registry -JobRegistry.register(JobType.DAO, DAOTask) -JobRegistry.register(JobType.DAO_PROPOSAL_VOTE, DAOProposalVoterTask) -JobRegistry.register(JobType.DAO_PROPOSAL_CONCLUDE, DAOProposalConcluderTask) -JobRegistry.register(JobType.DAO_PROPOSAL_EVALUATION, DAOProposalEvaluationTask) -JobRegistry.register(JobType.DAO_TWEET, DAOTweetTask) -JobRegistry.register(JobType.TWEET, TweetTask) -JobRegistry.register(JobType.DISCORD, DiscordTask) -JobRegistry.register(JobType.AGENT_ACCOUNT_DEPLOY, AgentAccountDeployerTask) -JobRegistry.register(JobType.PROPOSAL_EMBEDDING, ProposalEmbedderTask) -JobRegistry.register(JobType.CHAIN_STATE_MONITOR, ChainStateMonitorTask) +# Ensure tasks are discovered 
and registered when module is imported +discover_and_register_tasks() __all__ = [ "BaseTask", "JobContext", "JobRegistry", "JobType", - "JobConfig", + "JobScheduleConfig", "JobManager", - "DAOTask", - "dao_task", - "DAOProposalVoterTask", - "dao_proposal_voter", - "DAOTweetTask", - "dao_tweet_task", - "TweetTask", - "tweet_task", - "DiscordTask", - "discord_task", "execute_runner_job", - "DAOProposalConcluderTask", - "dao_proposal_concluder", - "DAOProposalEvaluationTask", - "dao_proposal_evaluation", - "AgentAccountDeployerTask", - "agent_account_deployer", - "ProposalEmbedderTask", - "proposal_embedder", - "ChainStateMonitorTask", - "chain_state_monitor", ] diff --git a/services/runner/auto_discovery.py b/services/runner/auto_discovery.py new file mode 100644 index 00000000..49937807 --- /dev/null +++ b/services/runner/auto_discovery.py @@ -0,0 +1,133 @@ +"""Auto-discovery module for job tasks.""" + +import importlib +from pathlib import Path + +from lib.logger import configure_logger + +from .decorators import JobRegistry + +logger = configure_logger(__name__) + + +def discover_and_register_tasks() -> None: + """Discover and register all job tasks from the tasks directory.""" + try: + tasks_dir = Path(__file__).parent / "tasks" + if not tasks_dir.exists(): + logger.warning(f"Tasks directory not found: {tasks_dir}") + return + + # Import all Python modules in the tasks directory + tasks_package = "services.runner.tasks" + discovered_modules = [] + + # Get all .py files in the tasks directory + for file_path in tasks_dir.glob("*.py"): + if file_path.name.startswith("__"): + continue # Skip __init__.py and __pycache__ + + module_name = file_path.stem + full_module_name = f"{tasks_package}.{module_name}" + + try: + logger.debug(f"Importing task module: {full_module_name}") + importlib.import_module(full_module_name) + discovered_modules.append(module_name) + logger.debug(f"Successfully imported: {full_module_name}") + except ImportError as e: + logger.warning( + f"Failed to import task module {full_module_name}: {str(e)}" + ) + except Exception as e: + logger.error( + f"Error importing task module {full_module_name}: {str(e)}", + exc_info=True, + ) + + # Log discovered tasks + registered_tasks = JobRegistry.list_jobs() + if registered_tasks: + logger.info( + f"Auto-discovered and registered {len(registered_tasks)} job tasks from {len(discovered_modules)} modules:" + ) + for job_type, metadata in registered_tasks.items(): + logger.info( + f" - {job_type}: {metadata.name} (enabled: {metadata.enabled}, interval: {metadata.interval_seconds}s)" + ) + else: + logger.warning("No job tasks were discovered and registered") + + # Validate dependencies + dependency_issues = JobRegistry.validate_dependencies() + if dependency_issues: + logger.warning("Dependency validation issues found:") + for issue in dependency_issues: + logger.warning(f" - {issue}") + else: + logger.debug("All job dependencies validated successfully") + + # Log dynamic job types that were created + from .base import JobType + + all_job_types = JobType.get_all_job_types() + if all_job_types: + logger.info( + f"Dynamic job types registered: {', '.join(all_job_types.keys())}" + ) + + except Exception as e: + logger.error(f"Error during task discovery: {str(e)}", exc_info=True) + + +def reload_tasks() -> None: + """Reload all tasks (useful for development).""" + logger.info("Reloading all job tasks...") + + # Clear existing registry + JobRegistry.clear_registry() + + # Clear dynamic job types + from .base import JobType + + 
JobType._job_types = {} + + # Re-discover tasks + discover_and_register_tasks() + + logger.info("Task reload completed") + + +def get_task_summary() -> dict: + """Get a summary of all discovered tasks.""" + registered_tasks = JobRegistry.list_jobs() + enabled_tasks = JobRegistry.list_enabled_jobs() + + summary = { + "total_tasks": len(registered_tasks), + "enabled_tasks": len(enabled_tasks), + "disabled_tasks": len(registered_tasks) - len(enabled_tasks), + "tasks_by_priority": {}, + "tasks_by_type": {}, + "dependency_issues": JobRegistry.validate_dependencies(), + "dynamic_job_types": list(registered_tasks.keys()), + } + + # Group by priority + for job_type, metadata in registered_tasks.items(): + priority = str(metadata.priority) + if priority not in summary["tasks_by_priority"]: + summary["tasks_by_priority"][priority] = [] + summary["tasks_by_priority"][priority].append(str(job_type)) + + # Group by type (enabled/disabled) + summary["tasks_by_type"]["enabled"] = [str(jt) for jt in enabled_tasks.keys()] + summary["tasks_by_type"]["disabled"] = [ + str(jt) for jt, meta in registered_tasks.items() if not meta.enabled + ] + + return summary + + +# Auto-discover tasks when this module is imported +discover_and_register_tasks() diff --git a/services/runner/base.py b/services/runner/base.py index 97c8a9cb..f055cf1d 100644 --- a/services/runner/base.py +++ b/services/runner/base.py @@ -1,7 +1,6 @@ import os from abc import ABC, abstractmethod -from dataclasses import dataclass -from enum import Enum +from dataclasses import dataclass, field from typing import Any, Dict, Generic, List, Optional, Type, TypeVar from uuid import UUID @@ -66,22 +65,69 @@ def from_env(cls) -> "RunnerConfig": ) -class JobType(str, Enum): - """Types of jobs that can be run.""" +class JobType: + """Dynamic job types that are registered at runtime via auto-discovery. - DAO = "dao" - DAO_PROPOSAL_VOTE = "dao_proposal_vote" - DAO_PROPOSAL_CONCLUDE = "dao_proposal_conclude" - DAO_PROPOSAL_EVALUATION = "dao_proposal_evaluation" - DAO_TWEET = "dao_tweet" - TWEET = "tweet" - DISCORD = "discord" - AGENT_ACCOUNT_DEPLOY = "agent_account_deploy" - PROPOSAL_EMBEDDING = "proposal_embedding" - CHAIN_STATE_MONITOR = "chain_state_monitor" + No hardcoded job types - all jobs are discovered and registered dynamically + using the @job decorator in task files. 
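A short sketch of the behaviour this dynamic `JobType` provides, using only the methods defined here; the `"chain_state_monitor"` name mirrors the job configured in `.env.example` and is assumed to be registered by its task module via `@job`.

```python
from services.runner.base import JobType

# The @job decorator ends up calling get_or_create(), so the first
# reference to a name creates the type and later references reuse it.
monitor = JobType.get_or_create("chain_state_monitor")

# Comparisons work against other instances and against plain strings,
# which is convenient when job types arrive as queue message strings.
assert monitor == "chain_state_monitor"
assert monitor == "CHAIN_STATE_MONITOR"
assert monitor == JobType.get_or_create("chain_state_monitor")

# Introspection helpers used by auto-discovery logging.
print(JobType.get_all_job_types())  # {'chain_state_monitor': 'chain_state_monitor'}
print(JobType.list_all())           # [JobType.CHAIN_STATE_MONITOR]
```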
+ """ - def __str__(self): - return self.value + _job_types: Dict[str, "JobType"] = {} + + def __init__(self, value: str): + self._value = value.lower() + self._name = value.upper() + + @property + def value(self) -> str: + return self._value + + @property + def name(self) -> str: + return self._name + + def __str__(self) -> str: + return self._value + + def __repr__(self) -> str: + return f"JobType.{self._name}" + + def __eq__(self, other) -> bool: + if isinstance(other, JobType): + return self._value == other._value + if isinstance(other, str): + return self._value == other.lower() + return False + + def __hash__(self) -> int: + return hash(self._value) + + @classmethod + def get_or_create(cls, job_type: str) -> "JobType": + """Get existing job type or create new one.""" + normalized = job_type.lower() + if normalized not in cls._job_types: + cls._job_types[normalized] = cls(normalized) + return cls._job_types[normalized] + + @classmethod + def register(cls, job_type: str) -> "JobType": + """Register a new job type and return the enum member.""" + return cls.get_or_create(job_type) + + @classmethod + def get_all_job_types(cls) -> Dict[str, str]: + """Get all registered job types.""" + return {jt._value: jt._value for jt in cls._job_types.values()} + + @classmethod + def list_all(cls) -> List["JobType"]: + """Get all registered job type instances.""" + return list(cls._job_types.values()) + + def __call__(self, value: str) -> "JobType": + """Allow calling like an enum constructor.""" + return self.get_or_create(value) @dataclass @@ -94,6 +140,13 @@ class JobContext: retry_count: int = 0 max_retries: int = 3 + # Enhanced context fields + execution_id: Optional[str] = None + worker_name: Optional[str] = None + timeout_seconds: Optional[int] = None + priority: Optional[str] = None + metadata: Dict[str, Any] = field(default_factory=dict) + class BaseTask(ABC, Generic[T]): """Base class for all tasks.""" @@ -147,7 +200,8 @@ async def validate(self, context: JobContext) -> bool: This method provides a validation pipeline: 1. Configuration validation 2. Prerequisites validation - 3. Task-specific validation + 3. Resource availability validation + 4. 
Task-specific validation """ try: logger.debug(f"Starting validation for {self.task_name}") @@ -162,7 +216,12 @@ async def validate(self, context: JobContext) -> bool: logger.debug(f"{self.task_name}: Prerequisites validation failed") return False - # Step 3: Task-specific validation + # Step 3: Resource availability validation + if not await self._validate_resources(context): + logger.debug(f"{self.task_name}: Resource validation failed") + return False + + # Step 4: Task-specific validation if not await self._validate_task_specific(context): logger.debug(f"{self.task_name}: Task-specific validation failed") return False @@ -183,6 +242,10 @@ async def _validate_prerequisites(self, context: JobContext) -> bool: """Validate task prerequisites.""" return True + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability (network, APIs, etc.).""" + return True + async def _validate_task_specific(self, context: JobContext) -> bool: """Validate task-specific conditions.""" return True @@ -190,21 +253,89 @@ async def _validate_task_specific(self, context: JobContext) -> bool: async def execute(self, context: JobContext) -> List[T]: """Execute the task with given context.""" self._log_task_start() + results = [] + try: - results = await self._execute_impl(context) + # Prepare context + prepared_context = await self._prepare_context(context) + + # Execute the task implementation + results = await self._execute_impl(prepared_context) self._log_task_completion(results) - return results + except Exception as e: logger.error(f"Error executing {self.task_name}: {str(e)}", exc_info=True) - result_class = self.get_result_class() - return [ - result_class( - success=False, message=f"Error executing task: {str(e)}", error=e + + # Try custom error handling + recovery_results = await self._handle_execution_error(e, context) + if recovery_results is not None: + results = recovery_results + logger.info(f"Task {self.task_name} recovered from error: {str(e)}") + else: + # Default error handling + result_class = self.get_result_class() + results = [ + result_class( + success=False, + message=f"Error executing task: {str(e)}", + error=e, + ) + ] + + finally: + # Always perform cleanup + try: + await self._post_execution_cleanup(context, results) + except Exception as cleanup_error: + logger.warning( + f"Cleanup error in {self.task_name}: {str(cleanup_error)}" ) - ] + + return results @abstractmethod async def _execute_impl(self, context: JobContext) -> List[T]: """Implementation of task execution logic. This method should be implemented by subclasses.""" pass + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[T]]: + """Handle execution errors with recovery logic. + + Override this method to implement custom error recovery. + Return None to use default error handling, or return results + to continue as if execution succeeded. + """ + return None + + async def _post_execution_cleanup( + self, context: JobContext, results: List[T] + ) -> None: + """Perform cleanup after task execution. + + This is called after both successful and failed executions. + Override this method to implement custom cleanup logic. + """ + pass + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if a specific error should trigger a retry. + + Override this method to implement custom retry logic based on error type. 
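To make the new hooks concrete, a hedged sketch of a task subclass that overrides them; the class, result type, and error-handling choices are hypothetical examples, not part of this PR.

```python
from dataclasses import dataclass
from typing import List, Optional

from services.runner.base import BaseTask, JobContext, RunnerResult


@dataclass
class SyncResult(RunnerResult):
    """Hypothetical result type for this sketch."""

    synced: int = 0


class ExampleSyncTask(BaseTask[SyncResult]):
    async def _execute_impl(self, context: JobContext) -> List[SyncResult]:
        return [SyncResult(success=True, message="ok", synced=3)]

    def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool:
        # Retry only transient network problems; give up on everything else.
        return isinstance(error, (ConnectionError, TimeoutError))

    async def _handle_execution_error(
        self, error: Exception, context: JobContext
    ) -> Optional[List[SyncResult]]:
        # Returning a result list counts as recovery; returning None falls
        # back to the default failure handling in execute().
        if isinstance(error, TimeoutError):
            return [SyncResult(success=False, message="timed out, treating as soft failure")]
        return None

    async def _post_execution_cleanup(
        self, context: JobContext, results: List[SyncResult]
    ) -> None:
        # Runs after success and failure alike; release connections here.
        pass
```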
+ """ + # Default: retry on network errors, API timeouts, temporary failures + retry_errors = ( + ConnectionError, + TimeoutError, + # Add more error types as needed + ) + return isinstance(error, retry_errors) + + async def _prepare_context(self, context: JobContext) -> JobContext: + """Prepare and enrich the job context before execution. + + Override this method to add task-specific context data. + """ + return context diff --git a/services/runner/decorators.py b/services/runner/decorators.py new file mode 100644 index 00000000..aeb37b0c --- /dev/null +++ b/services/runner/decorators.py @@ -0,0 +1,272 @@ +"""Job registration decorators and metadata system.""" + +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union + +from lib.logger import configure_logger + +from .base import BaseTask, JobType + +logger = configure_logger(__name__) + +T = TypeVar("T", bound=BaseTask) + + +class JobPriority(Enum): + """Job execution priority levels.""" + + LOW = 1 + NORMAL = 2 + MEDIUM = 3 + HIGH = 4 + CRITICAL = 5 + + def __str__(self): + return self.name.lower() + + +@dataclass +class JobMetadata: + """Metadata for job configuration and execution.""" + + # Basic job information + job_type: JobType + name: str + description: str = "" + version: str = "1.0.0" + + # Execution configuration + enabled: bool = True + interval_seconds: int = 60 + priority: JobPriority = JobPriority.NORMAL + max_retries: int = 3 + retry_delay_seconds: int = 30 + timeout_seconds: Optional[int] = None + + # Concurrency settings + max_concurrent: int = 1 + batch_size: int = 10 + + # Dependencies and requirements + requires_wallet: bool = False + requires_twitter: bool = False + requires_discord: bool = False + requires_blockchain: bool = False + requires_ai: bool = False + dependencies: List[str] = field(default_factory=list) + + # Advanced settings + enable_dead_letter_queue: bool = True + preserve_order: bool = False + idempotent: bool = False + + # Configuration overrides + config_overrides: Dict[str, Any] = field(default_factory=dict) + + +class JobRegistry: + """Enhanced job registry with auto-discovery and metadata.""" + + _jobs: Dict[JobType, Type[BaseTask]] = {} + _metadata: Dict[JobType, JobMetadata] = {} + _instances: Dict[JobType, BaseTask] = {} + + @classmethod + def register( + cls, + job_type: Union[JobType, str], + metadata: Optional[JobMetadata] = None, + **kwargs, + ) -> Callable[[Type[T]], Type[T]]: + """Decorator to register a job task with metadata. 
+ + Args: + job_type: The job type enum or string + metadata: Optional job metadata + **kwargs: Additional metadata fields + + Returns: + Decorator function + + Example: + @JobRegistry.register( + "new_job_type", # Can use string - will auto-create JobType + name="New Job", + description="Does new job things", + interval_seconds=120, + max_concurrent=2 + ) + class NewJobTask(BaseTask[NewJobResult]): + pass + """ + + def decorator(task_class: Type[T]) -> Type[T]: + # Convert string to JobType or create new one + if isinstance(job_type, str): + job_enum = JobType.get_or_create(job_type) + logger.info(f"Auto-registered job type: {job_type} -> {job_enum}") + else: + job_enum = job_type + + # Create metadata if not provided + if metadata is None: + # Extract metadata from kwargs or use defaults + meta = JobMetadata( + job_type=job_enum, + name=kwargs.get("name", task_class.__name__), + description=kwargs.get("description", task_class.__doc__ or ""), + **{ + k: v + for k, v in kwargs.items() + if k not in ["name", "description"] + }, + ) + else: + # Update metadata with any additional kwargs + for key, value in kwargs.items(): + if hasattr(metadata, key): + setattr(metadata, key, value) + meta = metadata + + # Register the task + cls._jobs[job_enum] = task_class + cls._metadata[job_enum] = meta + + logger.info( + f"Registered job: {job_enum} -> {task_class.__name__} " + f"(enabled: {meta.enabled}, interval: {meta.interval_seconds}s)" + ) + + return task_class + + return decorator + + @classmethod + def get_task_class(cls, job_type: JobType) -> Optional[Type[BaseTask]]: + """Get the task class for a job type.""" + return cls._jobs.get(job_type) + + @classmethod + def get_metadata(cls, job_type: JobType) -> Optional[JobMetadata]: + """Get the metadata for a job type.""" + return cls._metadata.get(job_type) + + @classmethod + def get_instance(cls, job_type: JobType) -> Optional[BaseTask]: + """Get or create a task instance for a job type.""" + if job_type not in cls._instances: + task_class = cls.get_task_class(job_type) + if task_class: + cls._instances[job_type] = task_class() + return cls._instances.get(job_type) + + @classmethod + def list_jobs(cls) -> Dict[JobType, JobMetadata]: + """List all registered jobs and their metadata.""" + return cls._metadata.copy() + + @classmethod + def list_enabled_jobs(cls) -> Dict[JobType, JobMetadata]: + """List only enabled jobs.""" + return { + job_type: metadata + for job_type, metadata in cls._metadata.items() + if metadata.enabled + } + + @classmethod + def get_jobs_by_priority(cls, priority: JobPriority) -> Dict[JobType, JobMetadata]: + """Get jobs filtered by priority.""" + return { + job_type: metadata + for job_type, metadata in cls._metadata.items() + if metadata.priority == priority + } + + @classmethod + def clear_registry(cls) -> None: + """Clear all registered jobs (useful for testing).""" + cls._jobs.clear() + cls._metadata.clear() + cls._instances.clear() + + @classmethod + def validate_dependencies(cls) -> List[str]: + """Validate job dependencies and return any issues.""" + issues = [] + all_job_types = set(cls._jobs.keys()) + + for job_type, metadata in cls._metadata.items(): + for dep in metadata.dependencies: + try: + dep_type = JobType.get_or_create(dep) + if dep_type not in all_job_types: + issues.append( + f"Job {job_type} depends on unregistered job: {dep}" + ) + except Exception: + issues.append(f"Job {job_type} has invalid dependency: {dep}") + + return issues + + @classmethod + def get_all_job_types(cls) -> List[str]: + """Get all 
registered job type strings.""" + return [str(job_type) for job_type in cls._jobs.keys()] + + +# Convenience function for job registration +def job( + job_type: Union[JobType, str], + name: Optional[str] = None, + description: Optional[str] = None, + **kwargs, +) -> Callable[[Type[T]], Type[T]]: + """Convenience decorator for job registration. + + Args: + job_type: The job type (can be string - will auto-create JobType) + name: Human-readable job name + description: Job description + **kwargs: Additional metadata fields + + Example: + @job("my_new_job", name="My New Job", interval_seconds=30) + class MyNewJobTask(BaseTask[MyJobResult]): + pass + """ + return JobRegistry.register( + job_type=job_type, + name=name, + description=description, + **kwargs, + ) + + +# Convenience function for quick job registration with metadata +def scheduled_job( + job_type: Union[JobType, str], + interval_seconds: int, + name: Optional[str] = None, + **kwargs, +) -> Callable[[Type[T]], Type[T]]: + """Decorator for scheduled jobs with interval configuration. + + Args: + job_type: The job type (can be string - will auto-create JobType) + interval_seconds: How often to run the job + name: Human-readable job name + **kwargs: Additional metadata fields + + Example: + @scheduled_job("my_scheduled_job", 120, name="My Scheduled Job") + class MyScheduledJobTask(BaseTask[MyJobResult]): + pass + """ + return JobRegistry.register( + job_type=job_type, + interval_seconds=interval_seconds, + name=name, + **kwargs, + ) diff --git a/services/runner/execution.py b/services/runner/execution.py new file mode 100644 index 00000000..61b16755 --- /dev/null +++ b/services/runner/execution.py @@ -0,0 +1,430 @@ +"""Enhanced job execution system with scalability features.""" + +import asyncio +import time +from dataclasses import dataclass, field +from datetime import datetime, timedelta +from enum import Enum +from typing import Any, Dict, List, Optional, Set +from uuid import UUID + +from backend.factory import backend +from backend.models import QueueMessage, QueueMessageBase, QueueMessageFilter +from lib.logger import configure_logger + +from .base import JobContext, JobType +from .decorators import JobMetadata, JobPriority, JobRegistry + +logger = configure_logger(__name__) + + +class JobStatus(Enum): + """Job execution status.""" + + PENDING = "pending" + RUNNING = "running" + COMPLETED = "completed" + FAILED = "failed" + RETRYING = "retrying" + DEAD_LETTER = "dead_letter" + + +@dataclass +class JobExecution: + """Track individual job execution.""" + + id: UUID + job_type: JobType + status: JobStatus = JobStatus.PENDING + attempt: int = 1 + max_attempts: int = 3 + started_at: Optional[datetime] = None + completed_at: Optional[datetime] = None + error: Optional[str] = None + result: Optional[Any] = None + retry_after: Optional[datetime] = None + metadata: Dict[str, Any] = field(default_factory=dict) + + +class PriorityQueue: + """Priority-based job queue with concurrency control.""" + + def __init__(self): + self._queues: Dict[JobPriority, asyncio.Queue] = { + priority: asyncio.Queue() for priority in JobPriority + } + self._active_jobs: Dict[JobType, Set[UUID]] = {} + self._semaphores: Dict[JobType, asyncio.Semaphore] = {} + self._executions: Dict[UUID, JobExecution] = {} + + async def enqueue( + self, message: QueueMessage, priority: JobPriority = JobPriority.NORMAL + ) -> UUID: + """Add a job to the priority queue.""" + # Convert message type to JobType, handling both DynamicQueueMessageType and string + type_value = ( + 
message.type.value if hasattr(message.type, "value") else str(message.type) + ) + job_type = JobType.get_or_create(type_value) + execution = JobExecution( + id=message.id, job_type=job_type, metadata={"message": message} + ) + + self._executions[message.id] = execution + await self._queues[priority].put(execution) + + logger.debug(f"Enqueued job {message.id} with priority {priority}") + return message.id + + async def dequeue(self, priority: JobPriority) -> Optional[JobExecution]: + """Get next job from priority queue.""" + try: + # Try to get a job without blocking + execution = self._queues[priority].get_nowait() + return execution + except asyncio.QueueEmpty: + return None + + async def get_next_job(self) -> Optional[JobExecution]: + """Get the next job from highest priority queue.""" + # Check queues in priority order (highest first) + for priority in reversed(list(JobPriority)): + execution = await self.dequeue(priority) + if execution: + return execution + return None + + def set_concurrency_limit(self, job_type: JobType, max_concurrent: int) -> None: + """Set concurrency limit for a job type.""" + self._semaphores[job_type] = asyncio.Semaphore(max_concurrent) + self._active_jobs[job_type] = set() + + async def acquire_slot(self, job_type: JobType, job_id: UUID) -> bool: + """Acquire a concurrency slot for job execution.""" + if job_type not in self._semaphores: + return True # No limit set + + semaphore = self._semaphores[job_type] + try: + await asyncio.wait_for(semaphore.acquire(), timeout=0.1) + self._active_jobs[job_type].add(job_id) + return True + except asyncio.TimeoutError: + return False # No slots available + + def release_slot(self, job_type: JobType, job_id: UUID) -> None: + """Release a concurrency slot.""" + if job_type in self._semaphores: + self._semaphores[job_type].release() + if job_type in self._active_jobs: + self._active_jobs[job_type].discard(job_id) + + def get_execution(self, job_id: UUID) -> Optional[JobExecution]: + """Get job execution by ID.""" + return self._executions.get(job_id) + + def update_execution(self, job_id: UUID, **kwargs) -> None: + """Update job execution status.""" + if job_id in self._executions: + execution = self._executions[job_id] + for key, value in kwargs.items(): + if hasattr(execution, key): + setattr(execution, key, value) + + +class RetryManager: + """Manages job retry logic with exponential backoff.""" + + @staticmethod + def should_retry(execution: JobExecution, metadata: JobMetadata) -> bool: + """Determine if a job should be retried.""" + if execution.attempt >= metadata.max_retries: + return False + + # Check if enough time has passed for retry + if execution.retry_after and datetime.now() < execution.retry_after: + return False + + return True + + @staticmethod + def calculate_retry_delay( + attempt: int, base_delay: int = 30, max_delay: int = 3600 + ) -> int: + """Calculate retry delay with exponential backoff.""" + delay = base_delay * (2 ** (attempt - 1)) + return min(delay, max_delay) + + @staticmethod + def schedule_retry(execution: JobExecution, metadata: JobMetadata) -> None: + """Schedule a job for retry.""" + delay = RetryManager.calculate_retry_delay( + execution.attempt, metadata.retry_delay_seconds + ) + execution.retry_after = datetime.now() + timedelta(seconds=delay) + execution.status = JobStatus.RETRYING + execution.attempt += 1 + + logger.info( + f"Scheduling retry for job {execution.id} " + f"(attempt {execution.attempt}) in {delay} seconds" + ) + + +class DeadLetterQueue: + """Handles jobs that have 
failed all retry attempts.""" + + def __init__(self): + self._dead_jobs: Dict[UUID, JobExecution] = {} + + def add_dead_job(self, execution: JobExecution) -> None: + """Add a job to the dead letter queue.""" + execution.status = JobStatus.DEAD_LETTER + execution.completed_at = datetime.now() + self._dead_jobs[execution.id] = execution + + logger.error( + f"Job {execution.id} moved to dead letter queue after " + f"{execution.attempt} attempts. Error: {execution.error}" + ) + + def get_dead_jobs(self) -> List[JobExecution]: + """Get all jobs in the dead letter queue.""" + return list(self._dead_jobs.values()) + + def remove_dead_job(self, job_id: UUID) -> Optional[JobExecution]: + """Remove a job from the dead letter queue.""" + return self._dead_jobs.pop(job_id, None) + + +class JobExecutor: + """Enhanced job executor with scalability features.""" + + def __init__(self): + self.priority_queue = PriorityQueue() + self.retry_manager = RetryManager() + self.dead_letter_queue = DeadLetterQueue() + self._running = False + self._worker_tasks: List[asyncio.Task] = [] + + async def start(self, num_workers: int = 5) -> None: + """Start the job executor with specified number of workers.""" + if self._running: + logger.warning("JobExecutor is already running") + return + + self._running = True + + # Initialize concurrency limits from job metadata + for job_type, metadata in JobRegistry.list_jobs().items(): + self.priority_queue.set_concurrency_limit(job_type, metadata.max_concurrent) + + # Start worker tasks + for i in range(num_workers): + task = asyncio.create_task(self._worker(f"worker-{i}")) + self._worker_tasks.append(task) + + logger.info(f"Started JobExecutor with {num_workers} workers") + + async def stop(self) -> None: + """Stop the job executor.""" + if not self._running: + return + + self._running = False + + # Cancel all worker tasks + for task in self._worker_tasks: + task.cancel() + + # Wait for tasks to complete + if self._worker_tasks: + await asyncio.gather(*self._worker_tasks, return_exceptions=True) + + self._worker_tasks.clear() + logger.info("Stopped JobExecutor") + + async def _worker(self, worker_name: str) -> None: + """Worker coroutine that processes jobs from the queue.""" + logger.debug(f"Starting worker: {worker_name}") + + while self._running: + try: + # Get next job from priority queue + execution = await self.priority_queue.get_next_job() + if not execution: + await asyncio.sleep(0.1) # Brief pause if no jobs + continue + + # Check if we can acquire a slot for this job type + acquired = await self.priority_queue.acquire_slot( + execution.job_type, execution.id + ) + if not acquired: + # Put job back in queue and try later + metadata = JobRegistry.get_metadata(execution.job_type) + if metadata: + await self.priority_queue.enqueue( + execution.metadata["message"], metadata.priority + ) + await asyncio.sleep(0.5) + continue + + # Execute the job + try: + await self._execute_job(execution, worker_name) + finally: + # Always release the slot + self.priority_queue.release_slot(execution.job_type, execution.id) + + except Exception as e: + logger.error(f"Worker {worker_name} error: {str(e)}", exc_info=True) + await asyncio.sleep(1) # Pause on error + + async def _execute_job(self, execution: JobExecution, worker_name: str) -> None: + """Execute a single job.""" + job_id = execution.id + job_type = execution.job_type + start_time = time.time() + + logger.debug(f"{worker_name} executing job {job_id} ({job_type})") + + # Record execution start in metrics + from .monitoring import 
get_metrics_collector + + metrics = get_metrics_collector() + metrics.record_execution_start(execution, worker_name) + + # Update execution status + self.priority_queue.update_execution( + job_id, status=JobStatus.RUNNING, started_at=datetime.now() + ) + + try: + # Get job metadata and task instance + metadata = JobRegistry.get_metadata(job_type) + task_instance = JobRegistry.get_instance(job_type) + + if not metadata or not task_instance: + raise ValueError(f"Job type {job_type} not properly registered") + + # Create job context + from .base import RunnerConfig + + context = JobContext( + job_type=job_type, + config=RunnerConfig.from_env(), + retry_count=execution.attempt - 1, + max_retries=metadata.max_retries, + ) + + # Execute the task with timeout + if metadata.timeout_seconds: + results = await asyncio.wait_for( + task_instance.execute(context), timeout=metadata.timeout_seconds + ) + else: + results = await task_instance.execute(context) + + # Calculate execution duration + duration = time.time() - start_time + + # Update execution with results + self.priority_queue.update_execution( + job_id, + status=JobStatus.COMPLETED, + completed_at=datetime.now(), + result=results, + ) + + # Record successful execution in metrics + metrics.record_execution_completion(execution, duration) + + # Mark message as processed in database + message = execution.metadata["message"] + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(is_processed=True), + ) + + logger.info(f"{worker_name} completed job {job_id} in {duration:.2f}s") + + except Exception as e: + error_msg = str(e) + duration = time.time() - start_time + + logger.error(f"{worker_name} job {job_id} failed: {error_msg}") + + # Record failed execution in metrics + metrics.record_execution_failure(execution, error_msg, duration) + + # Update execution with error + self.priority_queue.update_execution( + job_id, status=JobStatus.FAILED, error=error_msg + ) + + # Handle retry or dead letter + metadata = JobRegistry.get_metadata(job_type) + if metadata and self.retry_manager.should_retry(execution, metadata): + metrics.record_execution_retry(execution) + self.retry_manager.schedule_retry(execution, metadata) + # Re-enqueue for retry + message = execution.metadata["message"] + await self.priority_queue.enqueue(message, metadata.priority) + else: + # Move to dead letter queue + metrics.record_dead_letter(execution) + self.dead_letter_queue.add_dead_job(execution) + + async def enqueue_pending_jobs(self) -> int: + """Load pending jobs from database and enqueue them.""" + enqueued_count = 0 + + for job_type, metadata in JobRegistry.list_enabled_jobs().items(): + try: + # Get pending messages for this job type + filters = QueueMessageFilter(type=job_type.value, is_processed=False) + pending_messages = backend.list_queue_messages(filters=filters) + + # Enqueue each message + for message in pending_messages: + await self.priority_queue.enqueue(message, metadata.priority) + enqueued_count += 1 + + if pending_messages: + logger.debug(f"Enqueued {len(pending_messages)} {job_type} jobs") + + except Exception as e: + logger.error( + f"Error enqueuing jobs for {job_type}: {str(e)}", exc_info=True + ) + + if enqueued_count > 0: + logger.info(f"Enqueued {enqueued_count} pending jobs") + + return enqueued_count + + def get_stats(self) -> Dict[str, Any]: + """Get executor statistics.""" + stats = { + "running": self._running, + "worker_count": len(self._worker_tasks), + "dead_letter_count": 
len(self.dead_letter_queue.get_dead_jobs()), + "active_jobs": { + str(job_type): len(jobs) + for job_type, jobs in self.priority_queue._active_jobs.items() + }, + } + return stats + + +# Global executor instance +_executor: Optional[JobExecutor] = None + + +def get_executor() -> JobExecutor: + """Get the global job executor instance.""" + global _executor + if _executor is None: + _executor = JobExecutor() + return _executor diff --git a/services/runner/job_manager.py b/services/runner/job_manager.py index 42ec8652..6510bff3 100644 --- a/services/runner/job_manager.py +++ b/services/runner/job_manager.py @@ -1,207 +1,379 @@ -"""Job management utilities for the runner service.""" +"""Enhanced Job Manager using the new job queue system.""" from dataclasses import dataclass -from typing import Any, Callable, List, Optional, cast +from typing import Any, Dict, List, Optional from apscheduler.schedulers.asyncio import AsyncIOScheduler from config import config from lib.logger import configure_logger -from .base import JobType -from .registry import execute_runner_job +from .auto_discovery import get_task_summary +from .decorators import JobMetadata, JobRegistry +from .execution import get_executor +from .monitoring import get_metrics_collector, get_performance_monitor logger = configure_logger(__name__) @dataclass -class JobConfig: - """Configuration for a scheduled job.""" +class JobScheduleConfig: + """Enhanced configuration for scheduled jobs.""" - name: str + job_type: str + metadata: JobMetadata enabled: bool - func: Callable - seconds: int - args: Optional[List[Any]] = None - job_id: Optional[str] = None + scheduler_id: str + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for API responses.""" + return { + "job_type": self.job_type, + "name": self.metadata.name, + "description": self.metadata.description, + "enabled": self.enabled, + "interval_seconds": self.metadata.interval_seconds, + "priority": str(self.metadata.priority), + "max_retries": self.metadata.max_retries, + "max_concurrent": self.metadata.max_concurrent, + "requires_twitter": self.metadata.requires_twitter, + "requires_discord": self.metadata.requires_discord, + "requires_wallet": self.metadata.requires_wallet, + "scheduler_id": self.scheduler_id, + } class JobManager: - """Manager for scheduled jobs.""" - - @staticmethod - def get_all_jobs() -> List[JobConfig]: - """Get configurations for all available jobs. 
- - Returns: - List of job configurations - """ - # Static configurations for built-in jobs - jobs = [ - JobConfig( - name="Twitter Service", - enabled=config.twitter.enabled, - func=cast( - Callable, "execute_twitter_job" - ), # Import at runtime to avoid circular imports - seconds=config.twitter.interval_seconds, - job_id="twitter_service", - ), - JobConfig( - name="Schedule Sync Service", - enabled=config.scheduler.sync_enabled, - func=cast( - Callable, "sync_schedules" - ), # Import at runtime to avoid circular imports - seconds=config.scheduler.sync_interval_seconds, - args=[ - "scheduler" - ], # Special case - will be replaced with actual scheduler - job_id="schedule_sync_service", - ), - ] - - # Add runner jobs (could be extended with more job types) - runner_jobs = [ - ( - "DAO Runner Service", - config.scheduler.dao_runner_enabled, - config.scheduler.dao_runner_interval_seconds, - JobType.DAO.value, - ), - ( - "DAO Tweet Runner Service", - config.scheduler.dao_tweet_runner_enabled, - config.scheduler.dao_tweet_runner_interval_seconds, - JobType.DAO_TWEET.value, - ), - ( - "Tweet Runner Service", - config.scheduler.tweet_runner_enabled, - config.scheduler.tweet_runner_interval_seconds, - JobType.TWEET.value, - ), - ( - "Discord Runner Service", - config.scheduler.discord_runner_enabled, - config.scheduler.discord_runner_interval_seconds, - JobType.DISCORD.value, - ), - ( - "DAO Proposal Vote Runner Service", - config.scheduler.dao_proposal_vote_runner_enabled, - config.scheduler.dao_proposal_vote_runner_interval_seconds, - JobType.DAO_PROPOSAL_VOTE.value, - ), - ( - "DAO Proposal Conclude Runner Service", - config.scheduler.dao_proposal_conclude_runner_enabled, - config.scheduler.dao_proposal_conclude_runner_interval_seconds, - JobType.DAO_PROPOSAL_CONCLUDE.value, - ), - ( - "DAO Proposal Evaluation Runner Service", - config.scheduler.dao_proposal_evaluation_runner_enabled, - config.scheduler.dao_proposal_evaluation_runner_interval_seconds, - JobType.DAO_PROPOSAL_EVALUATION.value, - ), - ( - "Agent Account Deploy Runner Service", - config.scheduler.agent_account_deploy_runner_enabled, - config.scheduler.agent_account_deploy_runner_interval_seconds, - JobType.AGENT_ACCOUNT_DEPLOY.value, - ), - ( - "Proposal Embedder Runner Service", - config.scheduler.proposal_embedder_enabled, - config.scheduler.proposal_embedder_interval_seconds, - JobType.PROPOSAL_EMBEDDING.value, - ), - ( - "Chain State Monitor Service", - config.scheduler.chain_state_monitor_enabled, - config.scheduler.chain_state_monitor_interval_seconds, - JobType.CHAIN_STATE_MONITOR.value, - ), - ] - - # Add all runner jobs with common structure - for name, enabled, seconds, job_type in runner_jobs: - jobs.append( - JobConfig( - name=name, - enabled=enabled, - func=execute_runner_job, - seconds=seconds, - args=[job_type], - job_id=f"{job_type}_runner", - ) + """Enhanced manager for scheduled jobs using the new system.""" + + def __init__(self): + self._scheduler: Optional[AsyncIOScheduler] = None + self._executor = get_executor() + self._metrics = get_metrics_collector() + self._performance_monitor = get_performance_monitor() + + def get_all_jobs(self) -> List[JobScheduleConfig]: + """Get configurations for all registered jobs.""" + configs = [] + + # Get all registered jobs from the new system + registered_jobs = JobRegistry.list_jobs() + + for job_type, metadata in registered_jobs.items(): + # Check if job is enabled (can be overridden by config) + enabled = self._is_job_enabled(job_type, metadata) + + config_item = 
JobScheduleConfig( + job_type=str(job_type), + metadata=metadata, + enabled=enabled, + scheduler_id=f"{job_type.value}_scheduler", ) + configs.append(config_item) - return jobs + return configs - @staticmethod - def schedule_jobs(scheduler: AsyncIOScheduler) -> bool: - """Schedule all enabled jobs. + def _is_job_enabled(self, job_type, metadata: JobMetadata) -> bool: + """Check if a job is enabled based on metadata and config overrides.""" + # First check the metadata default + if not metadata.enabled: + return False - Args: - scheduler: The scheduler to add jobs to + # Check for config overrides using dynamic approach + job_type_str = str(job_type).lower() - Returns: - True if any jobs were scheduled, False otherwise - """ - # Import at runtime to avoid circular imports - from services.schedule import sync_schedules - from services.twitter import execute_twitter_job + # Try config override with standard naming pattern + config_attr = f"{job_type_str}_enabled" + if hasattr(config.scheduler, config_attr): + return getattr(config.scheduler, config_attr, metadata.enabled) - # Get all job configurations - jobs = JobManager.get_all_jobs() + # Try alternative naming pattern for backwards compatibility + alt_config_attr = f"{job_type_str}_runner_enabled" + if hasattr(config.scheduler, alt_config_attr): + return getattr(config.scheduler, alt_config_attr, metadata.enabled) - # Map function names to actual functions - func_map = { - "execute_twitter_job": execute_twitter_job, - "sync_schedules": sync_schedules, - } + # Use metadata default if no config override found + return metadata.enabled + + def _get_job_interval(self, job_type, metadata: JobMetadata) -> int: + """Get job interval, checking config overrides.""" + job_type_str = str(job_type).lower() - # Add enabled jobs to the scheduler + # Try config override with standard naming pattern + config_attr = f"{job_type_str}_interval_seconds" + if hasattr(config.scheduler, config_attr): + return getattr(config.scheduler, config_attr, metadata.interval_seconds) + + # Try alternative naming pattern for backwards compatibility + alt_config_attr = f"{job_type_str}_runner_interval_seconds" + if hasattr(config.scheduler, alt_config_attr): + return getattr(config.scheduler, alt_config_attr, metadata.interval_seconds) + + # Use metadata default if no config override found + return metadata.interval_seconds + + async def _execute_job_via_executor(self, job_type: str) -> None: + """Execute a job through the enhanced executor system.""" + try: + # Load pending jobs into the executor + await self._executor.enqueue_pending_jobs() + + logger.debug(f"Triggered job execution check for {job_type}") + + except Exception as e: + logger.error(f"Error executing job {job_type}: {str(e)}", exc_info=True) + + def schedule_jobs(self, scheduler: AsyncIOScheduler) -> bool: + """Schedule all enabled jobs using the new system.""" + self._scheduler = scheduler + + # Get all job configurations + jobs = self.get_all_jobs() + + # Schedule enabled jobs any_enabled = False - for job in jobs: - if job.enabled: + scheduled_count = 0 + + for job_config in jobs: + if job_config.enabled: any_enabled = True - # Handle special cases - job_func = job.func - if isinstance(job_func, str): - job_func = func_map.get(job_func, job_func) - - job_args = {} - if job.args: - # Special case for scheduler argument - if "scheduler" in job.args: - job_args["args"] = [scheduler] - else: - job_args["args"] = job.args - - # Add the job with a specific ID for easier management - job_id = job.job_id or 
f"{job.name.lower().replace(' ', '_')}" - - # Add max_instances=1 for all jobs to prevent concurrent execution - # and set misfire_grace_time to prevent missed execution warnings - # Set next_run_time to one minute from now + # Get the actual interval (might be overridden by config) + interval_seconds = self._get_job_interval( + job_config.job_type, job_config.metadata + ) + + # Schedule the job scheduler.add_job( - job_func, + self._execute_job_via_executor, "interval", - seconds=job.seconds, - id=job_id, - max_instances=1, + seconds=interval_seconds, + id=job_config.scheduler_id, + args=[job_config.job_type], + max_instances=1, # Prevent overlapping executions misfire_grace_time=60, - **job_args, + replace_existing=True, # Allow replacing existing jobs ) + scheduled_count += 1 logger.info( - f"{job.name} started with interval of {job.seconds} seconds (will execute in one minute)" + f"Scheduled {job_config.metadata.name} " + f"(priority: {job_config.metadata.priority}, " + f"interval: {interval_seconds}s, " + f"max_concurrent: {job_config.metadata.max_concurrent})" ) else: - logger.info(f"{job.name} is disabled") + logger.info(f"{job_config.metadata.name} is disabled") + + if scheduled_count > 0: + logger.info(f"Successfully scheduled {scheduled_count} jobs") return any_enabled + + async def start_executor(self, num_workers: int = 5) -> None: + """Start the job executor.""" + await self._executor.start(num_workers) + logger.info(f"Job executor started with {num_workers} workers") + + async def stop_executor(self) -> None: + """Stop the job executor.""" + await self._executor.stop() + logger.info("Job executor stopped") + + def get_executor_stats(self) -> Dict[str, Any]: + """Get executor statistics.""" + return self._executor.get_stats() + + def get_job_metrics(self, job_type: Optional[str] = None) -> Dict[str, Any]: + """Get job execution metrics.""" + from .base import JobType + + job_type_enum = None + if job_type: + try: + job_type_enum = JobType(job_type) + except ValueError: + pass + + metrics = self._metrics.get_metrics(job_type_enum) + return { + str(jt): { + "total_executions": m.total_executions, + "successful_executions": m.successful_executions, + "failed_executions": m.failed_executions, + "retried_executions": m.retried_executions, + "dead_letter_executions": m.dead_letter_executions, + "avg_execution_time": m.avg_execution_time, + "min_execution_time": m.min_execution_time, + "max_execution_time": m.max_execution_time, + "current_running": m.current_running, + "max_concurrent_reached": m.max_concurrent_reached, + "last_execution": ( + m.last_execution.isoformat() if m.last_execution else None + ), + "last_success": m.last_success.isoformat() if m.last_success else None, + "last_failure": m.last_failure.isoformat() if m.last_failure else None, + } + for jt, m in metrics.items() + } + + def get_system_health(self) -> Dict[str, Any]: + """Get overall system health status.""" + system_metrics = self._metrics.get_system_metrics() + health_status = self._metrics.get_health_status() + performance_summary = self._performance_monitor.get_performance_summary() + task_summary = get_task_summary() + executor_stats = self.get_executor_stats() + + return { + "status": health_status["status"], + "uptime_seconds": system_metrics["uptime_seconds"], + "executor": { + "running": executor_stats["running"], + "worker_count": executor_stats["worker_count"], + "dead_letter_count": executor_stats["dead_letter_count"], + "active_jobs": executor_stats["active_jobs"], + }, + "metrics": { + 
"total_executions": system_metrics["total_executions"], + "success_rate": system_metrics["success_rate"], + "total_dead_letter": system_metrics["total_dead_letter"], + }, + "tasks": { + "total_registered": task_summary["total_tasks"], + "enabled": task_summary["enabled_tasks"], + "disabled": task_summary["disabled_tasks"], + "dependency_issues": len(task_summary["dependency_issues"]), + }, + "performance": { + "system_health": performance_summary.get("system_health", "unknown"), + "healthy_job_types": performance_summary.get("healthy_job_types", 0), + "problematic_job_types": performance_summary.get( + "problematic_job_types", [] + ), + }, + "issues": health_status["issues"], + "alerts": performance_summary.get("alerts", []), + } + + def get_job_details(self, job_type: str) -> Optional[Dict[str, Any]]: + """Get detailed information about a specific job type.""" + try: + from .base import JobType + + job_type_enum = JobType(job_type) + + metadata = JobRegistry.get_metadata(job_type_enum) + if not metadata: + return None + + # Get metrics for this job + metrics = self._metrics.get_metrics(job_type_enum) + job_metrics = metrics.get(job_type_enum) + + # Get recent events + recent_events = self._metrics.get_recent_events(job_type_enum, limit=10) + + return { + "job_type": job_type, + "metadata": { + "name": metadata.name, + "description": metadata.description, + "version": metadata.version, + "enabled": metadata.enabled, + "interval_seconds": metadata.interval_seconds, + "priority": str(metadata.priority), + "max_retries": metadata.max_retries, + "retry_delay_seconds": metadata.retry_delay_seconds, + "timeout_seconds": metadata.timeout_seconds, + "max_concurrent": metadata.max_concurrent, + "batch_size": metadata.batch_size, + "requires_wallet": metadata.requires_wallet, + "requires_twitter": metadata.requires_twitter, + "requires_discord": metadata.requires_discord, + "dependencies": metadata.dependencies, + "enable_dead_letter_queue": metadata.enable_dead_letter_queue, + "preserve_order": metadata.preserve_order, + "idempotent": metadata.idempotent, + }, + "metrics": { + "total_executions": ( + job_metrics.total_executions if job_metrics else 0 + ), + "successful_executions": ( + job_metrics.successful_executions if job_metrics else 0 + ), + "failed_executions": ( + job_metrics.failed_executions if job_metrics else 0 + ), + "retried_executions": ( + job_metrics.retried_executions if job_metrics else 0 + ), + "dead_letter_executions": ( + job_metrics.dead_letter_executions if job_metrics else 0 + ), + "avg_execution_time": ( + job_metrics.avg_execution_time if job_metrics else 0 + ), + "min_execution_time": ( + job_metrics.min_execution_time if job_metrics else None + ), + "max_execution_time": ( + job_metrics.max_execution_time if job_metrics else None + ), + "current_running": ( + job_metrics.current_running if job_metrics else 0 + ), + "max_concurrent_reached": ( + job_metrics.max_concurrent_reached if job_metrics else 0 + ), + "last_execution": ( + job_metrics.last_execution.isoformat() + if job_metrics and job_metrics.last_execution + else None + ), + "last_success": ( + job_metrics.last_success.isoformat() + if job_metrics and job_metrics.last_success + else None + ), + "last_failure": ( + job_metrics.last_failure.isoformat() + if job_metrics and job_metrics.last_failure + else None + ), + }, + "recent_events": [ + { + "execution_id": str(event.execution_id), + "event_type": event.event_type, + "timestamp": event.timestamp.isoformat(), + "duration": event.duration, + "error": 
event.error, + "attempt": event.attempt, + "metadata": event.metadata, + } + for event in recent_events + ], + } + + except ValueError: + return None + + async def trigger_job_execution(self, job_type: str) -> Dict[str, Any]: + """Manually trigger execution of a specific job type.""" + try: + await self._execute_job_via_executor(job_type) + return { + "success": True, + "message": f"Triggered execution for job type: {job_type}", + "job_type": job_type, + } + except Exception as e: + logger.error(f"Error triggering job {job_type}: {str(e)}", exc_info=True) + return { + "success": False, + "message": f"Failed to trigger job: {str(e)}", + "job_type": job_type, + "error": str(e), + } diff --git a/services/runner/monitoring.py b/services/runner/monitoring.py new file mode 100644 index 00000000..bbe25c67 --- /dev/null +++ b/services/runner/monitoring.py @@ -0,0 +1,470 @@ +"""Job monitoring and observability system.""" + +from dataclasses import dataclass, field +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional +from uuid import UUID + +from lib.logger import configure_logger + +from .base import JobType +from .execution import JobExecution + +logger = configure_logger(__name__) + + +@dataclass +class JobMetrics: + """Metrics for job execution.""" + + job_type: JobType + total_executions: int = 0 + successful_executions: int = 0 + failed_executions: int = 0 + retried_executions: int = 0 + dead_letter_executions: int = 0 + + # Timing metrics + total_execution_time: float = 0.0 + min_execution_time: Optional[float] = None + max_execution_time: Optional[float] = None + avg_execution_time: float = 0.0 + + # Recent metrics (last hour) + recent_executions: int = 0 + recent_failures: int = 0 + recent_avg_time: float = 0.0 + + # Concurrency metrics + current_running: int = 0 + max_concurrent_reached: int = 0 + + last_execution: Optional[datetime] = None + last_success: Optional[datetime] = None + last_failure: Optional[datetime] = None + + +@dataclass +class ExecutionEvent: + """Individual execution event for detailed tracking.""" + + execution_id: UUID + job_type: JobType + event_type: str # started, completed, failed, retried, dead_letter + timestamp: datetime + duration: Optional[float] = None + error: Optional[str] = None + attempt: int = 1 + metadata: Dict[str, Any] = field(default_factory=dict) + + +class MetricsCollector: + """Collects and aggregates job execution metrics.""" + + def __init__(self, max_events: int = 10000): + self._metrics: Dict[JobType, JobMetrics] = {} + self._events: List[ExecutionEvent] = [] + self._max_events = max_events + self._start_time = datetime.now() + + def record_execution_start( + self, execution: JobExecution, worker_name: str = "" + ) -> None: + """Record the start of a job execution.""" + job_type = execution.job_type + + # Initialize metrics if needed + if job_type not in self._metrics: + self._metrics[job_type] = JobMetrics(job_type=job_type) + + metrics = self._metrics[job_type] + metrics.total_executions += 1 + metrics.current_running += 1 + metrics.max_concurrent_reached = max( + metrics.max_concurrent_reached, metrics.current_running + ) + metrics.last_execution = datetime.now() + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="started", + timestamp=datetime.now(), + attempt=execution.attempt, + metadata={"worker": worker_name}, + ) + self._add_event(event) + + logger.debug(f"Started tracking execution {execution.id} ({job_type})") + + def 
record_execution_completion( + self, execution: JobExecution, duration: float + ) -> None: + """Record the completion of a job execution.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.current_running = max(0, metrics.current_running - 1) + metrics.successful_executions += 1 + metrics.last_success = datetime.now() + + # Update timing metrics + self._update_timing_metrics(metrics, duration) + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="completed", + timestamp=datetime.now(), + duration=duration, + attempt=execution.attempt, + ) + self._add_event(event) + + logger.debug( + f"Completed execution {execution.id} ({job_type}) in {duration:.2f}s" + ) + + def record_execution_failure( + self, execution: JobExecution, error: str, duration: float + ) -> None: + """Record a job execution failure.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.current_running = max(0, metrics.current_running - 1) + metrics.failed_executions += 1 + metrics.last_failure = datetime.now() + + # Update timing metrics (even for failures) + self._update_timing_metrics(metrics, duration) + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="failed", + timestamp=datetime.now(), + duration=duration, + error=error, + attempt=execution.attempt, + ) + self._add_event(event) + + logger.debug( + f"Failed execution {execution.id} ({job_type}) after {duration:.2f}s: {error}" + ) + + def record_execution_retry(self, execution: JobExecution) -> None: + """Record a job execution retry.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.retried_executions += 1 + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="retried", + timestamp=datetime.now(), + attempt=execution.attempt, + ) + self._add_event(event) + + logger.debug( + f"Retrying execution {execution.id} ({job_type}), attempt {execution.attempt}" + ) + + def record_dead_letter(self, execution: JobExecution) -> None: + """Record a job being moved to dead letter queue.""" + job_type = execution.job_type + metrics = self._metrics.get(job_type) + + if not metrics: + logger.warning(f"No metrics found for job type {job_type}") + return + + metrics.dead_letter_executions += 1 + + # Record event + event = ExecutionEvent( + execution_id=execution.id, + job_type=job_type, + event_type="dead_letter", + timestamp=datetime.now(), + error=execution.error, + attempt=execution.attempt, + ) + self._add_event(event) + + logger.warning( + f"Dead letter execution {execution.id} ({job_type}) after {execution.attempt} attempts" + ) + + def _update_timing_metrics(self, metrics: JobMetrics, duration: float) -> None: + """Update timing metrics with new execution duration.""" + # Update min/max + if metrics.min_execution_time is None or duration < metrics.min_execution_time: + metrics.min_execution_time = duration + if metrics.max_execution_time is None or duration > metrics.max_execution_time: + metrics.max_execution_time = duration + + # Update average + total_time = metrics.total_execution_time + duration + total_count = metrics.successful_executions + 
metrics.failed_executions + + metrics.total_execution_time = total_time + if total_count > 0: + metrics.avg_execution_time = total_time / total_count + + def _add_event(self, event: ExecutionEvent) -> None: + """Add an event to the event log.""" + self._events.append(event) + + # Trim events if we exceed max + if len(self._events) > self._max_events: + # Remove oldest 20% to avoid frequent trimming + trim_count = int(self._max_events * 0.2) + self._events = self._events[trim_count:] + + def get_metrics( + self, job_type: Optional[JobType] = None + ) -> Dict[JobType, JobMetrics]: + """Get metrics for all job types or a specific type.""" + if job_type: + return { + job_type: self._metrics.get(job_type, JobMetrics(job_type=job_type)) + } + return self._metrics.copy() + + def get_recent_events( + self, job_type: Optional[JobType] = None, limit: int = 100 + ) -> List[ExecutionEvent]: + """Get recent execution events.""" + events = self._events + + if job_type: + events = [e for e in events if e.job_type == job_type] + + # Return most recent events + return sorted(events, key=lambda e: e.timestamp, reverse=True)[:limit] + + def get_system_metrics(self) -> Dict[str, Any]: + """Get overall system metrics.""" + total_executions = sum(m.total_executions for m in self._metrics.values()) + total_successful = sum(m.successful_executions for m in self._metrics.values()) + total_failed = sum(m.failed_executions for m in self._metrics.values()) + total_dead_letter = sum( + m.dead_letter_executions for m in self._metrics.values() + ) + + success_rate = ( + (total_successful / total_executions) if total_executions > 0 else 0 + ) + + return { + "uptime_seconds": (datetime.now() - self._start_time).total_seconds(), + "total_executions": total_executions, + "total_successful": total_successful, + "total_failed": total_failed, + "total_dead_letter": total_dead_letter, + "success_rate": success_rate, + "active_job_types": len(self._metrics), + "total_events": len(self._events), + } + + def get_health_status(self) -> Dict[str, Any]: + """Get system health status.""" + now = datetime.now() + health = {"status": "healthy", "issues": []} + + for job_type, metrics in self._metrics.items(): + # Check failure rate + if metrics.total_executions > 10: + failure_rate = metrics.failed_executions / metrics.total_executions + if failure_rate > 0.5: # More than 50% failures + health["issues"].append( + f"{job_type}: High failure rate ({failure_rate:.1%})" + ) + + # Check if job hasn't run recently (if it should be running) + if metrics.last_execution: + time_since_last = now - metrics.last_execution + if time_since_last > timedelta(hours=2): + health["issues"].append( + f"{job_type}: No executions in {time_since_last}" + ) + + if health["issues"]: + health["status"] = "degraded" if len(health["issues"]) < 3 else "unhealthy" + + return health + + def reset_metrics(self, job_type: Optional[JobType] = None) -> None: + """Reset metrics for a job type or all types.""" + if job_type: + if job_type in self._metrics: + self._metrics[job_type] = JobMetrics(job_type=job_type) + else: + self._metrics.clear() + self._events.clear() + + logger.info(f"Reset metrics for {job_type or 'all job types'}") + + +class SystemMetrics: + """System-wide metrics collector for monitoring system resources.""" + + def __init__(self): + self.monitoring_active = False + + async def start_monitoring(self) -> None: + """Start system monitoring.""" + self.monitoring_active = True + logger.info("System metrics monitoring started") + + async def 
stop_monitoring(self) -> None: + """Stop system monitoring.""" + self.monitoring_active = False + logger.info("System metrics monitoring stopped") + + def get_current_metrics(self) -> Dict[str, Any]: + """Get current system metrics.""" + try: + import psutil + + return { + "cpu_usage": psutil.cpu_percent(interval=1), + "memory_usage": psutil.virtual_memory().percent, + "disk_usage": psutil.disk_usage("/").percent, + "timestamp": datetime.now().isoformat(), + "monitoring_active": self.monitoring_active, + } + except ImportError: + logger.warning("psutil not available, returning basic metrics") + return { + "cpu_usage": 0, + "memory_usage": 0, + "disk_usage": 0, + "timestamp": datetime.now().isoformat(), + "monitoring_active": self.monitoring_active, + } + + +class PerformanceMonitor: + """Monitors job execution performance and provides alerts.""" + + def __init__(self, metrics_collector: MetricsCollector): + self.metrics = metrics_collector + self._thresholds = { + "max_failure_rate": 0.3, # 30% + "max_avg_execution_time": 300.0, # 5 minutes + "max_dead_letter_rate": 0.1, # 10% + } + + def check_performance_issues(self) -> List[str]: + """Check for performance issues and return alerts.""" + alerts = [] + + for job_type, metrics in self.metrics.get_metrics().items(): + if metrics.total_executions < 5: + continue # Skip jobs with insufficient data + + # Check failure rate + failure_rate = metrics.failed_executions / metrics.total_executions + if failure_rate > self._thresholds["max_failure_rate"]: + alerts.append( + f"HIGH FAILURE RATE: {job_type} has {failure_rate:.1%} failure rate" + ) + + # Check average execution time + if metrics.avg_execution_time > self._thresholds["max_avg_execution_time"]: + alerts.append( + f"SLOW EXECUTION: {job_type} average time is {metrics.avg_execution_time:.1f}s" + ) + + # Check dead letter rate + dead_letter_rate = metrics.dead_letter_executions / metrics.total_executions + if dead_letter_rate > self._thresholds["max_dead_letter_rate"]: + alerts.append( + f"HIGH DEAD LETTER RATE: {job_type} has {dead_letter_rate:.1%} dead letter rate" + ) + + return alerts + + def get_performance_summary(self) -> Dict[str, Any]: + """Get a performance summary across all job types.""" + metrics_data = self.metrics.get_metrics() + + if not metrics_data: + return {"message": "No job execution data available"} + + # Calculate overall statistics + total_jobs = len(metrics_data) + healthy_jobs = 0 + problematic_jobs = [] + + for job_type, metrics in metrics_data.items(): + if metrics.total_executions < 5: + continue + + failure_rate = metrics.failed_executions / metrics.total_executions + dead_letter_rate = metrics.dead_letter_executions / metrics.total_executions + + is_healthy = ( + failure_rate <= self._thresholds["max_failure_rate"] + and metrics.avg_execution_time + <= self._thresholds["max_avg_execution_time"] + and dead_letter_rate <= self._thresholds["max_dead_letter_rate"] + ) + + if is_healthy: + healthy_jobs += 1 + else: + problematic_jobs.append(str(job_type)) + + return { + "total_job_types": total_jobs, + "healthy_job_types": healthy_jobs, + "problematic_job_types": problematic_jobs, + "system_health": ( + "good" if len(problematic_jobs) == 0 else "needs_attention" + ), + "alerts": self.check_performance_issues(), + } + + +# Global metrics collector +_metrics_collector: Optional[MetricsCollector] = None +_performance_monitor: Optional[PerformanceMonitor] = None + + +def get_metrics_collector() -> MetricsCollector: + """Get the global metrics collector instance.""" + 
global _metrics_collector + if _metrics_collector is None: + _metrics_collector = MetricsCollector() + return _metrics_collector + + +def get_performance_monitor() -> PerformanceMonitor: + """Get the global performance monitor instance.""" + global _performance_monitor + if _performance_monitor is None: + _performance_monitor = PerformanceMonitor(get_metrics_collector()) + return _performance_monitor diff --git a/services/runner/registry.py b/services/runner/registry.py index fccf9e80..ada8348e 100644 --- a/services/runner/registry.py +++ b/services/runner/registry.py @@ -25,6 +25,17 @@ def get_runner(cls, job_type: JobType) -> Optional[Type[BaseTask]]: """Get runner for a job type.""" return cls._runners.get(job_type) + @classmethod + def get_all_jobs(cls) -> Dict[str, Type[BaseTask]]: + """Get all registered jobs.""" + return {str(job_type): runner for job_type, runner in cls._runners.items()} + + @classmethod + def clear_registry(cls) -> None: + """Clear all registered jobs (useful for testing).""" + cls._runners.clear() + logger.debug("Cleared job registry") + async def execute_runner_job( job_type: str, parameters: Optional[Dict[str, str]] = None diff --git a/services/runner/tasks/__init__.py b/services/runner/tasks/__init__.py index 1fc13843..3ea9f828 100644 --- a/services/runner/tasks/__init__.py +++ b/services/runner/tasks/__init__.py @@ -1,29 +1,29 @@ -"""Task runners for scheduled and on-demand jobs.""" +"""Task runners for scheduled and on-demand jobs. -from .chain_state_monitor import ChainStateMonitorTask, chain_state_monitor -from .dao_proposal_concluder import DAOProposalConcluderTask, dao_proposal_concluder -from .dao_proposal_evaluation import DAOProposalEvaluationTask, dao_proposal_evaluation -from .dao_proposal_voter import DAOProposalVoterTask, dao_proposal_voter -from .dao_task import DAOTask, dao_task -from .dao_tweet_task import DAOTweetTask, dao_tweet_task -from .discord_task import DiscordTask, discord_task -from .tweet_task import TweetTask, tweet_task +Tasks are automatically discovered and registered using the @job decorator. +To create a new task: -__all__ = [ - "DAOTask", - "dao_task", - "DAOProposalVoterTask", - "dao_proposal_voter", - "DAOTweetTask", - "dao_tweet_task", - "DiscordTask", - "discord_task", - "TweetTask", - "tweet_task", - "DAOProposalConcluderTask", - "dao_proposal_concluder", - "DAOProposalEvaluationTask", - "dao_proposal_evaluation", - "ChainStateMonitorTask", - "chain_state_monitor", -] +1. Create a new .py file in this directory +2. Import the @job decorator: from ..decorators import job +3. Decorate your task class with @job("your_job_type", ...) +4. That's it! The task will be automatically discovered and registered. + +Example: + @job( + "my_new_job", + name="My New Job", + description="Does something useful", + interval_seconds=120, + priority=JobPriority.NORMAL, + max_concurrent=1, + ) + class MyNewJobTask(BaseTask[MyJobResult]): + async def _execute_impl(self, context: JobContext) -> List[MyJobResult]: + # Implementation here + pass +""" + +# Auto-discovery handles all task imports and registrations +# No manual imports needed here anymore! 
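+#
+# For reference, a slightly fuller sketch of a new task module than the
+# docstring example above. All names here (MyJobResult, MyNewJobTask,
+# my_new_job_task, the "my_new_job" job type) are illustrative only and are
+# not part of this change:
+#
+#     from dataclasses import dataclass
+#     from typing import List
+#
+#     from ..base import BaseTask, JobContext, RunnerResult
+#     from ..decorators import JobPriority, job
+#
+#     @dataclass
+#     class MyJobResult(RunnerResult):
+#         items_processed: int = 0
+#
+#     @job(
+#         "my_new_job",
+#         name="My New Job",
+#         description="Does something useful",
+#         interval_seconds=120,
+#         priority=JobPriority.NORMAL,
+#         max_concurrent=1,
+#     )
+#     class MyNewJobTask(BaseTask[MyJobResult]):
+#         async def _execute_impl(self, context: JobContext) -> List[MyJobResult]:
+#             # Do the actual work here and report it as result objects.
+#             return [MyJobResult(success=True, message="ok", items_processed=0)]
+#
+#     # Module-level instance, matching the pattern used by the other tasks
+#     # in this package (e.g. agent_account_deployer, chain_state_monitor).
+#     my_new_job_task = MyNewJobTask()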
+ +__all__ = [] # Auto-discovery populates the registry diff --git a/services/runner/tasks/agent_account_deployer.py b/services/runner/tasks/agent_account_deployer.py index 40e8ce91..37f9c27a 100644 --- a/services/runner/tasks/agent_account_deployer.py +++ b/services/runner/tasks/agent_account_deployer.py @@ -2,7 +2,7 @@ import json from dataclasses import dataclass -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from backend.factory import backend from backend.models import ( @@ -13,7 +13,8 @@ ) from config import config from lib.logger import configure_logger -from services.runner.base import BaseTask, JobContext, RunnerResult +from services.runner.base import BaseTask, JobContext, RunnerConfig, RunnerResult +from services.runner.decorators import JobPriority, job from tools.agent_account import AgentAccountDeployTool logger = configure_logger(__name__) @@ -31,10 +32,61 @@ def __post_init__(self): self.errors = self.errors or [] +@job( + job_type="agent_account_deployer", + name="Agent Account Deployer", + description="Deploys agent account contracts with enhanced monitoring and error handling", + interval_seconds=300, # 5 minutes + priority=JobPriority.MEDIUM, + max_retries=2, + retry_delay_seconds=180, + timeout_seconds=120, + max_concurrent=1, + requires_blockchain=True, + batch_size=5, + enable_dead_letter_queue=True, +) class AgentAccountDeployerTask(BaseTask[AgentAccountDeployResult]): - """Task runner for deploying agent accounts.""" + """Task runner for deploying agent account contracts with enhanced capabilities.""" + + QUEUE_TYPE = QueueMessageType.get_or_create("agent_account_deploy") + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if backend wallet configuration is available + if not config.backend_wallet or not config.backend_wallet.seed_phrase: + logger.error( + "Backend wallet seed phrase not configured for agent account deployment" + ) + return False + return True + except Exception as e: + logger.error( + f"Error validating agent account deployer config: {str(e)}", + exc_info=True, + ) + return False - QUEUE_TYPE = QueueMessageType.AGENT_ACCOUNT_DEPLOY + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + # Check backend connectivity + backend.get_api_status() + + # Test agent account deploy tool initialization + tool = AgentAccountDeployTool(seed_phrase=config.backend_wallet.seed_phrase) + if not tool: + logger.error("Cannot initialize AgentAccountDeployTool") + return False + + return True + except Exception as e: + logger.error(f"Resource validation failed: {str(e)}") + return False async def _validate_task_specific(self, context: JobContext) -> bool: """Validate task-specific conditions.""" @@ -103,25 +155,33 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: if not self._validate_message_data(message_data): error_msg = f"Invalid message data in message {message_id}" logger.error(error_msg) - return {"success": False, "error": error_msg} + result = {"success": False, "error": error_msg} - # Initialize the AgentAccountDeployTool + # Store result and mark as processed + update_data = QueueMessageBase(is_processed=True, result=result) + backend.update_queue_message(message_id, update_data) + + return result + + # Initialize the AgentAccountDeployTool with seed phrase 
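+            # (The seed phrase is expected to have been checked earlier by
+            # _validate_config, which fails the run if
+            # config.backend_wallet.seed_phrase is not set.)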
logger.debug("Preparing to deploy agent account") deploy_tool = AgentAccountDeployTool( - wallet_id=config.scheduler.agent_account_deploy_runner_wallet_id - ) - - # get address from wallet id - wallet = backend.get_wallet( - config.scheduler.agent_account_deploy_runner_wallet_id + seed_phrase=config.backend_wallet.seed_phrase ) - # depending on the network, use the correct address - profile = backend.get_profile(wallet.profile_id) - if config.network == "mainnet": - owner_address = profile.email.strip("@stacks.id").upper() + # Determine owner address based on network and wallet configuration + if config.network.network == "mainnet": + # For mainnet, try to derive from backend wallet or use configured address + owner_address = ( + config.backend_wallet.address + or "SP1HTBVD3JG9C05J7HDJKDYR99M9Q4JKJECEWC9S" + ) else: - owner_address = "ST1994Y3P6ZDJX476QFSABEFE5T6YMTJT0T7RSQDW" + # For testnet/other networks + owner_address = ( + config.backend_wallet.address + or "ST1994Y3P6ZDJX476QFSABEFE5T6YMTJT0T7RSQDW" + ) # Execute the deployment logger.debug("Executing deployment...") @@ -133,16 +193,26 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: ) logger.debug(f"Deployment result: {deployment_result}") - # Mark the message as processed - update_data = QueueMessageBase(is_processed=True) + result = {"success": True, "deployed": True, "result": deployment_result} + + # Store result and mark as processed + update_data = QueueMessageBase(is_processed=True, result=result) backend.update_queue_message(message_id, update_data) - return {"success": True, "deployed": True, "result": deployment_result} + logger.info(f"Successfully deployed agent account for message {message_id}") + + return result except Exception as e: error_msg = f"Error processing message {message_id}: {str(e)}" logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} + result = {"success": False, "error": error_msg} + + # Store result even for failed processing + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + + return result async def get_pending_messages(self) -> List[QueueMessage]: """Get all unprocessed messages from the queue.""" @@ -155,10 +225,53 @@ async def get_pending_messages(self) -> List[QueueMessage]: return messages + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors + if "invalid message data" in str(error).lower(): + return False + if "missing" in str(error).lower() and "required" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[AgentAccountDeployResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "contract" in str(error).lower(): + logger.warning(f"Blockchain/contract error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For validation errors, don't retry + return [ + AgentAccountDeployResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: 
List[AgentAccountDeployResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("Agent account deployer task cleanup completed") + async def _execute_impl( self, context: JobContext ) -> List[AgentAccountDeployResult]: - """Run the agent account deployment task.""" + """Run the agent account deployment task with batch processing.""" pending_messages = await self.get_pending_messages() message_count = len(pending_messages) logger.debug(f"Found {message_count} pending agent account deployment messages") @@ -173,23 +286,36 @@ async def _execute_impl( ) ] - # Process each message + # Process each message in batches processed_count = 0 deployed_count = 0 errors = [] + batch_size = getattr(context, "batch_size", 5) - for message in pending_messages: - result = await self.process_message(message) - processed_count += 1 + logger.info(f"Processing {message_count} agent account deployment messages") - if result.get("success"): - if result.get("deployed", False): - deployed_count += 1 - else: - errors.append(result.get("error", "Unknown error")) + # Process messages in batches + for i in range(0, len(pending_messages), batch_size): + batch = pending_messages[i : i + batch_size] + + for message in batch: + try: + result = await self.process_message(message) + processed_count += 1 + + if result.get("success"): + if result.get("deployed", False): + deployed_count += 1 + else: + errors.append(result.get("error", "Unknown error")) + + except Exception as e: + error_msg = f"Exception processing message {message.id}: {str(e)}" + errors.append(error_msg) + logger.error(error_msg, exc_info=True) - logger.debug( - f"Task metrics - Processed: {processed_count}, " + logger.info( + f"Agent account deployment completed - Processed: {processed_count}, " f"Deployed: {deployed_count}, Errors: {len(errors)}" ) @@ -204,5 +330,5 @@ async def _execute_impl( ] -# Instantiate the task for use in the registry +# Create instance for auto-registration agent_account_deployer = AgentAccountDeployerTask() diff --git a/services/runner/tasks/chain_state_monitor.py b/services/runner/tasks/chain_state_monitor.py index 2d9148d9..7212b1a3 100644 --- a/services/runner/tasks/chain_state_monitor.py +++ b/services/runner/tasks/chain_state_monitor.py @@ -1,6 +1,7 @@ """Chain state monitoring task implementation.""" import uuid +from dataclasses import dataclass from datetime import datetime from typing import Any, Dict, List, Optional @@ -8,7 +9,8 @@ from config import config from lib.hiro import HiroApi from lib.logger import configure_logger -from services.runner.base import BaseTask, JobContext, RunnerResult +from services.runner.base import BaseTask, JobContext, RunnerConfig, RunnerResult +from services.runner.decorators import JobPriority, job from services.webhooks.chainhook import ChainhookService from services.webhooks.chainhook.models import ( Apply, @@ -24,60 +26,88 @@ logger = configure_logger(__name__) +@dataclass class ChainStateMonitorResult(RunnerResult): """Result of chain state monitoring operation.""" - def __init__( - self, - success: bool, - message: str, - error: Optional[Exception] = None, - network: str = None, - is_stale: bool = False, - last_updated: Optional[datetime] = None, - elapsed_minutes: float = 0, - blocks_behind: int = 0, - blocks_processed: Optional[List[int]] = None, - ): - """Initialize with required and optional parameters. 
- - Args: - success: Whether the operation was successful - message: Message describing the operation result - error: Optional exception that occurred - network: The network being monitored (optional, defaults to None) - is_stale: Whether the chain state is stale (optional, defaults to False) - last_updated: When the chain state was last updated - elapsed_minutes: Minutes since last update - blocks_behind: Number of blocks behind - blocks_processed: List of blocks processed - """ - super().__init__(success=success, message=message, error=error) - self.network = ( - network or config.network.network - ) # Use config network as default - self.is_stale = is_stale - self.last_updated = last_updated - self.elapsed_minutes = elapsed_minutes - self.blocks_behind = blocks_behind - self.blocks_processed = blocks_processed if blocks_processed is not None else [] - - + network: str = None + is_stale: bool = False + last_updated: Optional[datetime] = None + elapsed_minutes: float = 0 + blocks_behind: int = 0 + blocks_processed: Optional[List[int]] = None + + def __post_init__(self): + """Initialize default values after dataclass creation.""" + if self.network is None: + self.network = config.network.network + if self.blocks_processed is None: + self.blocks_processed = [] + + +@job( + job_type="chain_state_monitor", + name="Chain State Monitor", + description="Monitors blockchain state for synchronization with enhanced monitoring and error handling", + interval_seconds=90, # 1.5 minutes + priority=JobPriority.MEDIUM, + max_retries=3, + retry_delay_seconds=120, + timeout_seconds=300, + max_concurrent=2, + requires_blockchain=True, + batch_size=20, + enable_dead_letter_queue=True, +) class ChainStateMonitorTask(BaseTask[ChainStateMonitorResult]): - """Task runner for monitoring chain state freshness.""" + """Task for monitoring blockchain state and syncing with database with enhanced capabilities.""" - def __init__(self): - """Initialize the task without requiring config parameter.""" - # No config parameter needed - we get it from the import - super().__init__() + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) self.hiro_api = HiroApi() self.chainhook_service = ChainhookService() + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Chain state monitor doesn't require wallet configuration + # It only reads from the blockchain, no transactions needed + return True + except Exception as e: + logger.error( + f"Error validating chain state monitor config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for blockchain monitoring.""" + try: + # Check backend connectivity + backend.get_api_status() + + # Test HiroApi initialization and connectivity + hiro_api = HiroApi() + api_info = await hiro_api.aget_info() + if not api_info: + logger.error("Cannot connect to Hiro API") + return False + + return True + except Exception as e: + logger.error(f"Resource validation failed: {str(e)}") + return False + async def _validate_task_specific(self, context: JobContext) -> bool: """Validate task-specific conditions.""" - # Always valid to run - we want to check chain state freshness - # even when there's no new data - return True + try: + # Always valid to run - we want to check chain state freshness + # even when there's no new data + return True + except Exception as e: + logger.error( + f"Error validating chain state 
monitor task: {str(e)}", exc_info=True + ) + return False def _convert_to_chainhook_format( self, @@ -554,8 +584,51 @@ def _create_transaction_operations( return operations + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain RPC issues + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on configuration errors + if "not configured" in str(error).lower(): + return False + if "invalid contract" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[ChainStateMonitorResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "rpc" in str(error).lower(): + logger.warning(f"Blockchain/RPC error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For configuration errors, don't retry + return [ + ChainStateMonitorResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[ChainStateMonitorResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("Chain state monitor task cleanup completed") + async def _execute_impl(self, context: JobContext) -> List[ChainStateMonitorResult]: - """Run the chain state monitoring task.""" + """Execute chain state monitoring task with blockchain synchronization.""" # Use the configured network network = config.network.network @@ -800,5 +873,5 @@ async def _execute_impl(self, context: JobContext) -> List[ChainStateMonitorResu ] -# Instantiate the task for use in the registry +# Create instance for auto-registration chain_state_monitor = ChainStateMonitorTask() diff --git a/services/runner/tasks/dao_deployment_task.py b/services/runner/tasks/dao_deployment_task.py new file mode 100644 index 00000000..547aeb1e --- /dev/null +++ b/services/runner/tasks/dao_deployment_task.py @@ -0,0 +1,389 @@ +from dataclasses import dataclass +from datetime import datetime +from typing import Any, Dict, List, Optional +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + DAOFilter, + Profile, + QueueMessage, + QueueMessageBase, + QueueMessageFilter, + QueueMessageType, +) +from lib.logger import configure_logger +from services.workflows import execute_workflow_stream +from tools.tools_factory import filter_tools_by_names, initialize_tools + +from ..base import BaseTask, JobContext, RunnerConfig, RunnerResult +from ..decorators import JobPriority, job + +logger = configure_logger(__name__) + + +@dataclass +class DAODeploymentResult(RunnerResult): + """Result of DAO deployment operation.""" + + dao_id: Optional[UUID] = None + deployment_data: Optional[Dict[str, Any]] = None + daos_processed: int = 0 + deployments_successful: int = 0 + + +@job( + job_type="dao_deployment", + name="DAO Deployment Processor", + description="Processes DAO deployment requests with enhanced monitoring and error handling", + interval_seconds=60, + priority=JobPriority.HIGH, + max_retries=2, + retry_delay_seconds=120, + timeout_seconds=600, + max_concurrent=1, + requires_blockchain=True, + batch_size=1, + enable_dead_letter_queue=True, +) +class DAODeploymentTask(BaseTask[DAODeploymentResult]): + 
"""Task for processing DAO deployments with enhanced capabilities.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._pending_messages = None + self.tools_map_all = initialize_tools( + Profile(id=self.config.twitter_profile_id, created_at=datetime.now()), + agent_id=self.config.twitter_agent_id, + ) + self.tools_map = filter_tools_by_names( + ["contract_deploy_dao"], self.tools_map_all + ) + logger.debug(f"Initialized {len(self.tools_map)} DAO deployment tools") + + async def _validate_config(self, context: JobContext) -> bool: + """Validate DAO deployment task configuration.""" + try: + if not self.tools_map: + logger.error("No DAO deployment tools available") + return False + + if not self.tools_map_all: + logger.error("Tools not properly initialized") + return False + + # Validate that the twitter profile and agent are available + if not self.config.twitter_profile_id or not self.config.twitter_agent_id: + logger.error("Twitter profile or agent ID not configured") + return False + + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for DAO deployment.""" + try: + # Check backend connectivity + backend.get_api_status() + + # Check if we have required tools initialized + if not self.tools_map: + logger.error("DAO deployment tools not available") + return False + + return True + except Exception as e: + logger.error(f"DAO deployment resource validation failed: {str(e)}") + return False + + async def _validate_prerequisites(self, context: JobContext) -> bool: + """Validate DAO deployment task prerequisites.""" + try: + # Check for pending DAOs first + pending_daos = backend.list_daos( + filters=DAOFilter( + is_deployed=False, + is_broadcasted=True, + wallet_id=self.config.twitter_wallet_id, + ) + ) + if pending_daos: + logger.info( + f"Found {len(pending_daos)} pending Twitter DAO(s), skipping queue processing" + ) + return False + + # Cache pending messages for later use + self._pending_messages = backend.list_queue_messages( + filters=QueueMessageFilter( + type=QueueMessageType.DAO, is_processed=False + ) + ) + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment prerequisites: {str(e)}", + exc_info=True, + ) + self._pending_messages = None + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate DAO deployment task-specific conditions.""" + try: + if not self._pending_messages: + logger.debug("No pending DAO deployment messages found") + return False + + # Validate each message has required parameters + valid_messages = [] + for message in self._pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + + self._pending_messages = valid_messages + message_count = len(valid_messages) + + if message_count > 0: + logger.debug(f"Found {message_count} valid DAO deployment messages") + return True + + logger.debug("No valid DAO deployment messages to process") + return False + + except Exception as e: + logger.error( + f"Error in DAO deployment task validation: {str(e)}", exc_info=True + ) + return False + + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a message has valid DAO deployment parameters.""" + try: + if not message.message or not isinstance(message.message, dict): + return False + + params 
= message.message.get("parameters", {}) + required_params = [ + "token_symbol", + "token_name", + "token_description", + "token_max_supply", + "token_decimals", + "origin_address", + "mission", + ] + + # Check all required parameters exist and are not empty + for param in required_params: + if param not in params or not params[param]: + logger.debug( + f"Message {message.id} missing required param: {param}" + ) + return False + + return True + except Exception: + return False + + async def _validate_message( + self, message: QueueMessage + ) -> Optional[DAODeploymentResult]: + """Validate a single DAO deployment message before processing.""" + try: + params = message.message.get("parameters", {}) + required_params = [ + "token_symbol", + "token_name", + "token_description", + "token_max_supply", + "token_decimals", + "origin_address", + "mission", + ] + + missing_params = [p for p in required_params if p not in params] + if missing_params: + return DAODeploymentResult( + success=False, + message=f"Missing required parameters: {', '.join(missing_params)}", + ) + + return None # Validation passed + + except Exception as e: + logger.error( + f"Error validating DAO deployment message {message.id}: {str(e)}", + exc_info=True, + ) + return DAODeploymentResult( + success=False, + message=f"Error validating message: {str(e)}", + error=e, + ) + + def _get_dao_deployment_parameters(self, message: QueueMessage) -> Optional[str]: + """Extract and format DAO deployment parameters from message.""" + try: + params = message.message["parameters"] + return ( + f"Please deploy a DAO with the following parameters:\n" + f"Token Symbol: {params['token_symbol']}\n" + f"Token Name: {params['token_name']}\n" + f"Token Description: {params['token_description']}\n" + f"Token Max Supply: {params['token_max_supply']}\n" + f"Token Decimals: {params['token_decimals']}\n" + f"Origin Address: {params['origin_address']}\n" + f"Tweet Origin: {message.tweet_id}\n" + f"Mission: {params['mission']}" + ) + except KeyError as e: + logger.error(f"Missing required parameter in DAO deployment message: {e}") + return None + + async def _process_dao_deployment_message( + self, message: QueueMessage + ) -> DAODeploymentResult: + """Process a single DAO deployment message with enhanced error handling.""" + try: + # Validate message first + validation_result = await self._validate_message(message) + if validation_result: + return validation_result + + tool_input = self._get_dao_deployment_parameters(message) + if not tool_input: + return DAODeploymentResult( + success=False, + message="Failed to extract DAO deployment parameters from message", + ) + + logger.info(f"Processing DAO deployment for message {message.id}") + logger.debug(f"DAO deployment parameters: {tool_input}") + + deployment_data = {} + async for chunk in execute_workflow_stream( + history=[], input_str=tool_input, tools_map=self.tools_map + ): + if chunk["type"] == "result": + deployment_data = chunk["content"] + logger.info("DAO deployment completed successfully") + logger.debug(f"Deployment data: {deployment_data}") + elif chunk["type"] == "tool": + logger.debug(f"Executing tool: {chunk}") + + # Extract DAO ID if available from deployment data + dao_id = None + if isinstance(deployment_data, dict): + dao_id = deployment_data.get("dao_id") + + return DAODeploymentResult( + success=True, + message="Successfully processed DAO deployment", + deployment_data=deployment_data, + dao_id=dao_id, + daos_processed=1, + deployments_successful=1, + ) + + except Exception as e: + 
logger.error( + f"Error processing DAO deployment message: {str(e)}", exc_info=True + ) + return DAODeploymentResult( + success=False, + message=f"Error processing DAO deployment: {str(e)}", + error=e, + daos_processed=1, + deployments_successful=0, + ) + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if DAO deployment error should trigger retry.""" + # Retry on network errors, temporary blockchain issues + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors or tool configuration issues + if "Missing required parameter" in str(error): + return False + if "Tools not properly initialized" in str(error): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAODeploymentResult]]: + """Handle DAO deployment execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "network" in str(error).lower(): + logger.warning( + f"Blockchain/network error during DAO deployment: {str(error)}, will retry" + ) + return None # Let default retry handling take over + + # For validation errors, don't retry + return [ + DAODeploymentResult( + success=False, + message=f"Unrecoverable DAO deployment error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAODeploymentResult] + ) -> None: + """Cleanup after DAO deployment task execution.""" + # Clear cached pending messages + self._pending_messages = None + logger.debug("DAO deployment task cleanup completed") + + async def _execute_impl(self, context: JobContext) -> List[DAODeploymentResult]: + """Execute DAO deployment task with enhanced processing.""" + results: List[DAODeploymentResult] = [] + try: + if not self._pending_messages: + return results + + # Process one message at a time for DAO deployments (they're resource intensive) + message = self._pending_messages[0] + logger.debug(f"Processing DAO deployment message: {message.id}") + + result = await self._process_dao_deployment_message(message) + results.append(result) + + if result.success: + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(is_processed=True), + ) + logger.debug(f"Marked DAO deployment message {message.id} as processed") + logger.info("DAO deployment task completed successfully") + else: + logger.error(f"DAO deployment failed: {result.message}") + + return results + + except Exception as e: + logger.error(f"Error in DAO deployment task: {str(e)}", exc_info=True) + results.append( + DAODeploymentResult( + success=False, + message=f"Error in DAO deployment task: {str(e)}", + error=e, + daos_processed=1, + deployments_successful=0, + ) + ) + return results + + +# Create instance for auto-registration +dao_deployment_task = DAODeploymentTask() diff --git a/services/runner/tasks/dao_deployment_tweet_task.py b/services/runner/tasks/dao_deployment_tweet_task.py new file mode 100644 index 00000000..54996b46 --- /dev/null +++ b/services/runner/tasks/dao_deployment_tweet_task.py @@ -0,0 +1,371 @@ +from dataclasses import dataclass +from typing import Any, List, Optional +from uuid import UUID + +from backend.factory import backend +from backend.models import ( + QueueMessageBase, + QueueMessageCreate, + QueueMessageFilter, + QueueMessageType, + TokenFilter, +) +from lib.logger import configure_logger +from services.workflows import generate_dao_tweet + +from ..base 
import BaseTask, JobContext, RunnerConfig, RunnerResult +from ..decorators import JobPriority, job + +logger = configure_logger(__name__) + + +@dataclass +class DAODeploymentTweetResult(RunnerResult): + """Result of DAO deployment tweet processing operation.""" + + dao_id: Optional[UUID] = None + tweet_id: Optional[str] = None + congratulatory_tweets_generated: int = 0 + tweet_messages_created: int = 0 + + +@job( + job_type="dao_deployment_tweet", + name="DAO Deployment Tweet Generator", + description="Generates congratulatory tweets for successfully deployed DAOs with enhanced monitoring and error handling", + interval_seconds=45, + priority=JobPriority.MEDIUM, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=180, + max_concurrent=2, + requires_ai=True, + batch_size=5, + enable_dead_letter_queue=True, +) +class DAODeploymentTweetTask(BaseTask[DAODeploymentTweetResult]): + """Task for generating congratulatory tweets for successfully deployed DAOs with enhanced capabilities.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._pending_messages = None + + async def _validate_config(self, context: JobContext) -> bool: + """Validate DAO deployment tweet task configuration.""" + try: + # Check if generate_dao_tweet workflow is available for deployment congratulations + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment tweet task config: {str(e)}", + exc_info=True, + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for DAO deployment tweet generation.""" + try: + # Check backend connectivity + backend.get_api_status() + return True + except Exception as e: + logger.error(f"Backend not available for DAO deployment tweets: {str(e)}") + return False + + async def _validate_prerequisites(self, context: JobContext) -> bool: + """Validate DAO deployment tweet task prerequisites.""" + try: + # Cache pending deployment tweet messages for later use + self._pending_messages = backend.list_queue_messages( + filters=QueueMessageFilter( + type=QueueMessageType.DAO_TWEET, is_processed=False + ) + ) + return True + except Exception as e: + logger.error( + f"Error validating DAO deployment tweet prerequisites: {str(e)}", + exc_info=True, + ) + self._pending_messages = None + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate DAO deployment tweet task-specific conditions.""" + try: + if not self._pending_messages: + logger.debug("No pending DAO deployment tweet messages found") + return False + + # Validate each message has valid deployed DAO data + valid_messages = [] + for message in self._pending_messages: + if await self._is_deployment_message_valid(message): + valid_messages.append(message) + + self._pending_messages = valid_messages + + if valid_messages: + logger.debug( + f"Found {len(valid_messages)} valid DAO deployment tweet messages" + ) + return True + + logger.debug("No valid DAO deployment tweet messages to process") + return False + + except Exception as e: + logger.error( + f"Error in DAO deployment tweet task validation: {str(e)}", + exc_info=True, + ) + return False + + async def _is_deployment_message_valid(self, message: Any) -> bool: + """Check if a DAO deployment tweet message is valid for processing.""" + try: + if not message.dao_id: + return False + + # Validate DAO exists and is successfully deployed + dao = backend.get_dao(message.dao_id) + if not dao or not 
dao.is_deployed: + return False + + # Validate token exists for the deployed DAO + token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id)) + if not token: + return False + + return True + except Exception: + return False + + async def _validate_deployment_message( + self, message: Any + ) -> Optional[DAODeploymentTweetResult]: + """Validate a single DAO deployment message before processing.""" + try: + if not message.dao_id: + return DAODeploymentTweetResult( + success=False, + message="DAO deployment message has no dao_id", + dao_id=None, + ) + + # Validate DAO exists and is successfully deployed + dao = backend.get_dao(message.dao_id) + if not dao: + return DAODeploymentTweetResult( + success=False, + message=f"No DAO found for deployment tweet: {message.dao_id}", + dao_id=message.dao_id, + ) + + if not dao.is_deployed: + return DAODeploymentTweetResult( + success=False, + message=f"DAO is not yet deployed, cannot tweet congratulations: {message.dao_id}", + dao_id=message.dao_id, + ) + + # Validate token exists for the deployed DAO + token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id)) + if not token: + return DAODeploymentTweetResult( + success=False, + message=f"No token found for deployed DAO: {message.dao_id}", + dao_id=message.dao_id, + ) + + return None # Validation passed + + except Exception as e: + logger.error( + f"Error validating DAO deployment message {message.id}: {str(e)}", + exc_info=True, + ) + return DAODeploymentTweetResult( + success=False, + message=f"Error validating deployment message: {str(e)}", + error=e, + dao_id=message.dao_id if hasattr(message, "dao_id") else None, + ) + + async def _process_dao_deployment_message( + self, message: Any + ) -> DAODeploymentTweetResult: + """Process a single DAO deployment message to generate congratulatory tweet.""" + try: + # Validate deployment message first + validation_result = await self._validate_deployment_message(message) + if validation_result: + return validation_result + + # Get the validated deployed DAO and token info + dao = backend.get_dao(message.dao_id) + token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id))[0] + + logger.info( + f"Generating congratulatory tweet for deployed DAO: {dao.name} ({dao.id})" + ) + logger.debug( + f"Deployed DAO details - Symbol: {token.symbol}, Mission: {dao.mission[:100]}..." + ) + + # Generate congratulatory tweet for the deployment + generated_congratulatory_tweet = await generate_dao_tweet( + dao_name=dao.name, + dao_symbol=token.symbol, + dao_mission=dao.mission, + dao_id=dao.id, + ) + + if ( + not generated_congratulatory_tweet + or not generated_congratulatory_tweet.get("tweet_text") + ): + return DAODeploymentTweetResult( + success=False, + message="Failed to generate congratulatory tweet content for DAO deployment", + dao_id=dao.id, + tweet_id=message.tweet_id, + ) + + # Create a new congratulatory tweet message in the queue + congratulatory_tweet_message = backend.create_queue_message( + QueueMessageCreate( + type="tweet", + dao_id=dao.id, + message={"message": generated_congratulatory_tweet["tweet_text"]}, + tweet_id=message.tweet_id, + conversation_id=message.conversation_id, + ) + ) + + logger.info( + f"Created congratulatory tweet message for deployed DAO: {dao.name}" + ) + logger.debug( + f"Congratulatory tweet message ID: {congratulatory_tweet_message.id}" + ) + logger.debug( + f"Generated congratulatory tweet content: {generated_congratulatory_tweet['tweet_text'][:100]}..." 
+ ) + + return DAODeploymentTweetResult( + success=True, + message="Successfully generated congratulatory tweet for DAO deployment", + dao_id=dao.id, + tweet_id=message.tweet_id, + congratulatory_tweets_generated=1, + tweet_messages_created=1, + ) + + except Exception as e: + logger.error( + f"Error processing DAO deployment message {message.id}: {str(e)}", + exc_info=True, + ) + return DAODeploymentTweetResult( + success=False, + message=f"Error processing DAO deployment tweet: {str(e)}", + error=e, + dao_id=message.dao_id if hasattr(message, "dao_id") else None, + ) + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if DAO deployment tweet error should trigger retry.""" + # Retry on network errors, AI service timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on DAO deployment validation errors + if "DAO is not yet deployed" in str(error): + return False + if "No DAO found" in str(error): + return False + if "No token found for deployed DAO" in str(error): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAODeploymentTweetResult]]: + """Handle DAO deployment tweet execution errors with recovery logic.""" + if "ai" in str(error).lower() or "openai" in str(error).lower(): + logger.warning( + f"AI service error during congratulatory tweet generation: {str(error)}, will retry" + ) + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning( + f"Network error during DAO deployment tweet: {str(error)}, will retry" + ) + return None + + # For DAO deployment validation errors, don't retry + return [ + DAODeploymentTweetResult( + success=False, + message=f"Unrecoverable DAO deployment tweet error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAODeploymentTweetResult] + ) -> None: + """Cleanup after DAO deployment tweet task execution.""" + # Clear cached pending messages + self._pending_messages = None + logger.debug("DAO deployment tweet task cleanup completed") + + async def _execute_impl( + self, context: JobContext + ) -> List[DAODeploymentTweetResult]: + """Execute DAO deployment tweet processing task with batch processing.""" + results: List[DAODeploymentTweetResult] = [] + + if not self._pending_messages: + logger.debug("No pending DAO deployment tweet messages to process") + return results + + processed_count = 0 + success_count = 0 + batch_size = getattr(context, "batch_size", 5) + + # Process deployment tweet messages in batches + for i in range(0, len(self._pending_messages), batch_size): + batch = self._pending_messages[i : i + batch_size] + + for message in batch: + logger.debug(f"Processing DAO deployment tweet message: {message.id}") + result = await self._process_dao_deployment_message(message) + results.append(result) + processed_count += 1 + + if result.success: + success_count += 1 + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(is_processed=True), + ) + logger.debug( + f"Marked DAO deployment tweet message {message.id} as processed" + ) + + logger.info( + f"DAO deployment tweet task completed - Processed: {processed_count}, " + f"Successful: {success_count}, Failed: {processed_count - success_count}" + ) + + return results + + +# Create instance for auto-registration +dao_deployment_tweet_task = DAODeploymentTweetTask() diff --git 
a/services/runner/tasks/dao_proposal_concluder.py b/services/runner/tasks/dao_proposal_concluder.py index b212c8e2..19165e29 100644 --- a/services/runner/tasks/dao_proposal_concluder.py +++ b/services/runner/tasks/dao_proposal_concluder.py @@ -1,7 +1,7 @@ """DAO proposal conclusion task implementation.""" from dataclasses import dataclass -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from backend.factory import backend from backend.models import ( @@ -14,6 +14,7 @@ from config import config from lib.logger import configure_logger from services.runner.base import BaseTask, JobContext, RunnerResult +from services.runner.decorators import JobPriority, job from tools.dao_ext_action_proposals import ConcludeActionProposalTool logger = configure_logger(__name__) @@ -25,16 +26,55 @@ class DAOProposalConcludeResult(RunnerResult): proposals_processed: int = 0 proposals_concluded: int = 0 + conclusions_successful: int = 0 errors: List[str] = None def __post_init__(self): self.errors = self.errors or [] +@job( + job_type="dao_proposal_conclude", + name="DAO Proposal Concluder", + description="Processes and concludes DAO proposals with enhanced monitoring and error handling", + interval_seconds=60, + priority=JobPriority.MEDIUM, + max_retries=2, + retry_delay_seconds=90, + timeout_seconds=240, + max_concurrent=1, + requires_blockchain=True, + batch_size=2, + enable_dead_letter_queue=True, +) class DAOProposalConcluderTask(BaseTask[DAOProposalConcludeResult]): - """Task runner for processing and concluding DAO proposals.""" + """Task runner for processing and concluding DAO proposals with enhanced capabilities.""" - QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_CONCLUDE + QUEUE_TYPE = QueueMessageType.get_or_create("dao_proposal_conclude") + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if backend wallet configuration is available + if not config.backend_wallet or not config.backend_wallet.seed_phrase: + logger.error("Backend wallet seed phrase not configured") + return False + return True + except Exception as e: + logger.error( + f"Error validating proposal concluder config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + # Check backend connectivity + backend.get_api_status() + return True + except Exception as e: + logger.error(f"Backend not available: {str(e)}") + return False async def _validate_task_specific(self, context: JobContext) -> bool: """Validate task-specific conditions.""" @@ -48,22 +88,17 @@ async def _validate_task_specific(self, context: JobContext) -> bool: logger.info("No pending proposal conclusion messages found") return False - # Validate that at least one message has a valid proposal + # Validate each message has valid proposal data + valid_messages = [] for message in pending_messages: - message_data = message.message or {} - proposal_id = message_data.get("proposal_id") - - if not proposal_id: - logger.warning(f"Message {message.id} missing proposal_id") - continue + if await self._is_message_valid(message): + valid_messages.append(message) - # Check if the proposal exists in the database - proposal = backend.get_proposal(proposal_id) - if proposal: - logger.info(f"Found valid proposal {proposal_id} to conclude") - return True - else: - logger.warning(f"Proposal {proposal_id} not found in database") + if valid_messages: + logger.info( + f"Found 
{len(valid_messages)} valid proposal conclusion messages" + ) + return True logger.warning("No valid proposals found in pending messages") return False @@ -74,8 +109,27 @@ async def _validate_task_specific(self, context: JobContext) -> bool: ) return False - async def process_message(self, message: QueueMessage) -> Dict[str, Any]: - """Process a single DAO proposal conclusion message.""" + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a proposal conclusion message is valid for processing.""" + try: + if not message.message or not message.dao_id: + return False + + proposal_id = message.message.get("proposal_id") + if not proposal_id: + return False + + # Check if the proposal exists in the database + proposal = backend.get_proposal(proposal_id) + if not proposal: + return False + + return True + except Exception: + return False + + async def _process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single DAO proposal conclusion message with enhanced error handling.""" message_id = message.id message_data = message.message or {} dao_id = message.dao_id @@ -114,10 +168,13 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: # Use the first token as the DAO token dao_token = tokens[0] + logger.info( + f"Preparing to conclude proposal {proposal.proposal_id} for DAO {dao.name}" + ) + # Initialize the ConcludeActionProposalTool - logger.debug(f"Preparing to conclude proposal {proposal.proposal_id}") conclude_tool = ConcludeActionProposalTool( - wallet_id=config.scheduler.dao_proposal_conclude_runner_wallet_id + seed_phrase=config.backend_wallet.seed_phrase ) # Execute the conclusion @@ -130,26 +187,79 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: ) logger.debug(f"Conclusion result: {conclusion_result}") - # Mark the message as processed - update_data = QueueMessageBase(is_processed=True) + result = {"success": True, "concluded": True, "result": conclusion_result} + + # Store result and mark the message as processed + update_data = QueueMessageBase(is_processed=True, result=result) backend.update_queue_message(message_id, update_data) - return {"success": True, "concluded": True, "result": conclusion_result} + logger.info(f"Successfully concluded proposal {proposal.proposal_id}") + + return result except Exception as e: error_msg = f"Error processing message {message_id}: {str(e)}" logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} + result = {"success": False, "error": error_msg} + + # Store result even for failed processing + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + + return result async def get_pending_messages(self) -> List[QueueMessage]: """Get all unprocessed messages from the queue.""" filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) return backend.list_queue_messages(filters=filters) + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors + if "not found" in str(error).lower(): + return False + if "missing" in str(error).lower() and "proposal_id" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> 
Optional[List[DAOProposalConcludeResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "contract" in str(error).lower(): + logger.warning(f"Blockchain/contract error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For validation errors, don't retry + return [ + DAOProposalConcludeResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAOProposalConcludeResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("DAO proposal concluder task cleanup completed") + async def _execute_impl( self, context: JobContext ) -> List[DAOProposalConcludeResult]: - """Run the DAO proposal conclusion task.""" + """Run the DAO proposal conclusion task with batch processing.""" pending_messages = await self.get_pending_messages() message_count = len(pending_messages) logger.debug(f"Found {message_count} pending proposal conclusion messages") @@ -167,20 +277,33 @@ async def _execute_impl( # Process each message processed_count = 0 concluded_count = 0 + successful_conclusions = 0 errors = [] - - for message in pending_messages: - result = await self.process_message(message) - processed_count += 1 - - if result.get("success"): - if result.get("concluded", False): - concluded_count += 1 - else: - errors.append(result.get("error", "Unknown error")) - - logger.debug( - f"Task metrics - Processed: {processed_count}, " + batch_size = getattr(context, "batch_size", 2) + + # Process messages in batches + for i in range(0, len(pending_messages), batch_size): + batch = pending_messages[i : i + batch_size] + + for message in batch: + try: + result = await self._process_message(message) + processed_count += 1 + + if result.get("success"): + if result.get("concluded", False): + concluded_count += 1 + successful_conclusions += 1 + else: + errors.append(result.get("error", "Unknown error")) + + except Exception as e: + error_msg = f"Exception processing message {message.id}: {str(e)}" + errors.append(error_msg) + logger.error(error_msg, exc_info=True) + + logger.info( + f"DAO proposal concluder task completed - Processed: {processed_count}, " f"Concluded: {concluded_count}, Errors: {len(errors)}" ) @@ -190,10 +313,11 @@ async def _execute_impl( message=f"Processed {processed_count} proposal(s), concluded {concluded_count} proposal(s)", proposals_processed=processed_count, proposals_concluded=concluded_count, + conclusions_successful=successful_conclusions, errors=errors, ) ] -# Instantiate the task for use in the registry +# Create instance for auto-registration dao_proposal_concluder = DAOProposalConcluderTask() diff --git a/services/runner/tasks/dao_proposal_embedder.py b/services/runner/tasks/dao_proposal_embedder.py new file mode 100644 index 00000000..4720f10c --- /dev/null +++ b/services/runner/tasks/dao_proposal_embedder.py @@ -0,0 +1,319 @@ +"""DAO proposal embedder task implementation.""" + +from dataclasses import dataclass +from typing import List, Optional + +from backend.factory import backend +from backend.models import ProposalBase, ProposalFilter +from lib.logger import configure_logger +from services.llm.embed import EmbedService +from services.runner.base import BaseTask, JobContext, RunnerConfig, RunnerResult +from services.runner.decorators import JobPriority, job + +logger = 
configure_logger(__name__) + + +@dataclass +class DAOProposalEmbeddingResult(RunnerResult): + """Result of DAO proposal embedding operation.""" + + dao_proposals_processed: int = 0 + dao_proposals_embedded: int = 0 + embeddings_successful: int = 0 + embeddings_failed: int = 0 + + +@job( + job_type="dao_proposal_embedder", + name="DAO Proposal Embedder", + description="Generates embeddings for new DAO proposals with enhanced monitoring and error handling", + interval_seconds=120, # 2 minutes + priority=JobPriority.LOW, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=180, + max_concurrent=3, + requires_ai=True, + batch_size=10, + enable_dead_letter_queue=True, +) +class DAOProposalEmbedderTask(BaseTask[DAOProposalEmbeddingResult]): + """Task for generating embeddings for new DAO proposals with enhanced capabilities.""" + + def __init__(self, config: Optional[RunnerConfig] = None): + super().__init__(config) + self._dao_proposals_without_embeddings = None + self.embed_service = EmbedService() + + async def _validate_config(self, context: JobContext) -> bool: + """Validate DAO proposal embedder task configuration.""" + try: + # Check if embedding service is available for DAO proposals + if not self.embed_service: + logger.error("Embedding service not available for DAO proposals") + return False + return True + except Exception as e: + logger.error( + f"Error validating DAO proposal embedder config: {str(e)}", + exc_info=True, + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for DAO proposal AI embeddings.""" + try: + # Check backend connectivity + backend.get_api_status() + + # Test embedding service for DAO proposals + try: + test_result = await self.embed_service.embed_text("test dao proposal") + if not test_result: + logger.error("Embedding service test failed for DAO proposals") + return False + except Exception as e: + logger.error( + f"DAO proposal embedding service validation failed: {str(e)}" + ) + return False + + return True + except Exception as e: + logger.error(f"DAO proposal embedding resource validation failed: {str(e)}") + return False + + async def _validate_task_specific(self, context: JobContext) -> bool: + """Validate DAO proposal embedder task-specific conditions.""" + try: + # Get DAO proposals without embeddings + dao_proposals = backend.list_proposals( + filters=ProposalFilter(has_embedding=False) + ) + + # Filter DAO proposals that have actual content to embed + dao_proposals_without_embeddings = [] + for proposal in dao_proposals: + if proposal.description and proposal.description.strip(): + dao_proposals_without_embeddings.append(proposal) + + self._dao_proposals_without_embeddings = dao_proposals_without_embeddings + + if dao_proposals_without_embeddings: + logger.info( + f"Found {len(dao_proposals_without_embeddings)} DAO proposals needing embeddings" + ) + return True + + logger.debug("No DAO proposals needing embeddings found") + return False + + except Exception as e: + logger.error( + f"Error validating DAO proposal embedder task: {str(e)}", exc_info=True + ) + self._dao_proposals_without_embeddings = None + return False + + async def _generate_embedding_for_dao_proposal( + self, dao_proposal + ) -> DAOProposalEmbeddingResult: + """Generate embedding for a single DAO proposal with enhanced error handling.""" + try: + logger.info( + f"Generating embedding for DAO proposal: {dao_proposal.title} ({dao_proposal.id})" + ) + + # Prepare text content for DAO proposal embedding + 
text_content = f"DAO Proposal Title: {dao_proposal.title}\n" + if dao_proposal.description: + text_content += ( + f"DAO Proposal Description: {dao_proposal.description}\n" + ) + + # Additional context if available for DAO proposal + if hasattr(dao_proposal, "summary") and dao_proposal.summary: + text_content += f"DAO Proposal Summary: {dao_proposal.summary}\n" + + logger.debug( + f"DAO proposal embedding text content (first 200 chars): {text_content[:200]}..." + ) + + # Generate embedding for DAO proposal + dao_proposal_embedding = await self.embed_service.embed_text(text_content) + + if not dao_proposal_embedding: + error_msg = ( + f"Failed to generate embedding for DAO proposal {dao_proposal.id}" + ) + logger.error(error_msg) + return DAOProposalEmbeddingResult( + success=False, + message=error_msg, + dao_proposals_processed=1, + dao_proposals_embedded=0, + embeddings_failed=1, + ) + + # Update DAO proposal with embedding + dao_proposal_update = ProposalBase( + embedding=dao_proposal_embedding, + embedding_model=( + self.embed_service.model_name + if hasattr(self.embed_service, "model_name") + else "unknown" + ), + ) + + updated_dao_proposal = backend.update_proposal( + dao_proposal.id, dao_proposal_update + ) + if not updated_dao_proposal: + error_msg = ( + f"Failed to save embedding for DAO proposal {dao_proposal.id}" + ) + logger.error(error_msg) + return DAOProposalEmbeddingResult( + success=False, + message=error_msg, + dao_proposals_processed=1, + dao_proposals_embedded=0, + embeddings_failed=1, + ) + + logger.info( + f"Successfully generated embedding for DAO proposal: {dao_proposal.title}" + ) + logger.debug( + f"DAO proposal embedding dimension: {len(dao_proposal_embedding)}" + ) + + return DAOProposalEmbeddingResult( + success=True, + message=f"Successfully generated embedding for DAO proposal {dao_proposal.title}", + dao_proposals_processed=1, + dao_proposals_embedded=1, + embeddings_successful=1, + ) + + except Exception as e: + error_msg = f"Error generating embedding for DAO proposal {dao_proposal.id}: {str(e)}" + logger.error(error_msg, exc_info=True) + return DAOProposalEmbeddingResult( + success=False, + message=error_msg, + error=e, + dao_proposals_processed=1, + dao_proposals_embedded=0, + embeddings_failed=1, + ) + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if DAO proposal embedding error should trigger retry.""" + # Retry on network errors, AI service timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on DAO proposal content validation errors + if "empty" in str(error).lower() or "no content" in str(error).lower(): + return False + if "invalid embedding" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAOProposalEmbeddingResult]]: + """Handle DAO proposal embedding execution errors with recovery logic.""" + if "ai" in str(error).lower() or "embedding" in str(error).lower(): + logger.warning( + f"AI/embedding service error for DAO proposals: {str(error)}, will retry" + ) + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning( + f"Network error during DAO proposal embedding: {str(error)}, will retry" + ) + return None + + # For DAO proposal validation errors, don't retry + return [ + DAOProposalEmbeddingResult( + success=False, + message=f"Unrecoverable DAO proposal embedding error: {str(error)}", + 
error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAOProposalEmbeddingResult] + ) -> None: + """Cleanup after DAO proposal embedding task execution.""" + # Clear cached DAO proposals + self._dao_proposals_without_embeddings = None + logger.debug("DAO proposal embedder task cleanup completed") + + async def _execute_impl( + self, context: JobContext + ) -> List[DAOProposalEmbeddingResult]: + """Execute DAO proposal embedding task with batch processing.""" + results: List[DAOProposalEmbeddingResult] = [] + + if not self._dao_proposals_without_embeddings: + logger.debug("No DAO proposals needing embeddings to process") + return [ + DAOProposalEmbeddingResult( + success=True, + message="No DAO proposals require embedding generation", + dao_proposals_processed=0, + dao_proposals_embedded=0, + ) + ] + + total_dao_proposals = len(self._dao_proposals_without_embeddings) + processed_count = 0 + successful_embeddings = 0 + failed_embeddings = 0 + batch_size = getattr(context, "batch_size", 10) + + logger.info( + f"Processing {total_dao_proposals} DAO proposals requiring embeddings" + ) + + # Process DAO proposals in batches + for i in range(0, len(self._dao_proposals_without_embeddings), batch_size): + batch = self._dao_proposals_without_embeddings[i : i + batch_size] + + for dao_proposal in batch: + logger.debug( + f"Generating embedding for DAO proposal: {dao_proposal.title} ({dao_proposal.id})" + ) + result = await self._generate_embedding_for_dao_proposal(dao_proposal) + results.append(result) + processed_count += 1 + + if result.success: + successful_embeddings += 1 + logger.debug( + f"Successfully embedded DAO proposal {dao_proposal.title}" + ) + else: + failed_embeddings += 1 + logger.error( + f"Failed to embed DAO proposal {dao_proposal.title}: {result.message}" + ) + + logger.info( + f"DAO proposal embedding completed - Processed: {processed_count}, " + f"Successful: {successful_embeddings}, Failed: {failed_embeddings}" + ) + + return results + + +# Create instance for auto-registration +dao_proposal_embedder = DAOProposalEmbedderTask() diff --git a/services/runner/tasks/dao_proposal_evaluation.py b/services/runner/tasks/dao_proposal_evaluation.py index 7492da8d..5ef2910b 100644 --- a/services/runner/tasks/dao_proposal_evaluation.py +++ b/services/runner/tasks/dao_proposal_evaluation.py @@ -1,12 +1,12 @@ """DAO proposal evaluation task implementation.""" -import asyncio -import time from dataclasses import dataclass -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from backend.factory import backend from backend.models import ( + ProposalBase, + ProposalFilter, QueueMessage, QueueMessageBase, QueueMessageFilter, @@ -15,7 +15,8 @@ ) from lib.logger import configure_logger from services.runner.base import BaseTask, JobContext, RunnerResult -from services.workflows.proposal_evaluation import evaluate_and_vote_on_proposal +from services.runner.decorators import JobPriority, job +from services.workflows import evaluate_and_vote_on_proposal logger = configure_logger(__name__) @@ -26,61 +27,81 @@ class DAOProposalEvaluationResult(RunnerResult): proposals_processed: int = 0 proposals_evaluated: int = 0 + evaluations_successful: int = 0 + votes_created: int = 0 errors: List[str] = None def __post_init__(self): self.errors = self.errors or [] +@job( + job_type="dao_proposal_evaluation", + name="DAO Proposal Evaluator", + description="Evaluates DAO proposals using AI analysis with enhanced monitoring and error 
handling", + interval_seconds=30, + priority=JobPriority.HIGH, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=180, + max_concurrent=2, + requires_ai=True, + batch_size=5, + enable_dead_letter_queue=True, +) class DAOProposalEvaluationTask(BaseTask[DAOProposalEvaluationResult]): - """Task runner for evaluating DAO proposals with concurrent processing. - - This task processes multiple DAO proposal evaluation messages concurrently - instead of sequentially. Key features: - - Uses asyncio.gather() for concurrent execution - - Semaphore controls maximum concurrent operations to prevent resource exhaustion - - Configurable concurrency limit (default: 5) - - Graceful error handling that doesn't stop the entire batch - - Performance timing and detailed logging - """ - - QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_EVALUATION - DEFAULT_CONFIDENCE_THRESHOLD = 0.7 - DEFAULT_AUTO_VOTE = False - DEFAULT_MAX_CONCURRENT_EVALUATIONS = ( - 5 # Limit concurrent evaluations to avoid rate limits - ) + """Task runner for evaluating DAO proposals using AI analysis with enhanced capabilities.""" + + QUEUE_TYPE = QueueMessageType.get_or_create("dao_proposal_evaluation") + + async def get_pending_messages(self) -> List[QueueMessage]: + """Get all unprocessed DAO proposal evaluation messages from the queue.""" + filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) + return backend.list_queue_messages(filters=filters) + + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if AI evaluation workflow is available + return True + except Exception as e: + logger.error( + f"Error validating proposal evaluation config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability for AI processing.""" + try: + # Check backend connectivity + backend.get_api_status() + return True + except Exception as e: + logger.error(f"Backend not available: {str(e)}") + return False async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" + """Validate that we have pending evaluation messages to process.""" try: - # Get pending messages from the queue pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal evaluation messages") - if message_count == 0: - logger.info("No pending proposal evaluation messages found") + if not pending_messages: + logger.info("No pending DAO proposal evaluation messages found") return False - # Validate that at least one message has a valid proposal + # Validate each message has valid proposal data + valid_messages = [] for message in pending_messages: - message_data = message.message or {} - proposal_id = message_data.get("proposal_id") - - if not proposal_id: - logger.warning(f"Message {message.id} missing proposal_id") - continue - - # Check if the proposal exists in the database - proposal = backend.get_proposal(proposal_id) - if proposal: - logger.info(f"Found valid proposal {proposal_id} to process") - return True - else: - logger.warning(f"Proposal {proposal_id} not found in database") - - logger.warning("No valid proposals found in pending messages") + if await self._is_message_valid(message): + valid_messages.append(message) + + if valid_messages: + logger.info( + f"Found {len(valid_messages)} valid DAO proposal evaluation messages" + ) + return True + + logger.info("No 
valid DAO proposal evaluation messages to process") return False except Exception as e: @@ -89,16 +110,36 @@ async def _validate_task_specific(self, context: JobContext) -> bool: ) return False - async def process_message(self, message: QueueMessage) -> Dict[str, Any]: - """Process a single DAO proposal evaluation message.""" + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a proposal evaluation message is valid for processing.""" + try: + if not message.message or not message.dao_id: + return False + + proposal_id = message.message.get("proposal_id") + if not proposal_id: + return False + + # Check if proposal exists and is ready for evaluation + proposal = backend.get_proposal(proposal_id) + if not proposal: + return False + + # Check if proposal is already evaluated + if proposal.evaluated: + return False + + return True + except Exception: + return False + + async def _process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single DAO proposal evaluation message with enhanced error handling.""" message_id = message.id message_data = message.message or {} - wallet_id = message.wallet_id dao_id = message.dao_id - logger.debug( - f"Processing proposal evaluation message {message_id} for wallet {wallet_id}" - ) + logger.debug(f"Processing proposal evaluation message {message_id}") # Get the proposal ID from the message proposal_id = message_data.get("proposal_id") @@ -108,86 +149,96 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: return {"success": False, "error": error_msg} try: - # Get the proposal details from the database + # Get the proposal details from database proposal = backend.get_proposal(proposal_id) if not proposal: error_msg = f"Proposal {proposal_id} not found in database" logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Get the DAO information - dao = backend.get_dao(dao_id) if dao_id else None - if not dao: - error_msg = f"DAO not found for proposal {proposal_id}" - logger.error(error_msg) - return {"success": False, "error": error_msg} - - # Execute the proposal evaluation workflow - logger.info(f"Evaluating proposal {proposal.id} for DAO {dao.name}") - - result = await evaluate_and_vote_on_proposal( - proposal_id=proposal.id, - wallet_id=wallet_id, - auto_vote=self.DEFAULT_AUTO_VOTE, # Don't auto-vote, just evaluate - confidence_threshold=self.DEFAULT_CONFIDENCE_THRESHOLD, - dao_id=dao_id, + return { + "success": False, + "error": error_msg, + "should_mark_processed": True, # Remove invalid messages + } + + # Check if proposal is already evaluated + if proposal.evaluated: + logger.info(f"Proposal {proposal_id} is already evaluated, skipping...") + return { + "success": True, + "evaluated": False, + "message": "Proposal already evaluated", + "should_mark_processed": True, + } + + # Check if the DAO has any pending proposals + pending_proposals = backend.list_proposals( + filters=ProposalFilter(dao_id=dao_id, is_open=True, evaluated=False) ) - # Extract evaluation results - evaluation = result.get("evaluation", {}) - approval = evaluation.get("approve", False) - confidence = evaluation.get("confidence_score", 0.0) - reasoning = evaluation.get("reasoning", "No reasoning provided") - formatted_prompt = result.get("formatted_prompt", "") - total_cost = result.get("total_overall_cost", 0.0) - model = evaluation.get("model_name", "Unknown") - evaluation_scores = evaluation.get( - "scores", {} - ) # Extract the full scores data - evaluation_flags = 
evaluation.get("flags", []) # Extract the flags data - - logger.info( - f"Proposal {proposal.id} ({dao.name}): Evaluated with result " - f"{'FOR' if approval else 'AGAINST'} with confidence {confidence:.2f}" - ) + if not pending_proposals: + logger.info( + f"No pending proposals found for DAO {dao_id}, skipping evaluation" + ) + return { + "success": True, + "evaluated": False, + "message": "No pending proposals to evaluate", + "should_mark_processed": True, + } - wallet = backend.get_wallet(wallet_id) + logger.info(f"Evaluating proposal {proposal.proposal_id} for DAO {dao_id}") - # Create a vote record with the evaluation results - vote_data = VoteCreate( - wallet_id=wallet_id, + # Process the proposal using the AI workflow + evaluation_result = await evaluate_and_vote_on_proposal( dao_id=dao_id, - agent_id=wallet.agent_id, # This will be set from the wallet if it exists proposal_id=proposal_id, - answer=approval, - reasoning=reasoning, - confidence=confidence, - prompt=formatted_prompt, - cost=total_cost, - model=model, - profile_id=wallet.profile_id, - evaluation_score=evaluation_scores, # Store the complete evaluation scores - flags=evaluation_flags, # Store the evaluation flags - evaluation=evaluation, + auto_vote=False, ) - # Create the vote record - vote = backend.create_vote(vote_data) - if not vote: - logger.error("Failed to create vote record") - return {"success": False, "error": "Failed to create vote record"} + if not evaluation_result or not evaluation_result.get("success"): + error_msg = f"Proposal evaluation failed: {evaluation_result.get('error', 'Unknown error')}" + logger.error(error_msg) + return {"success": False, "error": error_msg} + + # Update proposal as evaluated + proposal_update = ProposalBase(evaluated=True) + updated_proposal = backend.update_proposal(proposal_id, proposal_update) + + if not updated_proposal: + error_msg = "Failed to update proposal as evaluated" + logger.error(error_msg) + return {"success": False, "error": error_msg} - logger.info(f"Created vote record {vote.id} for proposal {proposal_id}") + # Create votes based on evaluation result + votes_created = 0 + if evaluation_result.get("votes"): + for vote_data in evaluation_result["votes"]: + try: + vote = VoteCreate( + proposal_id=proposal_id, + wallet_id=vote_data["wallet_id"], + answer=vote_data["answer"], + voted=False, + ) + created_vote = backend.create_vote(vote) + if created_vote: + votes_created += 1 + logger.debug( + f"Created vote {created_vote.id} for proposal {proposal_id}" + ) + except Exception as e: + logger.error(f"Failed to create vote: {str(e)}") - # Mark the evaluation message as processed - update_data = QueueMessageBase(is_processed=True) - backend.update_queue_message(message_id, update_data) + logger.info( + f"Successfully evaluated proposal {proposal.proposal_id}, created {votes_created} votes" + ) return { "success": True, - "vote_id": str(vote.id), - "approve": approval, - "confidence": confidence, + "evaluated": True, + "votes_created": votes_created, + "evaluation_result": evaluation_result, + "should_mark_processed": True, } except Exception as e: @@ -195,65 +246,55 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: logger.error(error_msg, exc_info=True) return {"success": False, "error": error_msg} - async def get_pending_messages(self) -> List[QueueMessage]: - """Get all unprocessed messages from the queue.""" - filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) - return backend.list_queue_messages(filters=filters) + 
def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, AI service timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) - async def process_message_with_semaphore( - self, semaphore: asyncio.Semaphore, message: QueueMessage - ) -> Dict[str, Any]: - """Process a message with concurrency control using semaphore. - - This wrapper ensures that each message processing is controlled by the - semaphore to limit concurrent operations and prevent resource exhaustion. - """ - async with semaphore: - try: - return await self.process_message(message) - except Exception as e: - # Log the error and return a failure result instead of raising - # This prevents one failed message from crashing the entire batch - error_msg = f"Failed to process message {message.id}: {str(e)}" - logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} + # Don't retry on validation errors + if "not found" in str(error).lower(): + return False + if "already evaluated" in str(error).lower(): + return False - def get_max_concurrent_evaluations(self, context: JobContext) -> int: - """Get the maximum number of concurrent evaluations from context or default. + return isinstance(error, retry_errors) - This allows for dynamic configuration of concurrency limits based on: - - Context configuration - - Environment variables - - System load considerations - """ - # Allow context to override the default concurrency limit - context_limit = getattr(context, "max_concurrent_evaluations", None) + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAOProposalEvaluationResult]]: + """Handle execution errors with recovery logic.""" + if "ai" in str(error).lower() or "openai" in str(error).lower(): + logger.warning(f"AI service error: {str(error)}, will retry") + return None - if context_limit is not None: - logger.debug(f"Using context-provided concurrency limit: {context_limit}") - return context_limit + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None - # Could also check environment variables or system resources here - # import os - # env_limit = os.getenv("DAO_EVAL_MAX_CONCURRENT") - # if env_limit: - # return int(env_limit) + # For validation errors, don't retry + return [ + DAOProposalEvaluationResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] - return self.DEFAULT_MAX_CONCURRENT_EVALUATIONS + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAOProposalEvaluationResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("DAO proposal evaluation task cleanup completed") async def _execute_impl( self, context: JobContext ) -> List[DAOProposalEvaluationResult]: - """Run the DAO proposal evaluation task with concurrent processing. - - This method processes multiple proposal evaluation messages concurrently - instead of sequentially, which significantly improves performance when - dealing with multiple proposals. The concurrency is controlled by a - semaphore to avoid overwhelming the system or hitting rate limits. 
- """ + """Run the DAO proposal evaluation task with batch processing.""" + # Get pending messages pending_messages = await self.get_pending_messages() - message_count = len(pending_messages) - logger.debug(f"Found {message_count} pending proposal evaluation messages") if not pending_messages: return [ @@ -265,66 +306,82 @@ async def _execute_impl( ) ] - # Process messages concurrently with semaphore to limit concurrent operations - max_concurrent = min( - self.get_max_concurrent_evaluations(context), len(pending_messages) - ) - semaphore = asyncio.Semaphore(max_concurrent) - - logger.info( - f"Processing {len(pending_messages)} messages with max {max_concurrent} concurrent evaluations" - ) - - # Create tasks for concurrent processing - tasks = [ - self.process_message_with_semaphore(semaphore, message) - for message in pending_messages - ] - - # Execute all tasks concurrently and collect results - start_time = time.time() - results = await asyncio.gather(*tasks, return_exceptions=True) - execution_time = time.time() - start_time - - logger.info( - f"Completed concurrent processing of {len(pending_messages)} messages in {execution_time:.2f} seconds" - ) + message_count = len(pending_messages) + logger.info(f"Processing {message_count} pending proposal evaluation messages") - # Process results - processed_count = len(results) + # Process each message + processed_count = 0 evaluated_count = 0 + successful_evaluations = 0 + total_votes_created = 0 errors = [] + batch_size = getattr(context, "batch_size", 5) + + # Process messages in batches + for i in range(0, len(pending_messages), batch_size): + batch = pending_messages[i : i + batch_size] + + for message in batch: + try: + result = await self._process_message(message) + processed_count += 1 + + if result.get("success"): + if result.get("evaluated", False): + evaluated_count += 1 + successful_evaluations += 1 + total_votes_created += result.get("votes_created", 0) + + # Mark message as processed if indicated and store result + if result.get("should_mark_processed", False): + update_data = QueueMessageBase( + is_processed=True, result=result + ) + backend.update_queue_message(message.id, update_data) + logger.debug( + f"Marked message {message.id} as processed with result" + ) + + else: + error_msg = result.get("error", "Unknown error") + errors.append(f"Message {message.id}: {error_msg}") + logger.error( + f"Failed to process message {message.id}: {error_msg}" + ) + + # Store result for failed processing + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message.id, update_data) + logger.debug(f"Stored result for failed message {message.id}") + + except Exception as e: + error_msg = f"Exception processing message {message.id}: {str(e)}" + errors.append(error_msg) + logger.error(error_msg, exc_info=True) + + # Store result for exception cases + error_result = {"success": False, "error": error_msg} + update_data = QueueMessageBase(result=error_result) + backend.update_queue_message(message.id, update_data) + logger.debug(f"Stored error result for message {message.id}") - for i, result in enumerate(results): - if isinstance(result, Exception): - error_msg = f"Exception processing message {pending_messages[i].id}: {str(result)}" - logger.error(error_msg, exc_info=True) - errors.append(error_msg) - elif isinstance(result, dict): - if result.get("success"): - evaluated_count += 1 - else: - errors.append(result.get("error", "Unknown error")) - else: - error_msg = f"Unexpected result type for message 
{pending_messages[i].id}: {type(result)}" - logger.error(error_msg) - errors.append(error_msg) - - logger.debug( - f"Task metrics - Processed: {processed_count}, " - f"Evaluated: {evaluated_count}, Errors: {len(errors)}" + logger.info( + f"DAO proposal evaluation task completed - Processed: {processed_count}/{message_count}, " + f"Evaluated: {evaluated_count}, Votes Created: {total_votes_created}, Errors: {len(errors)}" ) return [ DAOProposalEvaluationResult( success=True, - message=f"Processed {processed_count} proposal(s), evaluated {evaluated_count} proposal(s)", + message=f"Processed {processed_count} message(s), evaluated {evaluated_count} proposal(s)", proposals_processed=processed_count, proposals_evaluated=evaluated_count, + evaluations_successful=successful_evaluations, + votes_created=total_votes_created, errors=errors, ) ] -# Instantiate the task for use in the registry +# Create instance for auto-registration dao_proposal_evaluation = DAOProposalEvaluationTask() diff --git a/services/runner/tasks/dao_proposal_voter.py b/services/runner/tasks/dao_proposal_voter.py index 41e00d32..23b502cf 100644 --- a/services/runner/tasks/dao_proposal_voter.py +++ b/services/runner/tasks/dao_proposal_voter.py @@ -2,7 +2,7 @@ import json from dataclasses import dataclass -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from backend.factory import backend from backend.models import ( @@ -17,6 +17,7 @@ from config import config from lib.logger import configure_logger from services.runner.base import BaseTask, JobContext, RunnerResult +from services.runner.decorators import JobPriority, job from tools.dao_ext_action_proposals import VoteOnActionProposalTool logger = configure_logger(__name__) @@ -28,22 +29,61 @@ class DAOProposalVoteResult(RunnerResult): proposals_processed: int = 0 proposals_voted: int = 0 + votes_cast: int = 0 errors: List[str] = None def __post_init__(self): self.errors = self.errors or [] +@job( + job_type="dao_proposal_vote", + name="DAO Proposal Voter", + description="Processes and votes on DAO proposals with enhanced monitoring and error handling", + interval_seconds=30, + priority=JobPriority.HIGH, + max_retries=2, + retry_delay_seconds=60, + timeout_seconds=300, + max_concurrent=2, + requires_blockchain=True, + batch_size=3, + enable_dead_letter_queue=True, +) class DAOProposalVoterTask(BaseTask[DAOProposalVoteResult]): - """Task runner for processing and voting on DAO proposals.""" + """Task runner for processing and voting on DAO proposals with enhanced capabilities.""" - QUEUE_TYPE = QueueMessageType.DAO_PROPOSAL_VOTE + QUEUE_TYPE = QueueMessageType.get_or_create("dao_proposal_vote") async def get_pending_messages(self) -> List[QueueMessage]: """Get all unprocessed DAO proposal vote messages from the queue.""" filters = QueueMessageFilter(type=self.QUEUE_TYPE, is_processed=False) return backend.list_queue_messages(filters=filters) + async def _validate_config(self, context: JobContext) -> bool: + """Validate task configuration.""" + try: + # Check if voting tool can be initialized + if not config.scheduler: + logger.error("Scheduler config not available") + return False + return True + except Exception as e: + logger.error( + f"Error validating proposal voter config: {str(e)}", exc_info=True + ) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + # Check backend connectivity + backend.get_api_status() + return True + except Exception as e: + 
logger.error(f"Backend not available: {str(e)}") + return False + async def _validate_task_specific(self, context: JobContext) -> bool: """Validate that we have pending messages to process.""" try: @@ -53,10 +93,20 @@ async def _validate_task_specific(self, context: JobContext) -> bool: logger.info("No pending DAO proposal vote messages to process") return False - logger.info( - f"Found {len(pending_messages)} pending DAO proposal vote messages" - ) - return True + # Validate each message has required data + valid_messages = [] + for message in pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + + if valid_messages: + logger.info( + f"Found {len(valid_messages)} valid DAO proposal vote messages" + ) + return True + + logger.info("No valid DAO proposal vote messages to process") + return False except Exception as e: logger.error( @@ -64,8 +114,31 @@ async def _validate_task_specific(self, context: JobContext) -> bool: ) return False - async def process_message(self, message: QueueMessage) -> Dict[str, Any]: - """Process a single DAO proposal voting message.""" + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a proposal vote message is valid for processing.""" + try: + if not message.wallet_id or not message.message: + return False + + proposal_id = message.message.get("proposal_id") + if not proposal_id: + return False + + # Check if proposal exists + try: + proposal_uuid = UUID(proposal_id) + proposal = backend.get_proposal(proposal_uuid) + if not proposal: + return False + except (ValueError, Exception): + return False + + return True + except Exception: + return False + + async def _process_message(self, message: QueueMessage) -> Dict[str, Any]: + """Process a single DAO proposal voting message with enhanced error handling.""" message_id = message.id message_data = message.message or {} wallet_id = message.wallet_id @@ -245,16 +318,40 @@ async def process_message(self, message: QueueMessage) -> Dict[str, Any]: # Mark the message as processed ONLY if ALL votes were handled successfully successful_votes = len([r for r in results if r["success"]]) if successful_votes == len(results) and successful_votes > 0: - update_data = QueueMessageBase(is_processed=True) + result = { + "success": True, + "votes_processed": successful_votes, + "votes_failed": len(results) - successful_votes, + "results": results, + } + update_data = QueueMessageBase(is_processed=True, result=result) backend.update_queue_message(message_id, update_data) logger.info( f"Successfully processed all {successful_votes} votes for message {message_id} - marking as processed" ) elif successful_votes > 0: + result = { + "success": False, + "votes_processed": successful_votes, + "votes_failed": len(results) - successful_votes, + "results": results, + "message": "Partial success - some votes failed", + } + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) logger.warning( f"Only {successful_votes}/{len(results)} votes succeeded for message {message_id} - leaving unprocessed for retry" ) else: + result = { + "success": False, + "votes_processed": 0, + "votes_failed": len(results), + "results": results, + "message": "All votes failed", + } + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) logger.error( f"No votes succeeded for message {message_id} - leaving unprocessed for retry" ) @@ -269,10 +366,59 @@ async def process_message(self, message: 
QueueMessage) -> Dict[str, Any]: except Exception as e: error_msg = f"Error processing message {message_id}: {str(e)}" logger.error(error_msg, exc_info=True) - return {"success": False, "error": error_msg} + result = {"success": False, "error": error_msg} + + # Store result even for failed processing + update_data = QueueMessageBase(result=result) + backend.update_queue_message(message_id, update_data) + + return result + + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, blockchain timeouts + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on validation errors + if "not found" in str(error).lower(): + return False + if "invalid" in str(error).lower() and "format" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DAOProposalVoteResult]]: + """Handle execution errors with recovery logic.""" + if "blockchain" in str(error).lower() or "proposal" in str(error).lower(): + logger.warning(f"Blockchain/proposal error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For validation errors, don't retry + return [ + DAOProposalVoteResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DAOProposalVoteResult] + ) -> None: + """Cleanup after task execution.""" + logger.debug("DAO proposal voter task cleanup completed") async def _execute_impl(self, context: JobContext) -> List[DAOProposalVoteResult]: - """Run the DAO proposal voter task by processing each message directly.""" + """Run the DAO proposal voter task by processing each message with batch processing.""" # Get pending messages pending_messages = await self.get_pending_messages() @@ -292,44 +438,55 @@ async def _execute_impl(self, context: JobContext) -> List[DAOProposalVoteResult # Process each message processed_count = 0 total_votes_processed = 0 + total_votes_cast = 0 errors = [] + batch_size = getattr(context, "batch_size", 3) - for message in pending_messages: - try: - result = await self.process_message(message) - processed_count += 1 - - if result.get("success"): - votes_processed = result.get("votes_processed", 0) - total_votes_processed += votes_processed - logger.debug( - f"Message {message.id}: processed {votes_processed} votes" - ) - else: - error_msg = result.get("error", "Unknown error") - errors.append(f"Message {message.id}: {error_msg}") - logger.error(f"Failed to process message {message.id}: {error_msg}") + # Process messages in batches + for i in range(0, len(pending_messages), batch_size): + batch = pending_messages[i : i + batch_size] + + for message in batch: + try: + result = await self._process_message(message) + processed_count += 1 + + if result.get("success"): + votes_processed = result.get("votes_processed", 0) + total_votes_processed += votes_processed + if votes_processed > 0: + total_votes_cast += votes_processed + logger.debug( + f"Message {message.id}: processed {votes_processed} votes" + ) + else: + error_msg = result.get("error", "Unknown error") + errors.append(f"Message {message.id}: {error_msg}") + logger.error( + f"Failed to process message {message.id}: {error_msg}" + ) - except 
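_should_retry_on_error above separates transient failures (connection problems, timeouts) from permanent ones (validation errors) using both the exception type and heuristics on its message. A minimal standalone version of that idea, as a sketch:

# Sketch: decide whether an exception is worth retrying. Transient network
# errors retry; "not found" / "invalid ... format" style errors do not.
RETRYABLE_TYPES = (ConnectionError, TimeoutError)


def should_retry(error: Exception) -> bool:
    text = str(error).lower()
    if "not found" in text:
        return False
    if "invalid" in text and "format" in text:
        return False
    return isinstance(error, RETRYABLE_TYPES)


assert should_retry(TimeoutError("read timed out")) is True
assert should_retry(ValueError("invalid proposal_id format")) is False
# Message heuristics win over type: a "not found" error is never retried.
assert should_retry(ConnectionError("proposal not found upstream")) is False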
Exception as e: - error_msg = f"Exception processing message {message.id}: {str(e)}" - errors.append(error_msg) - logger.error(error_msg, exc_info=True) + except Exception as e: + error_msg = f"Exception processing message {message.id}: {str(e)}" + errors.append(error_msg) + logger.error(error_msg, exc_info=True) logger.info( - f"Task completed - Processed: {processed_count}/{message_count} messages, " - f"Votes: {total_votes_processed}, Errors: {len(errors)}" + f"DAO proposal voter task completed - Processed: {processed_count}/{message_count} messages, " + f"Votes cast: {total_votes_cast}, Errors: {len(errors)}" ) return [ DAOProposalVoteResult( success=True, - message=f"Processed {processed_count} message(s), voted on {total_votes_processed} vote(s)", + message=f"Processed {processed_count} message(s), voted on {total_votes_cast} vote(s)", proposals_processed=processed_count, proposals_voted=total_votes_processed, + votes_cast=total_votes_cast, errors=errors, ) ] -# Instantiate the task for use in the registry +# Create instance for auto-registration dao_proposal_voter = DAOProposalVoterTask() diff --git a/services/runner/tasks/dao_task.py b/services/runner/tasks/dao_task.py deleted file mode 100644 index f33e0fbd..00000000 --- a/services/runner/tasks/dao_task.py +++ /dev/null @@ -1,239 +0,0 @@ -from dataclasses import dataclass -from datetime import datetime -from typing import Any, Dict, List, Optional -from uuid import UUID - -from backend.factory import backend -from backend.models import ( - DAOFilter, - Profile, - QueueMessage, - QueueMessageBase, - QueueMessageFilter, - QueueMessageType, -) -from lib.logger import configure_logger -from services.workflows import execute_workflow_stream -from tools.tools_factory import filter_tools_by_names, initialize_tools - -from ..base import BaseTask, JobContext, RunnerConfig, RunnerResult - -logger = configure_logger(__name__) - - -@dataclass -class DAOProcessingResult(RunnerResult): - """Result of DAO processing operation.""" - - dao_id: Optional[UUID] = None - deployment_data: Optional[Dict[str, Any]] = None - - -class DAOTask(BaseTask[DAOProcessingResult]): - """Task for processing DAO deployments.""" - - def __init__(self, config: Optional[RunnerConfig] = None): - super().__init__(config) - self._pending_messages = None - self.tools_map_all = initialize_tools( - Profile(id=self.config.twitter_profile_id, created_at=datetime.now()), - agent_id=self.config.twitter_agent_id, - ) - self.tools_map = filter_tools_by_names( - ["contract_deploy_dao"], self.tools_map_all - ) - logger.debug(f"Initialized {len(self.tools_map)} DAO deployment tools") - - async def _validate_config(self, context: JobContext) -> bool: - """Validate task configuration.""" - try: - if not self.tools_map: - logger.error("No DAO deployment tools available") - return False - - if not self.tools_map_all: - logger.error("Tools not properly initialized") - return False - - return True - except Exception as e: - logger.error(f"Error validating DAO config: {str(e)}", exc_info=True) - return False - - async def _validate_prerequisites(self, context: JobContext) -> bool: - """Validate task prerequisites.""" - try: - # Check for pending DAOs first - pending_daos = backend.list_daos( - filters=DAOFilter( - is_deployed=False, - is_broadcasted=True, - wallet_id=self.config.twitter_wallet_id, - ) - ) - if pending_daos: - logger.info( - f"Found {len(pending_daos)} pending Twitter DAO(s), skipping queue processing" - ) - return False - - # Cache pending messages for later use - 
self._pending_messages = backend.list_queue_messages( - filters=QueueMessageFilter( - type=QueueMessageType.DAO, is_processed=False - ) - ) - return True - except Exception as e: - logger.error(f"Error validating DAO prerequisites: {str(e)}", exc_info=True) - self._pending_messages = None - return False - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - if not self._pending_messages: - logger.debug("No pending DAO messages found") - return False - - message_count = len(self._pending_messages) - if message_count > 0: - logger.debug(f"Found {message_count} unprocessed DAO messages") - return True - - logger.debug("No unprocessed DAO messages to process") - return False - - except Exception as e: - logger.error(f"Error in DAO task validation: {str(e)}", exc_info=True) - return False - - async def _validate_message( - self, message: QueueMessage - ) -> Optional[DAOProcessingResult]: - """Validate a single message before processing.""" - try: - params = message.message.get("parameters", {}) - required_params = [ - "token_symbol", - "token_name", - "token_description", - "token_max_supply", - "token_decimals", - "origin_address", - "mission", - ] - - missing_params = [p for p in required_params if p not in params] - if missing_params: - return DAOProcessingResult( - success=False, - message=f"Missing required parameters: {', '.join(missing_params)}", - ) - - return None # Validation passed - - except Exception as e: - logger.error( - f"Error validating message {message.id}: {str(e)}", exc_info=True - ) - return DAOProcessingResult( - success=False, - message=f"Error validating message: {str(e)}", - error=e, - ) - - def _get_dao_parameters(self, message: QueueMessage) -> Optional[str]: - """Extract and format DAO parameters from message.""" - try: - params = message.message["parameters"] - return ( - f"Please deploy a DAO with the following parameters:\n" - f"Token Symbol: {params['token_symbol']}\n" - f"Token Name: {params['token_name']}\n" - f"Token Description: {params['token_description']}\n" - f"Token Max Supply: {params['token_max_supply']}\n" - f"Token Decimals: {params['token_decimals']}\n" - f"Origin Address: {params['origin_address']}\n" - f"Tweet Origin: {message.tweet_id}\n" - f"Mission: {params['mission']}" - ) - except KeyError as e: - logger.error(f"Missing required parameter in message: {e}") - return None - - async def _process_dao_message(self, message: QueueMessage) -> DAOProcessingResult: - """Process a single DAO message.""" - try: - # Validate message first - validation_result = await self._validate_message(message) - if validation_result: - return validation_result - - tool_input = self._get_dao_parameters(message) - if not tool_input: - return DAOProcessingResult( - success=False, - message="Failed to extract DAO parameters from message", - ) - - logger.info(f"Processing DAO deployment for message {message.id}") - logger.debug(f"DAO deployment parameters: {tool_input}") - - deployment_data = {} - async for chunk in execute_workflow_stream( - history=[], input_str=tool_input, tools_map=self.tools_map - ): - if chunk["type"] == "result": - deployment_data = chunk["content"] - logger.info("DAO deployment completed successfully") - logger.debug(f"Deployment data: {deployment_data}") - elif chunk["type"] == "tool": - logger.debug(f"Executing tool: {chunk}") - - return DAOProcessingResult( - success=True, - message="Successfully processed DAO deployment", - deployment_data=deployment_data, - ) - - except 
Exception as e: - logger.error(f"Error processing DAO message: {str(e)}", exc_info=True) - return DAOProcessingResult( - success=False, message=f"Error processing DAO: {str(e)}", error=e - ) - - async def _execute_impl(self, context: JobContext) -> List[DAOProcessingResult]: - """Execute DAO deployment task.""" - results: List[DAOProcessingResult] = [] - try: - if not self._pending_messages: - return results - - # Process one message at a time for DAOs - message = self._pending_messages[0] - logger.debug(f"Processing DAO deployment message: {message.id}") - - result = await self._process_dao_message(message) - results.append(result) - - if result.success: - backend.update_queue_message( - queue_message_id=message.id, - update_data=QueueMessageBase(is_processed=True), - ) - logger.debug(f"Marked message {message.id} as processed") - - return results - - except Exception as e: - logger.error(f"Error in DAO task: {str(e)}", exc_info=True) - results.append( - DAOProcessingResult( - success=False, message=f"Error in DAO task: {str(e)}", error=e - ) - ) - return results - - -dao_task = DAOTask() diff --git a/services/runner/tasks/dao_tweet_task.py b/services/runner/tasks/dao_tweet_task.py deleted file mode 100644 index e3bf185d..00000000 --- a/services/runner/tasks/dao_tweet_task.py +++ /dev/null @@ -1,230 +0,0 @@ -from dataclasses import dataclass -from typing import Any, List, Optional -from uuid import UUID - -from backend.factory import backend -from backend.models import ( - QueueMessageBase, - QueueMessageCreate, - QueueMessageFilter, - QueueMessageType, - TokenFilter, -) -from lib.logger import configure_logger -from services.workflows import generate_dao_tweet - -from ..base import BaseTask, JobContext, RunnerConfig, RunnerResult - -logger = configure_logger(__name__) - - -@dataclass -class DAOTweetProcessingResult(RunnerResult): - """Result of DAO tweet processing operation.""" - - dao_id: Optional[UUID] = None - tweet_id: Optional[str] = None - - -class DAOTweetTask(BaseTask[DAOTweetProcessingResult]): - """Task for generating tweets for completed DAOs.""" - - def __init__(self, config: Optional[RunnerConfig] = None): - super().__init__(config) - self._pending_messages = None - - async def _validate_config(self, context: JobContext) -> bool: - """Validate task configuration.""" - try: - # No specific config requirements for this task - return True - except Exception as e: - logger.error( - f"Error validating DAO tweet task config: {str(e)}", exc_info=True - ) - return False - - async def _validate_prerequisites(self, context: JobContext) -> bool: - """Validate task prerequisites.""" - try: - # Cache pending messages for later use - self._pending_messages = backend.list_queue_messages( - filters=QueueMessageFilter( - type=QueueMessageType.DAO_TWEET, is_processed=False - ) - ) - return True - except Exception as e: - logger.error( - f"Error validating DAO tweet prerequisites: {str(e)}", exc_info=True - ) - self._pending_messages = None - return False - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - try: - if not self._pending_messages: - logger.debug("No pending DAO tweet messages found") - return False - - message_count = len(self._pending_messages) - if message_count > 0: - logger.debug(f"Found {message_count} pending DAO tweet messages") - return True - - logger.debug("No pending DAO tweet messages to process") - return False - - except Exception as e: - logger.error(f"Error in DAO tweet task validation: {str(e)}", 
exc_info=True) - return False - - async def _validate_message( - self, message: Any - ) -> Optional[DAOTweetProcessingResult]: - """Validate a single message before processing.""" - try: - if not message.dao_id: - return DAOTweetProcessingResult( - success=False, message="DAO message has no dao_id", dao_id=None - ) - - # Validate DAO exists and is deployed - dao = backend.get_dao(message.dao_id) - if not dao: - return DAOTweetProcessingResult( - success=False, - message=f"No DAO found for id: {message.dao_id}", - dao_id=message.dao_id, - ) - - if not dao.is_deployed: - return DAOTweetProcessingResult( - success=False, - message=f"DAO is not deployed: {message.dao_id}", - dao_id=message.dao_id, - ) - - # Validate token exists - token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id)) - if not token: - return DAOTweetProcessingResult( - success=False, - message=f"No token found for DAO: {message.dao_id}", - dao_id=message.dao_id, - ) - - return None # Validation passed - - except Exception as e: - logger.error( - f"Error validating message {message.id}: {str(e)}", exc_info=True - ) - return DAOTweetProcessingResult( - success=False, - message=f"Error validating message: {str(e)}", - error=e, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, - ) - - async def _process_dao_message(self, message: Any) -> DAOTweetProcessingResult: - """Process a single DAO message.""" - try: - # Validate message first - validation_result = await self._validate_message(message) - if validation_result: - return validation_result - - # Get the validated DAO and token info - dao = backend.get_dao(message.dao_id) - token = backend.list_tokens(filters=TokenFilter(dao_id=message.dao_id))[0] - - logger.info(f"Generating tweet for DAO: {dao.name} ({dao.id})") - logger.debug( - f"DAO details - Symbol: {token.symbol}, Mission: {dao.mission}" - ) - - # Generate tweet - generated_tweet = await generate_dao_tweet( - dao_name=dao.name, - dao_symbol=token.symbol, - dao_mission=dao.mission, - dao_id=dao.id, - ) - - # Create a new tweet message in the queue - tweet_message = backend.create_queue_message( - QueueMessageCreate( - type="tweet", - dao_id=dao.id, - message={"body": generated_tweet["tweet_text"]}, - tweet_id=message.tweet_id, - conversation_id=message.conversation_id, - ) - ) - - logger.info(f"Created tweet message for DAO: {dao.name}") - logger.debug(f"Tweet message ID: {tweet_message.id}") - - return DAOTweetProcessingResult( - success=True, - message="Successfully generated tweet", - dao_id=dao.id, - tweet_id=message.tweet_id, - ) - - except Exception as e: - logger.error( - f"Error processing DAO message {message.id}: {str(e)}", exc_info=True - ) - return DAOTweetProcessingResult( - success=False, - message=f"Error processing DAO: {str(e)}", - error=e, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, - ) - - async def _execute_impl( - self, context: JobContext - ) -> List[DAOTweetProcessingResult]: - """Execute DAO tweet processing task.""" - results: List[DAOTweetProcessingResult] = [] - try: - if not self._pending_messages: - return results - - processed_count = 0 - success_count = 0 - - for message in self._pending_messages: - logger.debug(f"Processing DAO tweet message: {message.id}") - result = await self._process_dao_message(message) - results.append(result) - processed_count += 1 - - if result.success: - success_count += 1 - backend.update_queue_message( - queue_message_id=message.id, - update_data=QueueMessageBase(is_processed=True), - ) - logger.debug(f"Marked 
message {message.id} as processed") - - logger.debug( - f"Task metrics - Processed: {processed_count}, Successful: {success_count}" - ) - - return results - - except Exception as e: - logger.error(f"Error in DAO tweet task: {str(e)}", exc_info=True) - results.append( - DAOTweetProcessingResult( - success=False, message=f"Error in DAO tweet task: {str(e)}", error=e - ) - ) - return results - - -dao_tweet_task = DAOTweetTask() diff --git a/services/runner/tasks/discord_task.py b/services/runner/tasks/discord_task.py index 5c9107ba..166452a5 100644 --- a/services/runner/tasks/discord_task.py +++ b/services/runner/tasks/discord_task.py @@ -9,10 +9,11 @@ QueueMessageFilter, QueueMessageType, ) +from config import config from lib.logger import configure_logger from services.discord.discord_factory import create_discord_service from services.runner.base import BaseTask, JobContext, RunnerConfig, RunnerResult -from config import config +from services.runner.decorators import JobPriority, job logger = configure_logger(__name__) @@ -23,20 +24,62 @@ class DiscordProcessingResult(RunnerResult): queue_message_id: Optional[UUID] = None dao_id: Optional[UUID] = None + messages_sent: int = 0 + webhook_url_used: Optional[str] = None +@job( + job_type="discord", + name="Discord Message Sender", + description="Sends Discord messages from queue with webhook support and enhanced error handling", + interval_seconds=20, + priority=JobPriority.MEDIUM, + max_retries=3, + retry_delay_seconds=30, + timeout_seconds=120, + max_concurrent=3, + requires_discord=True, + batch_size=10, + enable_dead_letter_queue=True, +) class DiscordTask(BaseTask[DiscordProcessingResult]): - """Task for sending Discord messages from the queue.""" + """Task for sending Discord messages from the queue with enhanced capabilities.""" def __init__(self, config: Optional[RunnerConfig] = None): super().__init__(config) self._pending_messages: Optional[List[QueueMessage]] = None - self.discord_service = None + self._discord_services: dict[str, object] = {} async def _validate_config(self, context: JobContext) -> bool: """Validate task configuration.""" - # No special config needed for Discord - return True + try: + # Check if at least one webhook URL is configured + if ( + not config.discord.webhook_url_passed + and not config.discord.webhook_url_failed + ): + logger.error("No Discord webhook URLs configured") + return False + return True + except Exception as e: + logger.error(f"Error validating Discord config: {str(e)}", exc_info=True) + return False + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" + try: + # Test Discord service creation + test_webhook = ( + config.discord.webhook_url_passed or config.discord.webhook_url_failed + ) + discord_service = create_discord_service(webhook_url=test_webhook) + if not discord_service: + logger.error("Cannot create Discord service") + return False + return True + except Exception as e: + logger.error(f"Discord resource validation failed: {str(e)}") + return False async def _validate_prerequisites(self, context: JobContext) -> bool: """Validate task prerequisites.""" @@ -59,15 +102,70 @@ async def _validate_task_specific(self, context: JobContext) -> bool: if not self._pending_messages: logger.debug("No pending Discord messages found") return False - logger.debug(f"Found {len(self._pending_messages)} pending Discord messages") - return True + + # Validate each message has required content + valid_messages = [] + for message in 
self._pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) + + self._pending_messages = valid_messages + + if valid_messages: + logger.debug(f"Found {len(valid_messages)} valid Discord messages") + return True + + logger.debug("No valid Discord messages to process") + return False + + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a Discord message is valid for processing.""" + try: + if not message.message or not isinstance(message.message, dict): + return False + + content = message.message.get("content") + if not content or not content.strip(): + return False + + return True + except Exception: + return False + + def _get_webhook_url(self, message: QueueMessage) -> str: + """Get the appropriate webhook URL for the message.""" + # Allow message-level webhook override + webhook_url = message.message.get("webhook_url") + if webhook_url: + return webhook_url + + # Select based on proposal status + proposal_status = message.message.get("proposal_status") + if proposal_status == "passed": + return config.discord.webhook_url_passed + elif proposal_status == "failed": + return config.discord.webhook_url_failed + else: + # Default to passed webhook for backwards compatibility + return config.discord.webhook_url_passed + + def _get_discord_service(self, webhook_url: str): + """Get or create Discord service with caching.""" + if webhook_url in self._discord_services: + return self._discord_services[webhook_url] + + discord_service = create_discord_service(webhook_url=webhook_url) + if discord_service: + self._discord_services[webhook_url] = discord_service + + return discord_service async def _process_discord_message( self, message: QueueMessage ) -> DiscordProcessingResult: - """Process a single Discord queue message.""" + """Process a single Discord queue message with enhanced error handling.""" try: - # Extract content and optional embeds from message.message + # Extract content and optional parameters from message.message if not message.message: return DiscordProcessingResult( success=False, @@ -75,23 +173,23 @@ async def _process_discord_message( queue_message_id=message.id, dao_id=message.dao_id, ) + content = message.message.get("content") embeds = message.message.get("embeds") tts = message.message.get("tts", False) - proposal_status = message.message.get("proposal_status") - webhook_url = message.message.get("webhook_url") # Allow override - # Select appropriate webhook URL based on proposal status + # Get appropriate webhook URL + webhook_url = self._get_webhook_url(message) if not webhook_url: - if proposal_status == "passed": - webhook_url = config.discord.webhook_url_passed - elif proposal_status == "failed": - webhook_url = config.discord.webhook_url_failed - else: - # Default to passed webhook for backwards compatibility - webhook_url = config.discord.webhook_url_passed + return DiscordProcessingResult( + success=False, + message="No webhook URL available for Discord message", + queue_message_id=message.id, + dao_id=message.dao_id, + ) - discord_service = create_discord_service(webhook_url=webhook_url) + # Get Discord service + discord_service = self._get_discord_service(webhook_url) if not discord_service: return DiscordProcessingResult( success=False, @@ -100,7 +198,12 @@ async def _process_discord_message( dao_id=message.dao_id, ) + logger.info(f"Sending Discord message for queue {message.id}") + logger.debug(f"Content: {content[:100]}..." 
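_get_webhook_url above prefers a per-message override, then routes on proposal_status, and falls back to the "passed" webhook for backwards compatibility. Roughly, as a standalone sketch (the URLs below are placeholders; the real task reads them from config.discord.*):

# Sketch: webhook routing for Discord queue messages. URLs are placeholders.
from typing import Dict

PASSED_WEBHOOK = "https://discord.com/api/webhooks/example-passed"
FAILED_WEBHOOK = "https://discord.com/api/webhooks/example-failed"


def pick_webhook(message: Dict) -> str:
    """Message-level override wins, then proposal_status, then the default."""
    override = message.get("webhook_url")
    if override:
        return override
    if message.get("proposal_status") == "failed":
        return FAILED_WEBHOOK
    # "passed" and anything else fall back to the passed webhook,
    # matching the task's backwards-compatible behaviour.
    return PASSED_WEBHOOK


assert pick_webhook({"proposal_status": "failed"}) == FAILED_WEBHOOK
assert pick_webhook({"content": "hello"}) == PASSED_WEBHOOK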
if content else "No content") + + # Send the message result = discord_service.send_message(content, embeds=embeds, tts=tts) + if result.get("success"): logger.info(f"Successfully sent Discord message for queue {message.id}") return DiscordProcessingResult( @@ -108,6 +211,8 @@ async def _process_discord_message( message="Successfully sent Discord message", queue_message_id=message.id, dao_id=message.dao_id, + messages_sent=1, + webhook_url_used=webhook_url, ) else: logger.error(f"Failed to send Discord message: {result}") @@ -117,6 +222,7 @@ async def _process_discord_message( queue_message_id=message.id, dao_id=message.dao_id, ) + except Exception as e: logger.error( f"Error processing Discord message {message.id}: {str(e)}", @@ -130,22 +236,130 @@ async def _process_discord_message( dao_id=message.dao_id, ) + def _should_retry_on_error(self, error: Exception, context: JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, API timeouts, webhook issues + retry_errors = ( + ConnectionError, + TimeoutError, + ) + + # Don't retry on configuration errors + if "webhook" in str(error).lower() and "not configured" in str(error).lower(): + return False + + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[DiscordProcessingResult]]: + """Handle execution errors with recovery logic.""" + if "webhook" in str(error).lower() or "discord" in str(error).lower(): + logger.warning(f"Discord service error: {str(error)}, will retry") + return None + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For configuration errors, don't retry + return [ + DiscordProcessingResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, + ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[DiscordProcessingResult] + ) -> None: + """Cleanup after task execution.""" + # Clear cached pending messages + self._pending_messages = None + + # Keep Discord services cached for reuse + logger.debug( + f"Discord task cleanup completed. 
Cached services: {len(self._discord_services)}" + ) + async def _execute_impl(self, context: JobContext) -> List[DiscordProcessingResult]: - """Execute Discord message sending task.""" + """Execute Discord message sending task with batch processing.""" results: List[DiscordProcessingResult] = [] + if not self._pending_messages: + logger.debug("No pending Discord messages to process") return results - for message in self._pending_messages: - logger.debug(f"Processing Discord message: {message.id}") - result = await self._process_discord_message(message) - results.append(result) - if result.success: - backend.update_queue_message( - queue_message_id=message.id, - update_data=QueueMessageBase(is_processed=True), - ) - logger.debug(f"Marked Discord message {message.id} as processed") + + processed_count = 0 + success_count = 0 + batch_size = getattr(context, "batch_size", 10) + + # Process messages in batches + for i in range(0, len(self._pending_messages), batch_size): + batch = self._pending_messages[i : i + batch_size] + + for message in batch: + logger.debug(f"Processing Discord message: {message.id}") + result = await self._process_discord_message(message) + results.append(result) + processed_count += 1 + + if result.success: + success_count += 1 + # Mark message as processed with result + result_dict = { + "success": result.success, + "message": result.message, + "queue_message_id": ( + str(result.queue_message_id) + if result.queue_message_id + else None + ), + "dao_id": str(result.dao_id) if result.dao_id else None, + "messages_sent": result.messages_sent, + "webhook_url_used": result.webhook_url_used, + "error": str(result.error) if result.error else None, + } + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase( + is_processed=True, result=result_dict + ), + ) + logger.debug( + f"Marked Discord message {message.id} as processed with result" + ) + else: + # Store result for failed processing + result_dict = { + "success": result.success, + "message": result.message, + "queue_message_id": ( + str(result.queue_message_id) + if result.queue_message_id + else None + ), + "dao_id": str(result.dao_id) if result.dao_id else None, + "messages_sent": result.messages_sent, + "webhook_url_used": result.webhook_url_used, + "error": str(result.error) if result.error else None, + } + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(result=result_dict), + ) + logger.debug( + f"Stored result for failed Discord message {message.id}" + ) + + logger.info( + f"Discord task completed - Processed: {processed_count}, " + f"Successful: {success_count}, Failed: {processed_count - success_count}" + ) + return results +# Create instance for auto-registration discord_task = DiscordTask() diff --git a/services/runner/tasks/proposal_embedder.py b/services/runner/tasks/proposal_embedder.py deleted file mode 100644 index 4107ada4..00000000 --- a/services/runner/tasks/proposal_embedder.py +++ /dev/null @@ -1,235 +0,0 @@ -"""Proposal embedding task implementation.""" - -from dataclasses import dataclass -from typing import List, Optional - -import openai -from langchain_openai import OpenAIEmbeddings - -from backend.factory import backend -from backend.models import Proposal -from config import config -from lib.logger import configure_logger -from services.runner.base import BaseTask, JobContext, RunnerResult - -logger = configure_logger(__name__) - -PROPOSAL_COLLECTION_NAME = "proposals" -EMBEDDING_MODEL = "text-embedding-ada-002" - - 
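The Discord task above (and the tweet task later in this diff) copies its dataclass result into a plain dict, stringifying UUIDs and exceptions, before storing it on the queue message. A small helper of that shape, sketched here with an illustrative field list rather than the project's actual result classes:

# Sketch: turn a RunnerResult-style dataclass into a JSON-safe dict before
# persisting it on the queue message. Field names here are illustrative.
from dataclasses import asdict, dataclass, is_dataclass
from typing import Any, Dict, Optional
from uuid import UUID


@dataclass
class ExampleResult:
    success: bool
    message: str
    dao_id: Optional[UUID] = None
    error: Optional[Exception] = None


def to_storable(result: Any) -> Dict[str, Any]:
    """Stringify UUIDs and exceptions so the dict can be serialised as JSON."""
    raw = asdict(result) if is_dataclass(result) else dict(result)
    return {
        key: (str(value) if isinstance(value, (UUID, Exception)) else value)
        for key, value in raw.items()
    }


stored = to_storable(ExampleResult(success=False, message="failed", error=RuntimeError("boom")))
assert stored["error"] == "boom"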
-@dataclass -class ProposalEmbedderResult(RunnerResult): - """Result of proposal embedding operation.""" - - proposals_checked: int = 0 - proposals_embedded: int = 0 - errors: List[str] = None - - def __post_init__(self): - self.errors = self.errors or [] - - -class ProposalEmbedderTask(BaseTask[ProposalEmbedderResult]): - """Task runner for embedding DAO proposals into a vector store.""" - - async def _validate_task_specific(self, context: JobContext) -> bool: - """Validate task-specific conditions.""" - if not config.api.openai_api_key: - logger.warning("OpenAI API key is not configured. Skipping embedding.") - return False - if not backend.vecs_client: - logger.warning("Vector client (vecs) not initialized. Skipping embedding.") - return False - # Basic check: Task runs if enabled and dependencies are met. - # More sophisticated check could compare DB count vs vector store count. - return True - - def _format_proposal_for_embedding(self, proposal: Proposal) -> str: - """Format proposal data into a string for embedding.""" - parts = [ - f"Title: {proposal.title or 'N/A'}", - f"Content: {proposal.content or 'N/A'}", - f"Type: {proposal.type.value if proposal.type else 'N/A'}", - ] - if proposal.action: - parts.append(f"Action: {proposal.action}") - # Add more relevant fields as needed - return "\n".join(parts) - - async def _get_embeddings(self, texts: List[str]) -> Optional[List[List[float]]]: - """Get embeddings for a list of texts using OpenAI API.""" - try: - # Instantiate the embeddings model here - embeddings_model = OpenAIEmbeddings(model=EMBEDDING_MODEL) - # Use the embed_documents method - embeddings = await embeddings_model.aembed_documents(texts) - return embeddings - except Exception as e: - logger.error( - f"Error getting embeddings using Langchain OpenAI: {str(e)}", - exc_info=True, - ) - return None - - async def _execute_impl(self, context: JobContext) -> List[ProposalEmbedderResult]: - """Run the proposal embedding task.""" - logger.info("Starting proposal embedding task...") - errors: List[str] = [] - proposals_checked = 0 - proposals_embedded = 0 - - try: - # Ensure OpenAI client is configured (Langchain uses this implicitly or explicitly) - if not config.api.openai_api_key: - raise ValueError("OpenAI API key not found in configuration.") - openai.api_key = config.api.openai_api_key - - # Ensure the vector collection exists - try: - collection = backend.get_vector_collection(PROPOSAL_COLLECTION_NAME) - except Exception: - logger.info( - f"Collection '{PROPOSAL_COLLECTION_NAME}' not found, creating..." - ) - # Assuming default dimensions are okay, or fetch from config/model - collection = backend.create_vector_collection(PROPOSAL_COLLECTION_NAME) - # Optionally create an index for better query performance - backend.create_vector_index(PROPOSAL_COLLECTION_NAME) - - # Get all proposals from the database - all_proposals = backend.list_proposals() - proposals_checked = len(all_proposals) - logger.debug(f"Found {proposals_checked} proposals in the database.") - - if not all_proposals: - logger.info("No proposals found to embed.") - return [ - ProposalEmbedderResult( - success=True, - message="No proposals found.", - proposals_checked=0, - proposals_embedded=0, - ) - ] - - # Get IDs of proposals already in the vector store - db_proposal_ids = {str(p.id) for p in all_proposals} - existing_vector_ids = set() - try: - # Fetch existing records - assuming fetch returns tuples (id, vector, metadata) - # We only need the IDs, fetch minimal data. 
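The deleted embedder works in two steps, as the surrounding hunk shows: diff the proposal IDs in the database against the IDs already present in the vector collection, then embed only the new proposals with OpenAIEmbeddings.aembed_documents. A condensed, self-contained sketch of that flow; fetch_existing_ids and upsert are stand-ins for the backend's vector-store helpers, not real project functions.

# Sketch: embed only proposals that are not yet in the vector store.
# "fetch_existing_ids" and "upsert" are stand-ins for backend helpers.
from typing import Callable, Dict, Iterable, List, Tuple

from langchain_openai import OpenAIEmbeddings


async def embed_new_proposals(
    proposals: List[Dict],
    fetch_existing_ids: Callable[[Iterable[str]], set],
    upsert: Callable[[List[Tuple[str, List[float], Dict]]], None],
) -> int:
    db_ids = {p["id"] for p in proposals}
    new_ids = db_ids - fetch_existing_ids(db_ids)
    todo = [p for p in proposals if p["id"] in new_ids]
    if not todo:
        return 0

    texts = [
        f"Title: {p.get('title', 'N/A')}\nContent: {p.get('content', 'N/A')}"
        for p in todo
    ]
    embeddings = await OpenAIEmbeddings(model="text-embedding-ada-002").aembed_documents(texts)

    records = [
        (p["id"], vector, {"title": p.get("title", ""), "dao_id": p.get("dao_id", "")})
        for p, vector in zip(todo, embeddings)
    ]
    upsert(records)
    return len(records)

# "await embed_new_proposals(proposals, fetch_existing_ids, upsert)" returns the
# number of newly embedded proposals (requires OPENAI_API_KEY to be set).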
- # Note: Fetching potentially large lists of IDs might be inefficient - # depending on the backend/library implementation. - fetched_vectors = await backend.fetch_vectors( - collection_name=PROPOSAL_COLLECTION_NAME, ids=list(db_proposal_ids) - ) - existing_vector_ids = {record[0] for record in fetched_vectors} - logger.debug( - f"Found {len(existing_vector_ids)} existing proposal vectors out of {len(db_proposal_ids)} DB proposals." - ) - except Exception as e: - logger.warning( - f"Could not efficiently fetch existing vector IDs: {str(e)}. Proceeding may re-embed existing items." - ) - # Fallback or decide how to handle - for now, we'll proceed cautiously - # If fetch fails, we might end up embedding everything again if existing_vector_ids remains empty. - - # Identify proposals that need embedding - new_proposal_ids = db_proposal_ids - existing_vector_ids - if not new_proposal_ids: - logger.debug("No new proposals found requiring embedding.") - return [ - ProposalEmbedderResult( - success=True, - message="No new proposals to embed.", - proposals_checked=proposals_checked, - proposals_embedded=0, - ) - ] - - logger.debug(f"Identified {len(new_proposal_ids)} new proposals to embed.") - - # Filter proposals to embed only the new ones - proposals_to_embed = [ - p for p in all_proposals if str(p.id) in new_proposal_ids - ] - - # Prepare data for embedding only for new proposals - texts_to_embed = [] - metadata_list = [] - proposal_ids = [] - - for proposal in proposals_to_embed: - proposal_text = self._format_proposal_for_embedding(proposal) - texts_to_embed.append(proposal_text) - metadata_list.append( - { - "proposal_id": str(proposal.id), - "title": proposal.title or "", - "dao_id": str(proposal.dao_id), - "type": proposal.type.value if proposal.type else "", - } - ) - proposal_ids.append(str(proposal.id)) - - # Get embeddings using the updated method - logger.debug( - f"Requesting embeddings for {len(texts_to_embed)} new proposals." - ) - embeddings_list = await self._get_embeddings(texts_to_embed) - - if embeddings_list is None: - errors.append("Failed to retrieve embeddings.") - else: - logger.debug( - f"Successfully retrieved {len(embeddings_list)} embeddings." - ) - # Prepare records for upsert - records_to_upsert = [] - for i, proposal_id in enumerate(proposal_ids): - records_to_upsert.append( - ( - proposal_id, # Use proposal UUID as the vector ID - embeddings_list[i], # Use the retrieved embeddings - metadata_list[i], - ) - ) - - # Upsert into the vector collection - try: - collection.upsert(records=records_to_upsert) - proposals_embedded = len(records_to_upsert) - logger.info( - f"Successfully upserted {proposals_embedded} proposal embeddings." - ) - except Exception as e: - error_msg = f"Failed to upsert proposal embeddings: {str(e)}" - logger.error(error_msg, exc_info=True) - errors.append(error_msg) - - except Exception as e: - error_msg = f"Error during proposal embedding task: {str(e)}" - logger.error(error_msg, exc_info=True) - errors.append(error_msg) - - success = not errors - message = ( - f"Checked {proposals_checked} proposals, embedded/updated {proposals_embedded}." - if success - else f"Proposal embedding task failed. 
Errors: {'; '.join(errors)}" - ) - - return [ - ProposalEmbedderResult( - success=success, - message=message, - proposals_checked=proposals_checked, - proposals_embedded=proposals_embedded, - errors=errors, - ) - ] - - -# Instantiate the task for use in the registry -proposal_embedder = ProposalEmbedderTask() diff --git a/services/runner/tasks/tweet_task.py b/services/runner/tasks/tweet_task.py index cf3bd286..c3b5bbd3 100644 --- a/services/runner/tasks/tweet_task.py +++ b/services/runner/tasks/tweet_task.py @@ -1,26 +1,29 @@ +"""Enhanced Tweet Task using the new job queue system.""" + +import re from dataclasses import dataclass +from io import BytesIO from typing import List, Optional +from urllib.parse import urlparse from uuid import UUID +import requests +import tweepy + from backend.factory import backend from backend.models import ( QueueMessage, QueueMessageBase, QueueMessageFilter, QueueMessageType, + XCredsFilter, ) -import re -from io import BytesIO -from urllib.parse import urlparse - -import requests -import tweepy - from config import config from lib.logger import configure_logger from lib.twitter import TwitterService from lib.utils import extract_image_urls from services.runner.base import BaseTask, JobContext, RunnerConfig, RunnerResult +from services.runner.decorators import JobPriority, job logger = configure_logger(__name__) @@ -31,15 +34,31 @@ class TweetProcessingResult(RunnerResult): tweet_id: Optional[str] = None dao_id: Optional[UUID] = None - - + tweets_sent: int = 0 + chunks_processed: int = 0 + + +@job( + job_type="tweet", + name="Tweet Processor", + description="Processes and sends tweets for DAOs with automatic retry and error handling", + interval_seconds=30, + priority=JobPriority.HIGH, + max_retries=3, + retry_delay_seconds=60, + timeout_seconds=300, + max_concurrent=2, + requires_twitter=True, + batch_size=5, + enable_dead_letter_queue=True, +) class TweetTask(BaseTask[TweetProcessingResult]): - """Task for sending tweets.""" + """Enhanced task for sending tweets with improved error handling and monitoring.""" def __init__(self, config: Optional[RunnerConfig] = None): super().__init__(config) self._pending_messages: Optional[List[QueueMessage]] = None - self.twitter_service = None + self._twitter_services: dict[UUID, TwitterService] = {} def _split_text_into_chunks(self, text: str, limit: int = 280) -> List[str]: """Split text into chunks not exceeding the limit without cutting words.""" @@ -58,27 +77,79 @@ def _split_text_into_chunks(self, text: str, limit: int = 280) -> List[str]: return chunks def _get_extension(self, url: str) -> str: + """Extract file extension from URL.""" path = urlparse(url).path.lower() - for ext in [".png", ".jpg", ".jpeg", ".gif"]: + for ext in [".png", ".jpg", ".jpeg", ".gif", ".webp"]: if path.endswith(ext): return ext return ".jpg" + async def _get_twitter_service(self, dao_id: UUID) -> Optional[TwitterService]: + """Get or create Twitter service for a DAO with caching.""" + if dao_id in self._twitter_services: + return self._twitter_services[dao_id] + + try: + # Get Twitter credentials for the DAO + creds = backend.list_x_creds(filters=XCredsFilter(dao_id=dao_id)) + if not creds: + logger.error(f"No Twitter credentials found for DAO {dao_id}") + return None + + # Initialize Twitter service with the credentials + twitter_service = TwitterService( + consumer_key=creds[0].consumer_key, + consumer_secret=creds[0].consumer_secret, + client_id=creds[0].client_id, + client_secret=creds[0].client_secret, + 
access_token=creds[0].access_token, + access_secret=creds[0].access_secret, + ) + await twitter_service._ainitialize() + + # Cache the service + self._twitter_services[dao_id] = twitter_service + logger.debug(f"Initialized and cached Twitter service for DAO {dao_id}") + return twitter_service + + except Exception as e: + logger.error( + f"Error initializing Twitter service for DAO {dao_id}: {str(e)}", + exc_info=True, + ) + return None + def _post_tweet_with_media( self, + twitter_service: TwitterService, image_url: str, text: str, reply_id: Optional[str] = None, ): + """Post a tweet with media attachment.""" try: - headers = {"User-Agent": "Mozilla/5.0"} - response = requests.get(image_url, headers=headers, timeout=10) + headers = {"User-Agent": "Mozilla/5.0 (compatible; AIBTC Bot/1.0)"} + response = requests.get(image_url, headers=headers, timeout=30) response.raise_for_status() + + # Validate content type and size + content_type = response.headers.get("content-type", "").lower() + if not any( + ct in content_type + for ct in ["image/jpeg", "image/png", "image/gif", "image/webp"] + ): + logger.warning(f"Unsupported content type: {content_type}") + return None + + if len(response.content) > 5 * 1024 * 1024: # 5MB limit + logger.warning(f"Image too large: {len(response.content)} bytes") + return None + auth = tweepy.OAuth1UserHandler( - self.twitter_service.consumer_key, - self.twitter_service.consumer_secret, - self.twitter_service.access_token, - self.twitter_service.access_secret, + twitter_service.consumer_key, + twitter_service.consumer_secret, + twitter_service.access_token, + twitter_service.access_secret, ) api = tweepy.API(auth) extension = self._get_extension(image_url) @@ -88,16 +159,16 @@ def _post_tweet_with_media( ) client = tweepy.Client( - consumer_key=self.twitter_service.consumer_key, - consumer_secret=self.twitter_service.consumer_secret, - access_token=self.twitter_service.access_token, - access_token_secret=self.twitter_service.access_secret, + consumer_key=twitter_service.consumer_key, + consumer_secret=twitter_service.consumer_secret, + access_token=twitter_service.access_token, + access_token_secret=twitter_service.access_secret, ) result = client.create_tweet( text=text, media_ids=[media.media_id_string], - reply_in_reply_to_tweet_id=reply_id, + in_reply_to_tweet_id=reply_id, ) if result and result.data: return type("Obj", (), {"id": result.data["id"]})() @@ -141,11 +212,19 @@ async def _initialize_twitter_service(self, dao_id: UUID) -> bool: return True except Exception as e: - logger.error(f"Error initializing Twitter service: {str(e)}", exc_info=True) - return False + logger.error(f"Failed to post tweet with media: {str(e)}") + return None async def _validate_config(self, context: JobContext) -> bool: """Validate task configuration.""" + # Enhanced validation with timeout check + if context.timeout_seconds and context.timeout_seconds < 60: + logger.warning("Tweet task timeout should be at least 60 seconds") + return False + return True + + async def _validate_resources(self, context: JobContext) -> bool: + """Validate resource availability.""" try: # Validate Twitter configuration if not config.twitter.enabled: @@ -167,7 +246,7 @@ async def _validate_config(self, context: JobContext) -> bool: return True except Exception as e: - logger.error(f"Error validating tweet task config: {str(e)}", exc_info=True) + logger.error(f"Backend not available: {str(e)}") return False async def _validate_prerequisites(self, context: JobContext) -> bool: @@ -181,112 +260,76 @@ 
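_split_text_into_chunks, shown earlier in this file's diff only by its signature and docstring, breaks long tweet text at word boundaries so no chunk exceeds the 280-character limit. One plausible implementation, as a sketch rather than the exact body used in the task:

# Sketch: split tweet text into <=280-character chunks without cutting words.
from typing import List


def split_text_into_chunks(text: str, limit: int = 280) -> List[str]:
    words = text.split()
    chunks: List[str] = []
    current = ""
    for word in words:
        candidate = f"{current} {word}".strip()
        if len(candidate) <= limit:
            current = candidate
        else:
            if current:
                chunks.append(current)
            # A single word longer than the limit becomes its own chunk.
            current = word
    if current:
        chunks.append(current)
    return chunks


chunks = split_text_into_chunks("word " * 100, limit=40)
assert all(len(c) <= 40 for c in chunks)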
async def _validate_prerequisites(self, context: JobContext) -> bool: ) return True except Exception as e: - logger.error( - f"Error validating tweet prerequisites: {str(e)}", exc_info=True - ) + logger.error(f"Error loading pending tweets: {str(e)}", exc_info=True) self._pending_messages = None return False async def _validate_task_specific(self, context: JobContext) -> bool: """Validate task-specific conditions.""" - try: - if not self._pending_messages: - logger.debug("No pending tweet messages found") - return False - - message_count = len(self._pending_messages) - if message_count > 0: - logger.debug(f"Found {message_count} pending tweet messages") - return True - - logger.debug("No pending tweet messages to process") + if not self._pending_messages: + logger.debug("No pending tweet messages found") return False - except Exception as e: - logger.error(f"Error in tweet task validation: {str(e)}", exc_info=True) - return False + # Validate each message before processing + valid_messages = [] + for message in self._pending_messages: + if await self._is_message_valid(message): + valid_messages.append(message) - async def _validate_message( - self, message: QueueMessage - ) -> Optional[TweetProcessingResult]: - """Validate a single message before processing.""" - try: - # Check if message exists - if not message.message: - return TweetProcessingResult( - success=False, - message="Tweet message is empty", - tweet_id=message.tweet_id, - ) + self._pending_messages = valid_messages - # Extract tweet text from the message field - tweet_text = None - if isinstance(message.message, dict) and "message" in message.message: - tweet_text = message.message["message"] - else: - return TweetProcessingResult( - success=False, - message=f"Unsupported tweet message format: {message.message}", - tweet_id=message.tweet_id, - ) + if valid_messages: + logger.debug(f"Found {len(valid_messages)} valid tweet messages") + return True - if not tweet_text: - return TweetProcessingResult( - success=False, - message="Tweet message content is empty", - tweet_id=message.tweet_id, - ) + logger.debug("No valid tweet messages to process") + return False - if not message.dao_id: - return TweetProcessingResult( - success=False, - message="Tweet message has no dao_id", - dao_id=None, - ) + async def _is_message_valid(self, message: QueueMessage) -> bool: + """Check if a message is valid for processing.""" + try: + if not message.message or not message.dao_id: + return False - # No need to modify the message structure, keep it as is - return None + if ( + not isinstance(message.message, dict) + or "message" not in message.message + ): + return False - except Exception as e: - logger.error( - f"Error validating message {message.id}: {str(e)}", exc_info=True - ) - return TweetProcessingResult( - success=False, - message=f"Error validating message: {str(e)}", - error=e, - tweet_id=message.tweet_id if hasattr(message, "tweet_id") else None, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, - ) + tweet_text = message.message["message"] + if not tweet_text or not tweet_text.strip(): + return False + + return True + except Exception: + return False async def _process_tweet_message( self, message: QueueMessage ) -> TweetProcessingResult: - """Process a single tweet message.""" + """Process a single tweet message with enhanced error handling.""" try: - # Validate message first - validation_result = await self._validate_message(message) - if validation_result: - return validation_result - - # Initialize Twitter service 
for this DAO - if not await self._initialize_twitter_service(message.dao_id): + # Get Twitter service for this DAO + twitter_service = await self._get_twitter_service(message.dao_id) + if not twitter_service: return TweetProcessingResult( success=False, - message=f"Failed to initialize Twitter service for DAO: {message.dao_id}", + message=f"Failed to get Twitter service for DAO: {message.dao_id}", dao_id=message.dao_id, ) - # Extract tweet text directly from the message format + # Extract tweet text tweet_text = message.message["message"] logger.info(f"Sending tweet for DAO {message.dao_id}") - logger.debug(f"Tweet content: {tweet_text}") + logger.debug(f"Tweet content: {tweet_text[:100]}...") # Look for image URLs in the text image_urls = extract_image_urls(tweet_text) image_url = image_urls[0] if image_urls else None if image_url: + # Remove image URL from text tweet_text = re.sub(re.escape(image_url), "", tweet_text).strip() tweet_text = re.sub(r"\s+", " ", tweet_text) @@ -294,37 +337,53 @@ async def _process_tweet_message( chunks = self._split_text_into_chunks(tweet_text) previous_tweet_id = message.tweet_id tweet_response = None + tweets_sent = 0 for index, chunk in enumerate(chunks): - if index == 0 and image_url: - tweet_response = self._post_tweet_with_media( - image_url=image_url, - text=chunk, - reply_id=previous_tweet_id, - ) - else: - tweet_response = await self.twitter_service._apost_tweet( - text=chunk, - reply_in_reply_to_tweet_id=previous_tweet_id, - ) - - if not tweet_response: - return TweetProcessingResult( - success=False, - message="Failed to send tweet", - dao_id=message.dao_id, - tweet_id=previous_tweet_id, - ) - - logger.info(f"Successfully posted tweet {tweet_response.id}") - logger.debug(f"Tweet ID: {tweet_response.id}") - previous_tweet_id = tweet_response.id + try: + if index == 0 and image_url: + tweet_response = self._post_tweet_with_media( + twitter_service=twitter_service, + image_url=image_url, + text=chunk, + reply_id=previous_tweet_id, + ) + else: + tweet_response = await twitter_service._apost_tweet( + text=chunk, + reply_in_reply_to_tweet_id=previous_tweet_id, + ) + + if tweet_response: + tweets_sent += 1 + previous_tweet_id = tweet_response.id + logger.info( + f"Successfully posted tweet chunk {index + 1}: {tweet_response.id}" + ) + else: + logger.error(f"Failed to send tweet chunk {index + 1}") + if index == 0: # If first chunk fails, whole message fails + return TweetProcessingResult( + success=False, + message="Failed to send first tweet chunk", + dao_id=message.dao_id, + tweet_id=previous_tweet_id, + chunks_processed=index, + ) + # For subsequent chunks, we can continue + + except Exception as chunk_error: + logger.error(f"Error sending chunk {index + 1}: {str(chunk_error)}") + if index == 0: # Critical failure on first chunk + raise chunk_error return TweetProcessingResult( - success=True, - message="Successfully sent tweet", + success=tweets_sent > 0, + message=f"Successfully sent {tweets_sent}/{len(chunks)} tweet chunks", tweet_id=previous_tweet_id, dao_id=message.dao_id, + tweets_sent=tweets_sent, + chunks_processed=len(chunks), ) except Exception as e: @@ -335,21 +394,72 @@ async def _process_tweet_message( success=False, message=f"Error sending tweet: {str(e)}", error=e, - tweet_id=message.tweet_id if hasattr(message, "tweet_id") else None, - dao_id=message.dao_id if hasattr(message, "dao_id") else None, + tweet_id=getattr(message, "tweet_id", None), + dao_id=message.dao_id, + ) + + def _should_retry_on_error(self, error: Exception, context: 
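Before chunking, the task pulls the first image URL out of the tweet body and removes it from the text, since the image is attached as media instead. The extract_image_urls helper comes from lib.utils and is not shown here, so this sketch substitutes a simple regex for it:

# Sketch: find an image URL in tweet text, strip it, and normalise whitespace.
# The regex below is a stand-in for lib.utils.extract_image_urls.
import re
from typing import List, Optional, Tuple

IMAGE_URL_RE = re.compile(r"https?://\S+\.(?:png|jpe?g|gif|webp)", re.IGNORECASE)


def pop_first_image_url(text: str) -> Tuple[str, Optional[str]]:
    urls: List[str] = IMAGE_URL_RE.findall(text)
    if not urls:
        return text, None
    image_url = urls[0]
    cleaned = re.sub(re.escape(image_url), "", text).strip()
    cleaned = re.sub(r"\s+", " ", cleaned)
    return cleaned, image_url


text, url = pop_first_image_url("Launch day! https://example.com/banner.png details soon")
assert url == "https://example.com/banner.png"
assert text == "Launch day! details soon"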
JobContext) -> bool: + """Determine if error should trigger retry.""" + # Retry on network errors, API rate limits, temporary failures + retry_errors = ( + ConnectionError, + TimeoutError, + requests.exceptions.RequestException, + tweepy.TooManyRequests, + tweepy.ServiceUnavailable, + ) + return isinstance(error, retry_errors) + + async def _handle_execution_error( + self, error: Exception, context: JobContext + ) -> Optional[List[TweetProcessingResult]]: + """Handle execution errors with recovery logic.""" + if isinstance(error, tweepy.TooManyRequests): + logger.warning("Twitter API rate limit reached, will retry later") + return None # Let default retry handling take over + + if isinstance(error, (ConnectionError, TimeoutError)): + logger.warning(f"Network error: {str(error)}, will retry") + return None + + # For other errors, don't retry + return [ + TweetProcessingResult( + success=False, + message=f"Unrecoverable error: {str(error)}", + error=error, ) + ] + + async def _post_execution_cleanup( + self, context: JobContext, results: List[TweetProcessingResult] + ) -> None: + """Cleanup after task execution.""" + # Clear cached pending messages + self._pending_messages = None + + # Don't clear Twitter services cache as they can be reused + logger.debug( + f"Cleanup completed. Cached Twitter services: {len(self._twitter_services)}" + ) async def _execute_impl(self, context: JobContext) -> List[TweetProcessingResult]: - """Execute tweet sending task.""" + """Execute tweet sending task with batch processing.""" results: List[TweetProcessingResult] = [] - try: - if not self._pending_messages: - return results - processed_count = 0 - success_count = 0 + if not self._pending_messages: + logger.debug("No pending tweet messages to process") + return results + + processed_count = 0 + success_count = 0 + batch_size = getattr(context, "batch_size", 5) + + # Process messages in batches + for i in range(0, len(self._pending_messages), batch_size): + batch = self._pending_messages[i : i + batch_size] - for message in self._pending_messages: + for message in batch: logger.debug(f"Processing tweet message: {message.id}") result = await self._process_tweet_message(message) results.append(result) @@ -357,28 +467,49 @@ async def _execute_impl(self, context: JobContext) -> List[TweetProcessingResult if result.success: success_count += 1 + # Mark message as processed with result + result_dict = { + "success": result.success, + "message": result.message, + "tweet_id": result.tweet_id, + "dao_id": str(result.dao_id) if result.dao_id else None, + "tweets_sent": result.tweets_sent, + "chunks_processed": result.chunks_processed, + "error": str(result.error) if result.error else None, + } backend.update_queue_message( queue_message_id=message.id, - update_data=QueueMessageBase(is_processed=True), + update_data=QueueMessageBase( + is_processed=True, result=result_dict + ), ) - logger.debug(f"Marked message {message.id} as processed") + logger.debug( + f"Marked message {message.id} as processed with result" + ) + else: + # Store result for failed processing + result_dict = { + "success": result.success, + "message": result.message, + "tweet_id": result.tweet_id, + "dao_id": str(result.dao_id) if result.dao_id else None, + "tweets_sent": result.tweets_sent, + "chunks_processed": result.chunks_processed, + "error": str(result.error) if result.error else None, + } + backend.update_queue_message( + queue_message_id=message.id, + update_data=QueueMessageBase(result=result_dict), + ) + logger.debug(f"Stored result for 
failed message {message.id}") - logger.debug( - f"Task metrics - Processed: {processed_count}, Successful: {success_count}" - ) + logger.info( + f"Tweet task completed - Processed: {processed_count}, " + f"Successful: {success_count}, Failed: {processed_count - success_count}" + ) - return results - - except Exception as e: - logger.error(f"Error in tweet task: {str(e)}", exc_info=True) - results.append( - TweetProcessingResult( - success=False, - message=f"Error in tweet task: {str(e)}", - error=e, - ) - ) - return results + return results +# Create instance for auto-registration tweet_task = TweetTask() diff --git a/services/startup.py b/services/startup.py index cbb710de..3183ff78 100644 --- a/services/startup.py +++ b/services/startup.py @@ -1,28 +1,74 @@ +"""Enhanced startup service with auto-discovery and comprehensive monitoring.""" + import asyncio -from typing import Any, Optional +import signal +import sys +from typing import Any, Dict, Optional from apscheduler.schedulers.asyncio import AsyncIOScheduler from config import config from lib.logger import configure_logger from services.bot import start_application +from services.runner.auto_discovery import discover_and_register_tasks from services.runner.job_manager import JobManager +from services.runner.monitoring import MetricsCollector, SystemMetrics from services.websocket import websocket_manager logger = configure_logger(__name__) +# Global enhanced job manager instance +job_manager: Optional[JobManager] = None +shutdown_event = asyncio.Event() +metrics_collector = MetricsCollector() +system_metrics = SystemMetrics() + + +def signal_handler(signum, frame): + """Handle shutdown signals gracefully.""" + logger.info(f"Received signal {signum}, initiating graceful shutdown...") + shutdown_event.set() + -class StartupService: - """Service to manage application startup and background tasks.""" +class EnhancedStartupService: + """Enhanced service to manage application startup with auto-discovery and monitoring.""" def __init__(self, scheduler: Optional[AsyncIOScheduler] = None): self.scheduler = scheduler or AsyncIOScheduler() self.cleanup_task: Optional[asyncio.Task] = None + self.bot_application: Optional[Any] = None + self.job_manager: Optional[JobManager] = None + + async def initialize_job_system(self): + """Initialize the enhanced job system with auto-discovery.""" + try: + # Initialize enhanced job manager + self.job_manager = JobManager() + + # Auto-discover and register all jobs (this populates JobRegistry) + discover_and_register_tasks() + + # Get registered jobs from JobRegistry + from services.runner.decorators import JobRegistry + + registered_jobs = JobRegistry.list_jobs() + + logger.info( + f"Enhanced job system initialized with {len(registered_jobs)} jobs discovered" + ) + return True + + except Exception as e: + logger.error( + f"Failed to initialize enhanced job system: {e}", exc_info=True + ) + return False async def start_websocket_cleanup(self) -> None: """Start the WebSocket cleanup task.""" try: await websocket_manager.start_cleanup_task() + logger.info("WebSocket cleanup task started") except Exception as e: logger.error(f"Error starting WebSocket cleanup task: {str(e)}") raise @@ -34,67 +80,213 @@ async def start_bot(self) -> Any: return None try: - application = await start_application() - logger.info("Bot started successfully") - return application + self.bot_application = await start_application() + logger.info("Telegram bot started successfully") + return self.bot_application except Exception as e: 
logger.error(f"Failed to start Telegram bot: {e}") raise - def init_scheduler(self) -> None: - """Initialize and start the scheduler with configured jobs.""" - # Use the JobManager to schedule all enabled jobs - any_enabled = JobManager.schedule_jobs(self.scheduler) + async def start_enhanced_job_system(self) -> None: + """Start the enhanced job system.""" + if not await self.initialize_job_system(): + logger.error("Failed to initialize enhanced job system") + raise RuntimeError("Job system initialization failed") + + # Start the job executor + await self.job_manager.start_executor() + logger.info("Enhanced job manager executor started successfully") - # Start the scheduler if any jobs are enabled - if any_enabled: - logger.info("Starting scheduler") - self.scheduler.start() - logger.info("Scheduler started") - else: - logger.info("Scheduler is disabled") + # Start system metrics collection + await system_metrics.start_monitoring() + logger.info("System metrics monitoring started") async def init_background_tasks(self) -> asyncio.Task: - """Initialize all background tasks.""" - # Initialize scheduler - self.init_scheduler() + """Initialize all enhanced background tasks.""" + logger.info("Starting Enhanced AIBTC Background Services...") - # Start websocket cleanup task - self.cleanup_task = asyncio.create_task(self.start_websocket_cleanup()) + try: + # Start enhanced job system + await self.start_enhanced_job_system() + + # Start websocket cleanup task + self.cleanup_task = asyncio.create_task(self.start_websocket_cleanup()) + + # Start bot if enabled + await self.start_bot() - # Start bot if enabled - await self.start_bot() + logger.info("All enhanced background services started successfully") + return self.cleanup_task - # Return the cleanup task for management - return self.cleanup_task + except Exception as e: + logger.error(f"Failed to start background services: {e}", exc_info=True) + raise async def shutdown(self) -> None: - """Shutdown all services gracefully.""" - logger.info("Shutting down services...") + """Enhanced cleanup and shutdown with graceful task termination.""" + logger.info("Initiating enhanced shutdown sequence...") + + try: + # Stop system metrics collection + if system_metrics: + await system_metrics.stop_monitoring() + logger.info("System metrics collection stopped") + + # Gracefully shutdown enhanced job manager + if self.job_manager: + logger.info("Stopping enhanced job manager...") + await self.job_manager.stop_executor() + logger.info("Enhanced job manager stopped successfully") + + # Stop websocket cleanup + if self.cleanup_task: + self.cleanup_task.cancel() + try: + await self.cleanup_task + except asyncio.CancelledError: + pass + logger.info("WebSocket cleanup task stopped") + + # Stop bot + if self.bot_application: + logger.info("Stopping Telegram bot...") + # Add any necessary bot shutdown code here + logger.info("Telegram bot stopped") + + except Exception as e: + logger.error(f"Error during enhanced shutdown: {e}", exc_info=True) - if self.scheduler.running: - self.scheduler.shutdown() - logger.info("Scheduler shutdown complete") + logger.info("Enhanced shutdown complete") - if self.cleanup_task: - self.cleanup_task.cancel() - try: - await self.cleanup_task - except asyncio.CancelledError: - pass - logger.info("Cleanup task shutdown complete") + def get_health_status(self) -> Dict: + """Get comprehensive health status of the enhanced startup service.""" + if not self.job_manager: + return { + "status": "unhealthy", + "message": "Enhanced job manager not 
initialized", + "jobs": {"running": 0, "registered": 0, "failed": 0}, + "system": {}, + "uptime": 0, + } + # Get comprehensive health data + health_data = self.job_manager.get_system_health() + system_health = system_metrics.get_current_metrics() -# Global instance for convenience -startup_service = StartupService() + return { + "status": health_data["status"], + "message": "Enhanced job system running", + "jobs": { + "running": health_data["executor"]["running"], + "registered": health_data["tasks"]["total_registered"], + "enabled": health_data["tasks"]["enabled"], + "disabled": health_data["tasks"]["disabled"], + "total_executions": health_data["metrics"]["total_executions"], + }, + "system": { + "cpu_usage": system_health.get("cpu_usage", 0), + "memory_usage": system_health.get("memory_usage", 0), + "disk_usage": system_health.get("disk_usage", 0), + }, + "uptime": health_data.get("uptime_seconds", 0), + "last_updated": system_health.get("timestamp"), + "version": "2.0-enhanced", + "services": { + "websocket_cleanup": self.cleanup_task is not None + and not self.cleanup_task.done(), + "telegram_bot": self.bot_application is not None, + "job_manager": self.job_manager is not None + and self.job_manager.is_running, + }, + } + def get_job_metrics(self) -> Dict: + """Get detailed job execution metrics.""" + if not self.job_manager: + return {"error": "Enhanced job manager not available"} -# Convenience functions that use the global instance + return self.job_manager.get_comprehensive_metrics() + + def get_system_metrics(self) -> Dict: + """Get current system performance metrics.""" + return system_metrics.get_current_metrics() + + def trigger_job(self, job_type: str) -> Dict: + """Manually trigger a specific job type.""" + if not self.job_manager: + return {"error": "Enhanced job manager not available"} + + return self.job_manager.trigger_job(job_type) + + +# Global enhanced instance for convenience +startup_service = EnhancedStartupService() + + +# Enhanced convenience functions that use the global instance async def run() -> asyncio.Task: - """Initialize all background tasks using the global startup service.""" - return await startup_service.init_background_tasks() + """Initialize all enhanced background tasks using the global startup service.""" + global job_manager + + # Setup signal handlers for standalone mode + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + try: + cleanup_task = await startup_service.init_background_tasks() + job_manager = startup_service.job_manager + + logger.info("Enhanced AIBTC services running. 
Press Ctrl+C to stop.") + return cleanup_task + + except Exception as e: + logger.error(f"Failed to start enhanced services: {e}", exc_info=True) + raise async def shutdown() -> None: - """Shutdown all services using the global startup service.""" + """Shutdown all enhanced services using the global startup service.""" await startup_service.shutdown() + + +# Enhanced health check functions +def get_health_status() -> Dict: + """Get comprehensive health status.""" + return startup_service.get_health_status() + + +def get_job_metrics() -> Dict: + """Get detailed job execution metrics.""" + return startup_service.get_job_metrics() + + +def get_system_metrics() -> Dict: + """Get current system performance metrics.""" + return startup_service.get_system_metrics() + + +def trigger_job(job_type: str) -> Dict: + """Manually trigger a specific job type.""" + return startup_service.trigger_job(job_type) + + +# Enhanced standalone mode for direct execution +async def run_standalone(): + """Run the enhanced startup service in standalone mode.""" + try: + await run() + + # Wait for shutdown signal + await shutdown_event.wait() + + except KeyboardInterrupt: + logger.info("Received keyboard interrupt") + except Exception as e: + logger.error(f"Critical error in standalone mode: {e}", exc_info=True) + sys.exit(1) + finally: + await shutdown() + + +if __name__ == "__main__": + asyncio.run(run_standalone()) diff --git a/services/webhooks/chainhook/handlers/action_concluder_handler.py b/services/webhooks/chainhook/handlers/action_concluder_handler.py index 562f2a12..4a1f5ea8 100644 --- a/services/webhooks/chainhook/handlers/action_concluder_handler.py +++ b/services/webhooks/chainhook/handlers/action_concluder_handler.py @@ -278,7 +278,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: # Create queue messages for both Twitter and Discord if proposal passed tweet_message = backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.TWEET, + type=QueueMessageType.get_or_create("tweet"), message={"message": clean_message}, dao_id=dao_data["id"], ) @@ -299,7 +299,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: follow_up_tweet = backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.TWEET, + type=QueueMessageType.get_or_create("tweet"), message={"message": follow_up_message}, dao_id=dao_data["id"], ) @@ -340,7 +340,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: discord_message = backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DISCORD, + type=QueueMessageType.get_or_create("discord"), message={"content": formatted_message, "proposal_status": "passed"}, dao_id=dao_data["id"], ) @@ -381,7 +381,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: discord_message = backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DISCORD, + type=QueueMessageType.get_or_create("discord"), message={"content": formatted_message, "proposal_status": "failed"}, dao_id=dao_data["id"], ) diff --git a/services/webhooks/chainhook/handlers/action_proposal_handler.py b/services/webhooks/chainhook/handlers/action_proposal_handler.py index 49f1869b..67ee6a4f 100644 --- a/services/webhooks/chainhook/handlers/action_proposal_handler.py +++ b/services/webhooks/chainhook/handlers/action_proposal_handler.py @@ -433,7 +433,9 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: 
backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_EVALUATION, + type=QueueMessageType.get_or_create( + "dao_proposal_evaluation" + ), message=message_data, dao_id=dao_data["id"], wallet_id=agent["wallet_id"], @@ -528,7 +530,9 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_EVALUATION, + type=QueueMessageType.get_or_create( + "dao_proposal_evaluation" + ), message=message_data, dao_id=dao_data["id"], wallet_id=agent["wallet_id"], diff --git a/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py b/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py index ab2e1bb3..6292a073 100644 --- a/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py +++ b/services/webhooks/chainhook/handlers/dao_proposal_burn_height_handler.py @@ -2,7 +2,6 @@ from typing import Dict, List, Optional from uuid import UUID -from config import config from backend.factory import backend from backend.models import ( @@ -12,6 +11,7 @@ QueueMessageFilter, QueueMessageType, ) +from config import config from lib.logger import configure_logger from services.webhooks.chainhook.handlers.base import ChainhookEventHandler from services.webhooks.chainhook.models import ChainHookData, TransactionWithReceipt @@ -232,7 +232,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: # Check if a veto notification message already exists if self._queue_message_exists( - QueueMessageType.DISCORD, proposal.id, dao.id + QueueMessageType.get_or_create("discord"), proposal.id, dao.id ): self.logger.debug( f"Veto notification Discord message already exists for proposal {proposal.id}, skipping" @@ -251,7 +251,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DISCORD, + type=QueueMessageType.get_or_create("discord"), message={"content": message, "proposal_status": "veto_window_open"}, dao_id=dao.id, ) @@ -269,7 +269,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: # Check if a veto end notification message already exists if self._queue_message_exists( - QueueMessageType.DISCORD, proposal.id, dao.id + QueueMessageType.get_or_create("discord"), proposal.id, dao.id ): self.logger.debug( f"Veto end notification Discord message already exists for proposal {proposal.id}, skipping" @@ -288,7 +288,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DISCORD, + type=QueueMessageType.get_or_create("discord"), message={ "content": message, "proposal_status": "veto_window_closed", @@ -309,7 +309,9 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: # Check if a conclude message already exists for this proposal if self._queue_message_exists( - QueueMessageType.DAO_PROPOSAL_CONCLUDE, proposal.id, dao.id + QueueMessageType.get_or_create("dao_proposal_conclude"), + proposal.id, + dao.id, ): self.logger.debug( f"Conclude queue message already exists for proposal {proposal.id}, skipping" @@ -323,7 +325,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_CONCLUDE, + 
type=QueueMessageType.get_or_create("dao_proposal_conclude"), message=message_data, dao_id=dao.id, wallet_id=None, # No specific wallet needed for conclusion @@ -352,7 +354,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: for agent in agents: # Check if a queue message already exists for this proposal+wallet combination if self._queue_message_exists( - QueueMessageType.DAO_PROPOSAL_VOTE, + QueueMessageType.get_or_create("dao_proposal_vote"), proposal.id, dao.id, agent["wallet_id"], @@ -369,7 +371,7 @@ async def handle_transaction(self, transaction: TransactionWithReceipt) -> None: backend.create_queue_message( QueueMessageCreate( - type=QueueMessageType.DAO_PROPOSAL_VOTE, + type=QueueMessageType.get_or_create("dao_proposal_vote"), message=message_data, dao_id=dao.id, wallet_id=agent["wallet_id"], diff --git a/services/workflows/__init__.py b/services/workflows/__init__.py index 7249e894..c48d4405 100644 --- a/services/workflows/__init__.py +++ b/services/workflows/__init__.py @@ -18,11 +18,15 @@ ChatWorkflow, execute_chat_stream, ) -from services.workflows.planning_mixin import PlanningCapability +from services.workflows.mixins.planning_mixin import PlanningCapability +from services.workflows.mixins.vector_mixin import ( + VectorRetrievalCapability, + add_documents_to_vectors, +) +from services.workflows.mixins.web_search_mixin import WebSearchCapability from services.workflows.proposal_evaluation import ( ProposalEvaluationWorkflow, evaluate_and_vote_on_proposal, - evaluate_proposal_only, ) from services.workflows.tweet_analysis import ( TweetAnalysisWorkflow, @@ -32,11 +36,6 @@ TweetGeneratorWorkflow, generate_dao_tweet, ) -from services.workflows.vector_mixin import ( - VectorRetrievalCapability, - add_documents_to_vectors, -) -from services.workflows.web_search_mixin import WebSearchCapability from services.workflows.workflow_service import ( BaseWorkflowService, WorkflowBuilder, @@ -72,7 +71,6 @@ "TweetGeneratorWorkflow", "analyze_tweet", "evaluate_and_vote_on_proposal", - "evaluate_proposal_only", "generate_dao_tweet", "ChatService", "ChatWorkflow", diff --git a/services/workflows/agents/core_context.py b/services/workflows/agents/core_context.py index c51df929..a0fb9b21 100644 --- a/services/workflows/agents/core_context.py +++ b/services/workflows/agents/core_context.py @@ -4,11 +4,14 @@ from backend.factory import backend from lib.logger import configure_logger -from services.workflows.capability_mixins import BaseCapabilityMixin, PromptCapability +from services.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.workflows.mixins.vector_mixin import VectorRetrievalCapability from services.workflows.utils.models import AgentOutput from services.workflows.utils.state_reducers import update_state_with_agent_result from services.workflows.utils.token_usage import TokenUsageMixin -from services.workflows.vector_mixin import VectorRetrievalCapability logger = configure_logger(__name__) diff --git a/services/workflows/agents/financial_context.py b/services/workflows/agents/financial_context.py index 0278af89..722a9fd7 100644 --- a/services/workflows/agents/financial_context.py +++ b/services/workflows/agents/financial_context.py @@ -3,7 +3,10 @@ from langchain_core.prompts.chat import ChatPromptTemplate from lib.logger import configure_logger -from services.workflows.capability_mixins import BaseCapabilityMixin, PromptCapability +from services.workflows.mixins.capability_mixins import ( + 
BaseCapabilityMixin, + PromptCapability, +) from services.workflows.utils.models import AgentOutput from services.workflows.utils.state_reducers import update_state_with_agent_result from services.workflows.utils.token_usage import TokenUsageMixin diff --git a/services/workflows/agents/historical_context.py b/services/workflows/agents/historical_context.py index c8bb9359..df632087 100644 --- a/services/workflows/agents/historical_context.py +++ b/services/workflows/agents/historical_context.py @@ -6,11 +6,14 @@ from backend.factory import backend from backend.models import Proposal, ProposalFilter from lib.logger import configure_logger -from services.workflows.capability_mixins import BaseCapabilityMixin, PromptCapability +from services.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.workflows.mixins.vector_mixin import VectorRetrievalCapability from services.workflows.utils.models import AgentOutput from services.workflows.utils.state_reducers import update_state_with_agent_result from services.workflows.utils.token_usage import TokenUsageMixin -from services.workflows.vector_mixin import VectorRetrievalCapability logger = configure_logger(__name__) diff --git a/services/workflows/agents/image_processing.py b/services/workflows/agents/image_processing.py index dfe6a92d..94727ecb 100644 --- a/services/workflows/agents/image_processing.py +++ b/services/workflows/agents/image_processing.py @@ -4,7 +4,7 @@ from lib.logger import configure_logger from lib.utils import extract_image_urls -from services.workflows.capability_mixins import BaseCapabilityMixin +from services.workflows.mixins.capability_mixins import BaseCapabilityMixin logger = configure_logger(__name__) diff --git a/services/workflows/agents/proposal_metadata.py b/services/workflows/agents/proposal_metadata.py index 59c04f71..c3f7589c 100644 --- a/services/workflows/agents/proposal_metadata.py +++ b/services/workflows/agents/proposal_metadata.py @@ -3,7 +3,7 @@ from langchain_core.prompts.chat import ChatPromptTemplate from lib.logger import configure_logger -from services.workflows.capability_mixins import BaseCapabilityMixin +from services.workflows.mixins.capability_mixins import BaseCapabilityMixin from services.workflows.utils.models import ProposalMetadataOutput from services.workflows.utils.token_usage import TokenUsageMixin diff --git a/services/workflows/agents/proposal_recommendation.py b/services/workflows/agents/proposal_recommendation.py index 076963cf..9d1a1c82 100644 --- a/services/workflows/agents/proposal_recommendation.py +++ b/services/workflows/agents/proposal_recommendation.py @@ -6,7 +6,7 @@ from backend.factory import backend from backend.models import DAO, Proposal, ProposalFilter from lib.logger import configure_logger -from services.workflows.capability_mixins import BaseCapabilityMixin +from services.workflows.mixins.capability_mixins import BaseCapabilityMixin from services.workflows.utils.models import ProposalRecommendationOutput from services.workflows.utils.token_usage import TokenUsageMixin diff --git a/services/workflows/agents/reasoning.py b/services/workflows/agents/reasoning.py index d87e976c..75e066af 100644 --- a/services/workflows/agents/reasoning.py +++ b/services/workflows/agents/reasoning.py @@ -6,9 +6,12 @@ from langgraph.graph import StateGraph from lib.logger import configure_logger -from services.workflows.capability_mixins import BaseCapabilityMixin, PromptCapability from services.workflows.chat import 
StreamingCallbackHandler -from services.workflows.planning_mixin import PlanningCapability +from services.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) +from services.workflows.mixins.planning_mixin import PlanningCapability from services.workflows.utils.models import FinalOutput from services.workflows.utils.state_reducers import update_state_with_agent_result from services.workflows.utils.token_usage import TokenUsageMixin diff --git a/services/workflows/agents/social_context.py b/services/workflows/agents/social_context.py index dee008b4..f3d56541 100644 --- a/services/workflows/agents/social_context.py +++ b/services/workflows/agents/social_context.py @@ -3,7 +3,10 @@ from langchain_core.prompts.chat import ChatPromptTemplate from lib.logger import configure_logger -from services.workflows.capability_mixins import BaseCapabilityMixin, PromptCapability +from services.workflows.mixins.capability_mixins import ( + BaseCapabilityMixin, + PromptCapability, +) from services.workflows.utils.models import AgentOutput from services.workflows.utils.state_reducers import update_state_with_agent_result from services.workflows.utils.token_usage import TokenUsageMixin diff --git a/services/workflows/chat.py b/services/workflows/chat.py index 747a00ee..878e1724 100644 --- a/services/workflows/chat.py +++ b/services/workflows/chat.py @@ -26,11 +26,11 @@ MessageProcessor, StreamingCallbackHandler, ) -from services.workflows.planning_mixin import PlanningCapability -from services.workflows.vector_mixin import ( +from services.workflows.mixins.planning_mixin import PlanningCapability +from services.workflows.mixins.vector_mixin import ( VectorRetrievalCapability, ) -from services.workflows.web_search_mixin import WebSearchCapability +from services.workflows.mixins.web_search_mixin import WebSearchCapability logger = configure_logger(__name__) diff --git a/services/workflows/hierarchical_workflows.py b/services/workflows/hierarchical_workflows.py index 0f04c9a5..8b4ab90b 100644 --- a/services/workflows/hierarchical_workflows.py +++ b/services/workflows/hierarchical_workflows.py @@ -19,7 +19,7 @@ from langgraph.graph import END, StateGraph from lib.logger import configure_logger -from services.workflows.capability_mixins import ( +from services.workflows.mixins.capability_mixins import ( BaseCapabilityMixin, ComposableWorkflowMixin, StateType, diff --git a/services/workflows/capability_mixins.py b/services/workflows/mixins/capability_mixins.py similarity index 100% rename from services/workflows/capability_mixins.py rename to services/workflows/mixins/capability_mixins.py diff --git a/services/workflows/planning_mixin.py b/services/workflows/mixins/planning_mixin.py similarity index 100% rename from services/workflows/planning_mixin.py rename to services/workflows/mixins/planning_mixin.py diff --git a/services/workflows/vector_mixin.py b/services/workflows/mixins/vector_mixin.py similarity index 100% rename from services/workflows/vector_mixin.py rename to services/workflows/mixins/vector_mixin.py diff --git a/services/workflows/web_search_mixin.py b/services/workflows/mixins/web_search_mixin.py similarity index 100% rename from services/workflows/web_search_mixin.py rename to services/workflows/mixins/web_search_mixin.py diff --git a/services/workflows/proposal_evaluation.py b/services/workflows/proposal_evaluation.py index 45286500..9a1ffb14 100644 --- a/services/workflows/proposal_evaluation.py +++ b/services/workflows/proposal_evaluation.py @@ -618,30 +618,3 
@@ async def evaluate_and_vote_on_proposal( except Exception as e: logger.error(f"Error in evaluate_and_vote_on_proposal: {str(e)}") return {"error": f"Failed to evaluate proposal: {str(e)}"} - - -async def evaluate_proposal_only( - proposal_id: UUID, - wallet_id: Optional[UUID] = None, - agent_id: Optional[UUID] = None, - dao_id: Optional[UUID] = None, -) -> Dict: - """Evaluate a proposal without voting. - - Args: - proposal_id: Proposal ID - wallet_id: Optional wallet ID - agent_id: Optional agent ID - dao_id: Optional DAO ID - - Returns: - Evaluation results - """ - # Delegate to evaluate_and_vote_on_proposal with auto_vote=False - return await evaluate_and_vote_on_proposal( - proposal_id=proposal_id, - wallet_id=wallet_id, - agent_id=agent_id, - auto_vote=False, - dao_id=dao_id, - ) diff --git a/test_proposal_evaluation.py b/test_proposal_evaluation.py index d82606b5..bcb03288 100644 --- a/test_proposal_evaluation.py +++ b/test_proposal_evaluation.py @@ -20,10 +20,7 @@ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) -from services.workflows.proposal_evaluation import ( - evaluate_and_vote_on_proposal, - evaluate_proposal_only, -) +from services.workflows.proposal_evaluation import evaluate_and_vote_on_proposal def parse_uuid(value: str) -> Optional[UUID]: @@ -124,11 +121,12 @@ async def main(): try: if args.evaluation_only: print("🔍 Running evaluation only...") - result = await evaluate_proposal_only( + result = await evaluate_and_vote_on_proposal( proposal_id=args.proposal_id, wallet_id=args.wallet_id, agent_id=args.agent_id, dao_id=args.dao_id, + auto_vote=False, ) else: print("🔍 Running evaluation with voting option...") diff --git a/tools/agent_account.py b/tools/agent_account.py index 6f36568c..fafd7665 100644 --- a/tools/agent_account.py +++ b/tools/agent_account.py @@ -45,10 +45,17 @@ class AgentAccountDeployTool(BaseTool): args_schema: Type[BaseModel] = AgentAccountDeployInput return_direct: bool = False wallet_id: Optional[UUID] = None + seed_phrase: Optional[str] = None - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + def __init__( + self, + wallet_id: Optional[UUID] = None, + seed_phrase: Optional[str] = None, + **kwargs, + ): super().__init__(**kwargs) self.wallet_id = wallet_id + self.seed_phrase = seed_phrase def _deploy( self, @@ -60,8 +67,12 @@ def _deploy( **kwargs, ) -> Dict[str, Any]: """Execute the tool to deploy agent account.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} + if self.seed_phrase is None and self.wallet_id is None: + return { + "success": False, + "message": "Either seed phrase or wallet ID is required", + "data": None, + } args = [ owner_address, @@ -71,12 +82,21 @@ def _deploy( str(save_to_file).lower(), ] - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-cohort-0/contract-tools", - "deploy-agent-account.ts", - *args, - ) + # Use seed phrase if available, otherwise fall back to wallet_id + if self.seed_phrase: + return BunScriptRunner.bun_run_with_seed_phrase( + self.seed_phrase, + "aibtc-cohort-0/contract-tools", + "deploy-agent-account.ts", + *args, + ) + else: + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-cohort-0/contract-tools", + "deploy-agent-account.ts", + *args, + ) def _run( self, diff --git a/tools/alex.py b/tools/alex.py deleted file mode 100644 index c1a26767..00000000 --- a/tools/alex.py +++ /dev/null @@ -1,93 +0,0 @@ -from typing import Any, Dict, List, Type - -from langchain.tools import BaseTool -from pydantic 
import BaseModel, Field - -from lib.alex import AlexApi - - -class AlexBaseInput(BaseModel): - """Base input schema for Alex tools.""" - - -class AlexPriceHistoryInput(AlexBaseInput): - """Input schema for AlexGetPriceHistory.""" - - token_address: str = Field( - ..., description="The address of the token to get price history for." - ) - - -class AlexTokenPoolVolumeInput(AlexBaseInput): - """Input schema for AlexGetTokenPoolVolume.""" - - token_pool_id: str = Field( - ..., description="The token pool ID to get volume data for." - ) - - -class AlexGetPriceHistory(BaseTool): - name: str = "alex_get_price_history" - description: str = ( - "Retrieve historical price data for a specified cryptocurrency token address" - ) - args_schema: Type[BaseModel] = AlexPriceHistoryInput - return_direct: bool = False - - def _deploy(self, token_address: str, **kwargs) -> List[Any]: - """Execute the tool to get price history.""" - obj = AlexApi() - return obj.get_price_history(token_address) - - def _run(self, token_address: str, **kwargs) -> List[Any]: - """Execute the tool to get price history.""" - return self._deploy(token_address) - - async def _arun(self, token_address: str, **kwargs) -> List[Any]: - """Async version of the tool.""" - return self._deploy(token_address) - - -class AlexGetSwapInfo(BaseTool): - name: str = "alex_get_swap_info" - description: str = "Retrieve all available token pair data from the Alex DEX" - return_direct: bool = False - args_schema: Type[BaseModel] = AlexBaseInput - - def _deploy(self, **kwargs) -> List[Dict[str, str]]: - """Execute the tool to get swap info.""" - obj = AlexApi() - pairs = obj.get_pairs() - return [ - {"token": pair.get("wrapped_token_y"), "token_pool_id": pair.get("pool_id")} - for pair in pairs - if pair.get("wrapped_token_x") == "STX" - ] - - def _run(self, **kwargs) -> List[Dict[str, str]]: - """Execute the tool to get swap info.""" - return self._deploy() - - async def _arun(self, **kwargs) -> List[Dict[str, str]]: - """Async version of the tool.""" - return self._deploy() - - -class AlexGetTokenPoolVolume(BaseTool): - name: str = "alex_get_token_pool_volume" - description: str = "Retrieve pool volume data for a specified token pool ID" - args_schema: Type[BaseModel] = AlexTokenPoolVolumeInput - return_direct: bool = False - - def _deploy(self, token_pool_id: str, **kwargs) -> str: - """Execute the tool to get token pool volume.""" - obj = AlexApi() - return obj.get_token_pool_price(token_pool_id) - - def _run(self, token_pool_id: str, **kwargs) -> str: - """Execute the tool to get token pool volume.""" - return self._deploy(token_pool_id) - - async def _arun(self, token_pool_id: str, **kwargs) -> str: - """Async version of the tool.""" - return self._deploy(token_pool_id) diff --git a/tools/bun.py b/tools/bun.py index 9c3dc20b..eced1455 100644 --- a/tools/bun.py +++ b/tools/bun.py @@ -40,6 +40,49 @@ def bun_run( secret = backend.get_secret(wallet.secret_id) mnemonic = secret.decrypted_secret + return BunScriptRunner._execute_script( + mnemonic, script_path, script_name, *args + ) + + @staticmethod + def bun_run_with_seed_phrase( + seed_phrase: str, script_path: str, script_name: str, *args: str + ) -> Dict[str, Union[str, bool, None]]: + """ + Run a TypeScript script using Bun with specified parameters using seed phrase directly. 
+ + Args: + seed_phrase: The mnemonic seed phrase to use for script execution + script_path: Path of the directory containing the script + script_name: Name of the TypeScript script to run + *args: Additional arguments to pass to the script + + Returns: + Dict containing: + - output: Script execution stdout if successful + - error: Error message if execution failed, None otherwise + - success: Boolean indicating if execution was successful + """ + return BunScriptRunner._execute_script( + seed_phrase, script_path, script_name, *args + ) + + @staticmethod + def _execute_script( + mnemonic: str, script_path: str, script_name: str, *args: str + ) -> Dict[str, Union[str, bool, None]]: + """ + Internal method to execute the script with the given mnemonic. + + Args: + mnemonic: The mnemonic phrase to use + script_path: Path of the directory containing the script + script_name: Name of the TypeScript script to run + *args: Additional arguments to pass to the script + + Returns: + Dict containing script execution results + """ env = os.environ.copy() env["ACCOUNT_INDEX"] = "0" env["MNEMONIC"] = mnemonic @@ -60,7 +103,7 @@ def bun_run( ) try: - logger.info(f"Running script: {script_name} for wallet: {wallet_id}") + logger.info(f"Running script: {script_name}") result = subprocess.run( command, check=True, diff --git a/tools/coinmarketcap.py b/tools/coinmarketcap.py deleted file mode 100644 index 8e478725..00000000 --- a/tools/coinmarketcap.py +++ /dev/null @@ -1,76 +0,0 @@ -from typing import Type - -import requests -from langchain.tools import BaseTool -from pydantic import BaseModel - -from config import config - - -class GetBitcoinDataInput(BaseModel): - """Input schema for GetBitcoinData tool. - This tool doesn't require any input parameters but we still define the schema for consistency. - """ - - pass - - -class GetBitcoinData(BaseTool): - name: str = "get_bitcoin_data" - description: str = "Fetch current Bitcoin market data including price, market cap, 24h trading volume, and percentage changes from CoinMarketCap" - args_schema: Type[BaseModel] = GetBitcoinDataInput - return_direct: bool = False - - def _deploy(self, **kwargs) -> str: - """Execute the tool to fetch Bitcoin market data.""" - # Get the API key from the config - api_key = config.api.cmc_api_key - - if not api_key: - return "Error: API key not found. Please set the 'AIBTC_CMC_API_KEY' environment variable." 
- - # CoinMarketCap API URL and parameters - url = "https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest" - parameters = {"symbol": "BTC", "convert": "USD"} - - # Request headers including API key - headers = { - "Accepts": "application/json", - "X-CMC_PRO_API_KEY": api_key, - } - - try: - # Make the API request - response = requests.get(url, headers=headers, params=parameters) - response.raise_for_status() # Raise an exception for HTTP errors - - # Parse the JSON response - data = response.json() - bitcoin_data = data["data"]["BTC"] - - # Extract relevant Bitcoin data - price = bitcoin_data["quote"]["USD"]["price"] - market_cap = bitcoin_data["quote"]["USD"]["market_cap"] - volume_24h = bitcoin_data["quote"]["USD"]["volume_24h"] - percent_change_24h = bitcoin_data["quote"]["USD"]["percent_change_24h"] - percent_change_7d = bitcoin_data["quote"]["USD"]["percent_change_7d"] - - # Format the result as a string - return ( - f"Bitcoin Price: ${price:.2f}\n" - f"Market Cap: ${market_cap:.2f}\n" - f"24h Trading Volume: ${volume_24h:.2f}\n" - f"24h Change: {percent_change_24h:.2f}%\n" - f"7d Change: {percent_change_7d:.2f}%" - ) - - except requests.RequestException as e: - return f"Error fetching Bitcoin data: {e}" - - def _run(self, **kwargs) -> str: - """Execute the tool to fetch Bitcoin market data.""" - return self._deploy(**kwargs) - - async def _arun(self, **kwargs) -> str: - """Async version of the tool.""" - return self._deploy(**kwargs) diff --git a/tools/dao_ext_action_proposals.py b/tools/dao_ext_action_proposals.py index 4823eb91..dcba8a68 100644 --- a/tools/dao_ext_action_proposals.py +++ b/tools/dao_ext_action_proposals.py @@ -254,10 +254,17 @@ class ConcludeActionProposalTool(BaseTool): args_schema: Type[BaseModel] = ConcludeActionProposalInput return_direct: bool = False wallet_id: Optional[UUID] = None + seed_phrase: Optional[str] = None - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): + def __init__( + self, + wallet_id: Optional[UUID] = None, + seed_phrase: Optional[str] = None, + **kwargs, + ): super().__init__(**kwargs) self.wallet_id = wallet_id + self.seed_phrase = seed_phrase def _deploy( self, @@ -268,8 +275,12 @@ def _deploy( **kwargs, ) -> Dict[str, Any]: """Execute the tool to conclude an action proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} + if self.seed_phrase is None and self.wallet_id is None: + return { + "success": False, + "message": "Either seed phrase or wallet ID is required", + "data": None, + } args = [ action_proposals_voting_extension, @@ -278,12 +289,21 @@ def _deploy( dao_token_contract_address, ] - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", - "conclude-action-proposal.ts", - *args, - ) + # Use seed phrase if available, otherwise fall back to wallet_id + if self.seed_phrase: + return BunScriptRunner.bun_run_with_seed_phrase( + self.seed_phrase, + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", + "conclude-action-proposal.ts", + *args, + ) + else: + return BunScriptRunner.bun_run( + self.wallet_id, + "aibtc-cohort-0/dao-tools/extensions/action-proposal-voting/public", + "conclude-action-proposal.ts", + *args, + ) def _run( self, diff --git a/tools/jing.py b/tools/jing.py deleted file mode 100644 index 889107ac..00000000 --- a/tools/jing.py +++ /dev/null @@ -1,580 +0,0 @@ -from typing import Any, Dict, Optional, Type - -from langchain.tools import BaseTool 
-from pydantic import BaseModel, Field - -from backend.models import UUID - -from .bun import BunScriptRunner - - -# Schema definitions -class JingGetOrderBookInput(BaseModel): - """Input schema for getting orderbook data.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - - -class JingCreateBidInput(BaseModel): - """Input schema for creating bid offers.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - stx_amount: float = Field(..., description="Amount of STX to bid") - token_amount: float = Field(..., description="Amount of tokens requested") - recipient: Optional[str] = Field( - None, description="Optional: recipient address for private offers" - ) - expiry: Optional[int] = Field(None, description="Optional: blocks until expiration") - - -class JingSubmitOrderInput(BaseModel): - """Input schema for submitting (accepting) existing orders.""" - - swap_id: int = Field(..., description="ID of the order to submit") - - -class JingCreateAskInput(BaseModel): - """Input schema for creating ask offers.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - token_amount: float = Field(..., description="Amount of tokens to sell") - stx_amount: float = Field(..., description="Amount of STX requested") - recipient: Optional[str] = Field( - None, description="Optional: recipient address for private offers" - ) - expiry: Optional[int] = Field(None, description="Optional: blocks until expiration") - - -class JingGetPrivateOffersInput(BaseModel): - """Input schema for getting private offers.""" - - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - user_address: str = Field(..., description="Address to check private offers for") - - -class JingRepriceOrderInput(BaseModel): - """Input schema for repricing orders.""" - - swap_id: int = Field(..., description="ID of the order to reprice") - new_amount: float = Field( - ..., description="New amount (STX for asks, token for bids)" - ) - pair: str = Field(..., description="Trading pair (e.g., 'PEPE-STX')") - recipient: Optional[str] = Field( - None, description="Optional: recipient address for private offers" - ) - expiry: Optional[int] = Field(None, description="Optional: blocks until expiration") - - -class JingGetOrderInput(BaseModel): - """Input schema for getting order details.""" - - swap_id: int = Field(..., description="ID of the order to get details for") - - -class JingGetMarketsInput(BaseModel): - """Input schema for getting available markets.""" - - pass - - -# Base Tool with common initialization -class JingBaseTool(BaseTool): - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - -# Tool implementations -class JingGetOrderBookTool(JingBaseTool): - name: str = "jing_get_order_book" - description: str = "Get the current order book for a trading pair on JingCash" - args_schema: Type[BaseModel] = JingGetOrderBookInput - return_direct: bool = False - - def _deploy(self, pair: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get order book data.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run(self.wallet_id, "jing", "get-orderbook.ts", pair) - - def _run(self, pair: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get order book data.""" - return self._deploy(pair, **kwargs) - - async def _arun(self, pair: str, **kwargs) 
-> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(pair, **kwargs) - - -class JingCreateBidTool(JingBaseTool): - name: str = "jing_create_bid" - description: str = "Create a new bid offer to buy tokens with STX on JingCash" - args_schema: Type[BaseModel] = JingCreateBidInput - return_direct: bool = False - - def _deploy( - self, - pair: str, - stx_amount: float, - token_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a bid offer.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [pair, str(stx_amount), str(token_amount)] - if recipient: - args.append(recipient) - if expiry: - args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "bid.ts", *args) - - def _run( - self, - pair: str, - stx_amount: float, - token_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a bid offer.""" - return self._deploy(pair, stx_amount, token_amount, recipient, expiry, **kwargs) - - async def _arun( - self, - pair: str, - stx_amount: float, - token_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(pair, stx_amount, token_amount, recipient, expiry) - - -class JingSubmitBidTool(JingBaseTool): - name: str = "jing_submit_bid" - description: str = ( - "Submit (accept) an existing bid offer to sell tokens on JingCash" - ) - args_schema: Type[BaseModel] = JingSubmitOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit a bid.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "submit-bid.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit a bid.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingCreateAskTool(JingBaseTool): - name: str = "jing_create_ask" - description: str = "Create a new ask offer to sell tokens for STX on JingCash" - args_schema: Type[BaseModel] = JingCreateAskInput - return_direct: bool = False - - def _deploy( - self, - pair: str, - token_amount: float, - stx_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create an ask offer.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [pair, str(token_amount), str(stx_amount)] - if recipient: - args.append(recipient) - if expiry: - args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "ask.ts", *args) - - def _run( - self, - pair: str, - token_amount: float, - stx_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create an ask offer.""" - return self._deploy(pair, token_amount, stx_amount, recipient, expiry, **kwargs) - - async def 
_arun( - self, - pair: str, - token_amount: float, - stx_amount: float, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(pair, token_amount, stx_amount, recipient, expiry) - - -class JingSubmitAskTool(JingBaseTool): - name: str = "jing_submit_ask" - description: str = "Submit (accept) an existing ask offer to buy tokens on JingCash" - args_schema: Type[BaseModel] = JingSubmitOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit an ask.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "submit-ask.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to submit an ask.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetPrivateOffersTool(JingBaseTool): - name: str = "jing_get_private_offers" - description: str = "Get private offers for a specific address on JingCash" - args_schema: Type[BaseModel] = JingGetPrivateOffersInput - return_direct: bool = False - - def _deploy(self, pair: str, user_address: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get private offers.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "get-private-offers.ts", pair, user_address - ) - - def _run(self, pair: str, user_address: str, **kwargs) -> Dict[str, Any]: - """Execute the tool to get private offers.""" - return self._deploy(pair, user_address, **kwargs) - - async def _arun(self, pair: str, user_address: str, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(pair, user_address, **kwargs) - - -class JingGetPendingOrdersTool(JingBaseTool): - name: str = "jing_get_pending_orders" - description: str = "Get all pending orders for the current user on JingCash" - args_schema: Type[BaseModel] = JingGetMarketsInput - return_direct: bool = False - - def _deploy(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get pending orders.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run(self.wallet_id, "jing", "get-pending-orders.ts") - - def _run(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get pending orders.""" - return self._deploy(**kwargs) - - async def _arun(self, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(**kwargs) - - -class JingRepriceBidTool(JingBaseTool): - name: str = "jing_reprice_bid" - description: str = "Reprice an existing bid order on JingCash" - args_schema: Type[BaseModel] = JingRepriceOrderInput - return_direct: bool = False - - def _deploy( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice a bid.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [str(swap_id), str(new_amount), pair] - if recipient: - args.append(recipient) - if 
expiry: - args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "reprice-bid.ts", *args) - - def _run( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice a bid.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - async def _arun( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - -class JingRepriceAskTool(JingBaseTool): - name: str = "jing_reprice_ask" - description: str = "Reprice an existing ask order on JingCash" - args_schema: Type[BaseModel] = JingRepriceOrderInput - return_direct: bool = False - - def _deploy( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice an ask.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - args = [str(swap_id), str(new_amount), pair] - if recipient: - args.append(recipient) - if expiry: - args.append(str(expiry)) - args.append(str(self.wallet_id)) - - return BunScriptRunner.bun_run(self.wallet_id, "jing", "reprice-ask.ts", *args) - - def _run( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to reprice an ask.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - async def _arun( - self, - swap_id: int, - new_amount: float, - pair: str, - recipient: Optional[str] = None, - expiry: Optional[int] = None, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, new_amount, pair, recipient, expiry, **kwargs) - - -class JingCancelBidTool(JingBaseTool): - name: str = "jing_cancel_bid" - description: str = "Cancel an existing bid order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to cancel a bid.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "cancel-bid.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to cancel a bid.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingCancelAskTool(JingBaseTool): - name: str = "jing_cancel_ask" - description: str = "Cancel an existing ask order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to cancel an ask.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "cancel-ask.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) 
-> Dict[str, Any]: - """Execute the tool to cancel an ask.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetBidTool(JingBaseTool): - name: str = "jing_get_bid" - description: str = "Get details of a specific bid order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get bid details.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "get-bid.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get bid details.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetAskTool(JingBaseTool): - name: str = "jing_get_ask" - description: str = "Get details of a specific ask order on JingCash" - args_schema: Type[BaseModel] = JingGetOrderInput - return_direct: bool = False - - def _deploy(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get ask details.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run( - self.wallet_id, "jing", "get-ask.ts", str(swap_id) - ) - - def _run(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Execute the tool to get ask details.""" - return self._deploy(swap_id, **kwargs) - - async def _arun(self, swap_id: int, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(swap_id, **kwargs) - - -class JingGetMarketsTool(JingBaseTool): - name: str = "jing_get_markets" - description: str = ( - "Get all available trading pairs and their contract details on JingCash" - ) - args_schema: Type[BaseModel] = JingGetMarketsInput - return_direct: bool = False - - def _deploy(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get available markets.""" - if self.wallet_id is None: - return { - "success": False, - "error": "Wallet ID is required", - "output": "", - } - return BunScriptRunner.bun_run(self.wallet_id, "jing", "list-markets.ts") - - def _run(self, **kwargs) -> Dict[str, Any]: - """Execute the tool to get available markets.""" - return self._deploy(**kwargs) - - async def _arun(self, **kwargs) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(**kwargs) diff --git a/tools/smartwallet.py b/tools/smartwallet.py deleted file mode 100644 index 5645d4f2..00000000 --- a/tools/smartwallet.py +++ /dev/null @@ -1,2566 +0,0 @@ -from typing import Any, Dict, Optional, Type -from uuid import UUID - -from langchain.tools import BaseTool -from pydantic import BaseModel, Field - -from tools.bun import BunScriptRunner - - -class SmartWalletGenerateSmartWalletInput(BaseModel): - """Input schema for generating a smart wallet contract.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - agent_address: str = Field( - ..., - description="Stacks address of the agent", - example="ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG", - ) - dao_token_contract: str = Field( - ..., - description="Contract 
principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - generate_files: bool = Field( - False, - description="Whether to generate contract files", - ) - - -class SmartWalletGenerateSmartWalletTool(BaseTool): - name: str = "smartwallet_generate_smart_wallet" - description: str = ( - "Generate a new smart wallet contract with specified owner and agent addresses. " - "Returns the contract name, hash, and source code." - ) - args_schema: Type[BaseModel] = SmartWalletGenerateSmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - str(generate_files).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "generate-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - -class SmartWalletGenerateMySmartWalletInput(BaseModel): - """Input schema for generating a smart wallet contract using the current agent as the agent address.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - generate_files: bool = Field( - False, - description="Whether to generate contract files", - ) - - -class SmartWalletGenerateMySmartWalletTool(BaseTool): - name: str = "smartwallet_generate_my_smart_wallet" - description: str = ( - "Generate a new smart wallet contract using the current agent as the agent address. " - "Returns the contract name, hash, and source code." 
- ) - args_schema: Type[BaseModel] = SmartWalletGenerateMySmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - dao_token_contract, - dao_token_dex_contract, - str(generate_files).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "generate-my-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to generate smart wallet.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - generate_files: bool = False, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - generate_files, - **kwargs, - ) - - -class SmartWalletDeploySmartWalletInput(BaseModel): - """Input schema for deploying a smart wallet contract.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - - -class SmartWalletDeploySmartWalletTool(BaseTool): - name: str = "smartwallet_deploy_smart_wallet" - description: str = ( - "Deploy a new smart wallet contract with specified owner and agent addresses. " - "Returns the deployed contract address and transaction ID." 
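Both generator tools above share one execution path: they require a backend wallet_id, lowercase-stringify generate_files, and hand the remaining arguments to a Bun script under aibtc-dao/smart-wallet. A minimal invocation sketch, assuming the module is importable as tools.smartwallet, a placeholder wallet UUID, and the example principals from the input schema:

from uuid import UUID

from tools.smartwallet import SmartWalletGenerateSmartWalletTool

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder backend wallet UUID

tool = SmartWalletGenerateSmartWalletTool(wallet_id=wallet_id)
result = tool._run(
    owner_address="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM",
    agent_address="ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG",
    dao_token_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    dao_token_dex_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex",
    generate_files=False,  # forwarded to generate-smart-wallet.ts as the string "false"
)
# result is the dict returned by BunScriptRunner.bun_run for this script run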
- ) - args_schema: Type[BaseModel] = SmartWalletDeploySmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "deploy-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - agent_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - agent_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - -class SmartWalletDeployMySmartWalletInput(BaseModel): - """Input schema for deploying a smart wallet contract using the current agent as the agent address.""" - - owner_address: str = Field( - ..., - description="Stacks address of the wallet owner", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - dao_token_dex_contract: str = Field( - ..., - description="Contract principal of the DAO token DEX", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token-dex", - ) - - -class SmartWalletDeployMySmartWalletTool(BaseTool): - name: str = "smartwallet_deploy_my_smart_wallet" - description: str = ( - "Deploy a new smart wallet contract using the current agent as the agent address. " - "Returns the deployed contract address and transaction ID." 
- ) - args_schema: Type[BaseModel] = SmartWalletDeployMySmartWalletInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - owner_address, - dao_token_contract, - dao_token_dex_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet", - "deploy-my-smart-wallet.ts", - *args, - ) - - def _run( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deploy smart wallet.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - async def _arun( - self, - owner_address: str, - dao_token_contract: str, - dao_token_dex_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - owner_address, - dao_token_contract, - dao_token_dex_contract, - **kwargs, - ) - - -class SmartWalletIsApprovedAssetInput(BaseModel): - """Input schema for checking if an asset is approved in a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletIsApprovedAssetTool(BaseTool): - name: str = "smartwallet_is_approved_asset" - description: str = ( - "Check if an asset is approved for use with a smart wallet. " - "Returns true if the asset is approved, false otherwise." 
- ) - args_schema: Type[BaseModel] = SmartWalletIsApprovedAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to check asset approval.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "is-approved-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to check asset approval.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletGetBalanceStxInput(BaseModel): - """Input schema for getting STX balance from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - - -class SmartWalletGetBalanceStxTool(BaseTool): - name: str = "smartwallet_get_balance_stx" - description: str = ( - "Get the STX balance from a smart wallet. Returns the balance in microSTX." - ) - args_schema: Type[BaseModel] = SmartWalletGetBalanceStxInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [smart_wallet_contract] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "get-balance-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get STX balance.""" - return self._deploy(smart_wallet_contract, **kwargs) - - async def _arun( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(smart_wallet_contract, **kwargs) - - -class SmartWalletGetConfigurationInput(BaseModel): - """Input schema for getting smart wallet configuration.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - - -class SmartWalletGetConfigurationTool(BaseTool): - name: str = "smartwallet_get_configuration" - description: str = ( - "Get the configuration of a smart wallet. " - "Returns owner, agent, and other configuration details." 
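The read-only helpers (is-approved-asset.ts, get-balance-stx.ts, get-configuration.ts) all live under aibtc-dao/smart-wallet/read-only and take the smart wallet principal as their first argument. A short query sketch, using the same placeholder wallet UUID and schema examples:

from uuid import UUID

from tools.smartwallet import (
    SmartWalletGetBalanceStxTool,
    SmartWalletIsApprovedAssetTool,
)

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder backend wallet UUID
smart_wallet = "ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet"

# Balance is reported in microSTX by get-balance-stx.ts.
balance = SmartWalletGetBalanceStxTool(wallet_id=wallet_id)._run(smart_wallet)

# Approval check returns the script's true/false result for the asset contract.
approved = SmartWalletIsApprovedAssetTool(wallet_id=wallet_id)._run(
    smart_wallet,
    "ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
)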
- ) - args_schema: Type[BaseModel] = SmartWalletGetConfigurationInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get wallet configuration.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [smart_wallet_contract] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/read-only", - "get-configuration.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to get wallet configuration.""" - return self._deploy(smart_wallet_contract, **kwargs) - - async def _arun( - self, - smart_wallet_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy(smart_wallet_contract, **kwargs) - - -class SmartWalletApproveAssetInput(BaseModel): - """Input schema for approving an asset in a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset to approve", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletApproveAssetTool(BaseTool): - name: str = "smartwallet_approve_asset" - description: str = ( - "Approve an asset for use with a smart wallet. " - "Returns the transaction ID of the approval transaction." - ) - args_schema: Type[BaseModel] = SmartWalletApproveAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to approve asset.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "approve-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to approve asset.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletRevokeAssetInput(BaseModel): - """Input schema for revoking an asset from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset to revoke", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletRevokeAssetTool(BaseTool): - name: str = "smartwallet_revoke_asset" - description: str = ( - "Revoke an asset from a smart 
wallet. " - "Returns the transaction ID of the revocation transaction." - ) - args_schema: Type[BaseModel] = SmartWalletRevokeAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to revoke asset.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "revoke-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to revoke asset.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletDepositStxInput(BaseModel): - """Input schema for depositing STX to a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - amount: int = Field( - ..., - description="Amount of STX to deposit in microSTX", - example=1000000, - gt=0, - ) - - -class SmartWalletDepositStxTool(BaseTool): - name: str = "smartwallet_deposit_stx" - description: str = ( - "Deposit STX to a smart wallet. " - "Returns the transaction ID of the deposit transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletDepositStxInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "deposit-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit STX.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - -class SmartWalletWithdrawStxInput(BaseModel): - """Input schema for withdrawing STX from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - amount: int = Field( - ..., - description="Amount of STX to withdraw in microSTX", - example=1000000, - gt=0, - ) - - -class SmartWalletWithdrawSTXTool(BaseTool): - name: str = "smartwallet_withdraw_stx" - description: str = ( - "Withdraw STX from a smart wallet. " - "Returns the transaction ID of the withdrawal transaction." - ) - args_schema: Type[BaseModel] = SmartWalletWithdrawStxInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw STX.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "withdraw-stx.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw STX.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - amount, - **kwargs, - ) - - -class SmartWalletDepositFtInput(BaseModel): - """Input schema for depositing fungible tokens to a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - ft_contract: str = Field( - ..., - description="Contract principal of the fungible token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - amount: int = Field( - ..., - description="Amount of tokens to deposit", - example=1000, - gt=0, - ) - - -class SmartWalletDepositFtTool(BaseTool): - name: str = "smartwallet_deposit_ft" - 
description: str = ( - "Deposit fungible tokens to a smart wallet. " - "Returns the transaction ID of the deposit transaction." - ) - args_schema: Type[BaseModel] = SmartWalletDepositFtInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit fungible tokens.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - ft_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "deposit-ft.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to deposit fungible tokens.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - -class SmartWalletWithdrawFtInput(BaseModel): - """Input schema for withdrawing fungible tokens from a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - ft_contract: str = Field( - ..., - description="Contract principal of the fungible token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - amount: int = Field( - ..., - description="Amount of tokens to withdraw", - example=1000, - gt=0, - ) - - -class SmartWalletWithdrawFTTool(BaseTool): - name: str = "smartwallet_withdraw_ft" - description: str = ( - "Withdraw fungible tokens from a smart wallet. " - "Returns the transaction ID of the withdrawal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletWithdrawFtInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw fungible tokens.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - ft_contract, - str(amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "withdraw-ft.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to withdraw fungible tokens.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - ft_contract: str, - amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - ft_contract, - amount, - **kwargs, - ) - - -class SmartWalletProxyCreateProposalInput(BaseModel): - """Input schema for creating a core proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - dao_core_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO core proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2", - ) - dao_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.proposal-add-extension", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - - -class SmartWalletProxyCreateProposalTool(BaseTool): - name: str = "smartwallet_proxy_create_proposal" - description: str = ( - "Create a core proposal through a smart wallet. " - "Returns the transaction ID of the proposal creation transaction." 
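The deposit and withdrawal tools take integer amounts and stringify them before handing them to the Bun scripts; STX amounts are denominated in microSTX (1 STX = 1,000,000 microSTX). A sketch of the STX flow, with placeholder values:

from uuid import UUID

from tools.smartwallet import SmartWalletDepositStxTool, SmartWalletWithdrawSTXTool

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder backend wallet UUID
smart_wallet = "ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet"

deposit = SmartWalletDepositStxTool(wallet_id=wallet_id)._run(smart_wallet, 2_500_000)      # 2.5 STX
withdrawal = SmartWalletWithdrawSTXTool(wallet_id=wallet_id)._run(smart_wallet, 1_000_000)  # 1 STX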
- ) - args_schema: Type[BaseModel] = SmartWalletProxyCreateProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - dao_token_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - dao_token_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-create-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - dao_token_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to create a core proposal.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - dao_token_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - dao_token_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - dao_token_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSendMessageInput(BaseModel): - """Input schema for proposing a send message action through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-send-message", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - message: str = Field( - ..., - description="Message to send", - example="hello world", - ) - - -class SmartWalletProxyProposeActionSendMessageTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_send_message" - description: str = ( - "Propose a send message action through a smart wallet. " - "Returns the transaction ID of the action proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSendMessageInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose a send message action.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - message, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-send-message.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose a send message action.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - message, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - message: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - message, - **kwargs, - ) - - -class SmartWalletVoteOnActionProposalInput(BaseModel): - """Input schema for voting on an action proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - proposal_id: int = Field( - ..., - description="ID of the action proposal", - example=1, - gt=0, - ) - vote: bool = Field( - ..., - description="True to vote in favor, False to vote against", - example=True, - ) - - -class SmartWalletVoteOnActionProposalTool(BaseTool): - name: str = "smartwallet_vote_on_action_proposal" - description: str = ( - "Vote on an action proposal through a smart wallet. " - "Returns the transaction ID of the vote transaction." 
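Action proposals are created by proxy: the tool passes the smart wallet, the action-proposals extension, the specific action contract, the DAO token, and the action payload (here a message) to proxy-propose-action-send-message.ts. A sketch using the schema's example principals:

from uuid import UUID

from tools.smartwallet import SmartWalletProxyProposeActionSendMessageTool

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder backend wallet UUID

proposal = SmartWalletProxyProposeActionSendMessageTool(wallet_id=wallet_id)._run(
    smart_wallet_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-user-agent-smart-wallet",
    dao_action_proposals_extension_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    dao_action_proposal_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-send-message",
    dao_token_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    message="hello world",
)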
- ) - args_schema: Type[BaseModel] = SmartWalletVoteOnActionProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on an action proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - str(proposal_id), - str(vote).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "vote-on-action-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on an action proposal.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - vote, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - vote, - **kwargs, - ) - - -class SmartWalletVoteOnCoreProposalInput(BaseModel): - """Input schema for voting on a core proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_core_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO core proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2", - ) - dao_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - vote: bool = Field( - ..., - description="True to vote in favor, False to vote against", - example=True, - ) - - -class SmartWalletVoteOnCoreProposalTool(BaseTool): - name: str = "smartwallet_vote_on_core_proposal" - description: str = ( - "Vote on a core proposal through a smart wallet. " - "Returns the transaction ID of the vote transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletVoteOnCoreProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - str(vote).lower(), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "vote-on-core-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to vote on a core proposal.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - vote, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - vote: bool, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - vote, - **kwargs, - ) - - -class SmartWalletConcludeActionProposalInput(BaseModel): - """Input schema for concluding an action proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - proposal_id: int = Field( - ..., - description="ID of the action proposal", - example=1, - gt=0, - ) - - -class SmartWalletConcludeActionProposalTool(BaseTool): - name: str = "smartwallet_conclude_action_proposal" - description: str = ( - "Conclude an action proposal through a smart wallet. " - "Returns the transaction ID of the conclusion transaction." 
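Voting converts proposal_id and the boolean vote into the strings the Bun scripts expect ("1", "true"/"false") before calling vote-on-action-proposal.ts or vote-on-core-proposal.ts. A sketch of an in-favor vote on action proposal 1, with placeholder values:

from uuid import UUID

from tools.smartwallet import SmartWalletVoteOnActionProposalTool

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder backend wallet UUID

vote_tx = SmartWalletVoteOnActionProposalTool(wallet_id=wallet_id)._run(
    smart_wallet_contract="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    dao_action_proposals_extension_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    proposal_id=1,   # passed to the script as "1"
    vote=True,       # passed to the script as "true"
)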
- ) - args_schema: Type[BaseModel] = SmartWalletConcludeActionProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude an action proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - str(proposal_id), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "conclude-action-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude an action proposal.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - proposal_id: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - proposal_id, - **kwargs, - ) - - -class SmartWalletConcludeCoreProposalInput(BaseModel): - """Input schema for concluding a core proposal through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_core_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO core proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-core-proposals-v2", - ) - dao_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - - -class SmartWalletConcludeCoreProposalTool(BaseTool): - name: str = "smartwallet_conclude_core_proposal" - description: str = ( - "Conclude a core proposal through a smart wallet. " - "Returns the transaction ID of the conclusion transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletConcludeCoreProposalInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude a core proposal.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "conclude-core-proposal.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to conclude a core proposal.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_core_proposals_extension_contract: str, - dao_proposal_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_core_proposals_extension_contract, - dao_proposal_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionAddResourceInput(BaseModel): - """Input schema for proposing an action to add a resource through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - resource_name: str = Field( - ..., - description="Name of the resource to add", - example="my-resource", - ) - resource_contract: str = Field( - ..., - description="Contract principal of the resource", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.my-resource", - ) - - -class SmartWalletProxyProposeActionAddResourceTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_add_resource" - description: str = ( - "Propose an action to add a resource through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
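Once voting is over, the matching conclude tools settle a proposal through conclude-action-proposal.ts or conclude-core-proposal.ts, again returning the transaction ID. A sketch for an action proposal, with placeholder values:

from uuid import UUID

from tools.smartwallet import SmartWalletConcludeActionProposalTool

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder backend wallet UUID

conclusion = SmartWalletConcludeActionProposalTool(wallet_id=wallet_id)._run(
    smart_wallet_contract="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    dao_action_proposals_extension_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    proposal_id=1,
)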
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionAddResourceInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - resource_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to add a resource.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - resource_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-add-resource.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - resource_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to add a resource.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - resource_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - resource_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - resource_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionAllowAssetInput(BaseModel): - """Input schema for proposing an action to allow an asset through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - asset_contract: str = Field( - ..., - description="Contract principal of the asset", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.my-asset", - ) - - -class SmartWalletProxyProposeActionAllowAssetTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_allow_asset" - description: str = ( - "Propose an action to allow an asset through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionAllowAssetInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to allow an asset.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - asset_contract, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-allow-asset.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to allow an asset.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - asset_contract, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - asset_contract: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - asset_contract, - **kwargs, - ) - - -class SmartWalletProxyProposeActionToggleResourceByNameInput(BaseModel): - """Input schema for proposing an action to toggle a resource by name through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - resource_name: str = Field( - ..., - description="Name of the resource to toggle", - example="my-resource", - ) - - -class SmartWalletProxyProposeActionToggleResourceByNameTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_toggle_resource_by_name" - description: str = ( - "Propose an action to toggle a resource by name through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = ( - SmartWalletProxyProposeActionToggleResourceByNameInput - ) - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to toggle a resource by name.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-toggle-resource-by-name.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to toggle a resource by name.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - resource_name: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - resource_name, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSetAccountHolderInput(BaseModel): - """Input schema for proposing an action to set the account holder through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - account_holder: str = Field( - ..., - description="Principal of the new account holder", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM", - ) - - -class SmartWalletProxyProposeActionSetAccountHolderTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_set_account_holder" - description: str = ( - "Propose an action to set the account holder through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetAccountHolderInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - account_holder: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the account holder.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - account_holder, - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-set-account-holder.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - account_holder: str, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the account holder.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - account_holder, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - account_holder: str, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - account_holder, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalAmountInput(BaseModel): - """Input schema for proposing an action to set the withdrawal amount through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - withdrawal_amount: int = Field( - ..., - description="New withdrawal amount in micro-STX", - example=1000000, - gt=0, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalAmountTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_set_withdrawal_amount" - description: str = ( - "Propose an action to set the withdrawal amount through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
- ) - args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetWithdrawalAmountInput - return_direct: bool = False - wallet_id: Optional[UUID] = None - - def __init__(self, wallet_id: Optional[UUID] = None, **kwargs): - super().__init__(**kwargs) - self.wallet_id = wallet_id - - def _deploy( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the withdrawal amount.""" - if self.wallet_id is None: - return {"success": False, "message": "Wallet ID is required", "data": None} - - args = [ - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - str(withdrawal_amount), - ] - - return BunScriptRunner.bun_run( - self.wallet_id, - "aibtc-dao/smart-wallet/public", - "proxy-propose-action-set-withdrawal-amount.ts", - *args, - ) - - def _run( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Execute the tool to propose an action to set the withdrawal amount.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - withdrawal_amount, - **kwargs, - ) - - async def _arun( - self, - smart_wallet_contract: str, - dao_action_proposals_extension_contract: str, - dao_action_proposal_contract: str, - dao_token_contract: str, - withdrawal_amount: int, - **kwargs, - ) -> Dict[str, Any]: - """Async version of the tool.""" - return self._deploy( - smart_wallet_contract, - dao_action_proposals_extension_contract, - dao_action_proposal_contract, - dao_token_contract, - withdrawal_amount, - **kwargs, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalPeriodInput(BaseModel): - """Input schema for proposing an action to set the withdrawal period through a smart wallet.""" - - smart_wallet_contract: str = Field( - ..., - description="Contract principal of the smart wallet", - example="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet", - ) - dao_action_proposals_extension_contract: str = Field( - ..., - description="Contract principal of the DAO action proposals extension", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2", - ) - dao_action_proposal_contract: str = Field( - ..., - description="Contract principal of the DAO action proposal", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send", - ) - dao_token_contract: str = Field( - ..., - description="Contract principal of the DAO token", - example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token", - ) - withdrawal_period: int = Field( - ..., - description="New withdrawal period in blocks", - example=144, - gt=0, - ) - - -class SmartWalletProxyProposeActionSetWithdrawalPeriodTool(BaseTool): - name: str = "smartwallet_proxy_propose_action_set_withdrawal_period" - description: str = ( - "Propose an action to set the withdrawal period through a smart wallet. " - "Returns the transaction ID of the proposal transaction." 
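The proxy-propose-action-set-* tools follow the same shape as the other action proposals: four contract principals plus a single setting, with numeric settings stringified for the script (withdrawal amounts in microSTX, withdrawal periods as a block count such as the schema's example of 144). A sketch for the withdrawal amount, using the example principals:

from uuid import UUID

from tools.smartwallet import SmartWalletProxyProposeActionSetWithdrawalAmountTool

wallet_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder backend wallet UUID

proposal = SmartWalletProxyProposeActionSetWithdrawalAmountTool(wallet_id=wallet_id)._run(
    smart_wallet_contract="ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.aibtc-user-agent-smart-wallet",
    dao_action_proposals_extension_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-action-proposals-v2",
    dao_action_proposal_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-onchain-messaging-send",
    dao_token_contract="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
    withdrawal_amount=1_000_000,  # stringified to "1000000" (microSTX) for the script
)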
-    )
-    args_schema: Type[BaseModel] = SmartWalletProxyProposeActionSetWithdrawalPeriodInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(
-        self,
-        smart_wallet_contract: str,
-        dao_action_proposals_extension_contract: str,
-        dao_action_proposal_contract: str,
-        dao_token_contract: str,
-        withdrawal_period: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to propose an action to set the withdrawal period."""
-        if self.wallet_id is None:
-            return {"success": False, "message": "Wallet ID is required", "data": None}
-
-        args = [
-            smart_wallet_contract,
-            dao_action_proposals_extension_contract,
-            dao_action_proposal_contract,
-            dao_token_contract,
-            str(withdrawal_period),
-        ]
-
-        return BunScriptRunner.bun_run(
-            self.wallet_id,
-            "aibtc-dao/smart-wallet/public",
-            "proxy-propose-action-set-withdrawal-period.ts",
-            *args,
-        )
-
-    def _run(
-        self,
-        smart_wallet_contract: str,
-        dao_action_proposals_extension_contract: str,
-        dao_action_proposal_contract: str,
-        dao_token_contract: str,
-        withdrawal_period: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to propose an action to set the withdrawal period."""
-        return self._deploy(
-            smart_wallet_contract,
-            dao_action_proposals_extension_contract,
-            dao_action_proposal_contract,
-            dao_token_contract,
-            withdrawal_period,
-            **kwargs,
-        )
-
-    async def _arun(
-        self,
-        smart_wallet_contract: str,
-        dao_action_proposals_extension_contract: str,
-        dao_action_proposal_contract: str,
-        dao_token_contract: str,
-        withdrawal_period: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Async version of the tool."""
-        return self._deploy(
-            smart_wallet_contract,
-            dao_action_proposals_extension_contract,
-            dao_action_proposal_contract,
-            dao_token_contract,
-            withdrawal_period,
-            **kwargs,
-        )
-
-
-class SmartWalletDepositSTXInput(BaseModel):
-    """Input schema for depositing STX to a smart wallet."""
-
-    smart_wallet_contract: str = Field(
-        ...,
-        description="Contract principal of the smart wallet",
-        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18",
-    )
-    amount: int = Field(..., description="Amount of STX to deposit in microstacks")
-
-
-class SmartWalletDepositSTXTool(BaseTool):
-    name: str = "smartwallet_deposit_stx"
-    description: str = (
-        "Deposit STX into a smart wallet. "
-        "The amount should be specified in microstacks (1 STX = 1,000,000 microstacks)."
-    )
-    args_schema: Type[BaseModel] = SmartWalletDepositSTXInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(
-        self,
-        smart_wallet_contract: str,
-        amount: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to deposit STX to a smart wallet."""
-        if self.wallet_id is None:
-            return {"success": False, "message": "Wallet ID is required", "data": None}
-
-        args = [smart_wallet_contract, str(amount)]
-
-        return BunScriptRunner.bun_run(
-            self.wallet_id,
-            "aibtc-dao/smart-wallet/public",
-            "deposit-stx.ts",
-            *args,
-        )
-
-    def _run(
-        self,
-        smart_wallet_contract: str,
-        amount: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to deposit STX to a smart wallet."""
-        return self._deploy(smart_wallet_contract, amount, **kwargs)
-
-    async def _arun(
-        self,
-        smart_wallet_contract: str,
-        amount: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Async version of the tool to deposit STX to a smart wallet."""
-        return self._deploy(smart_wallet_contract, amount, **kwargs)
-
-
-class SmartWalletDepositFTInput(BaseModel):
-    """Input schema for depositing fungible tokens to a smart wallet."""
-
-    smart_wallet_contract: str = Field(
-        ...,
-        description="Contract principal of the smart wallet",
-        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18",
-    )
-    ft_contract: str = Field(
-        ...,
-        description="Contract principal of the fungible token",
-        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-token",
-    )
-    amount: int = Field(..., description="Amount of tokens to deposit")
-
-
-class SmartWalletDepositFTTool(BaseTool):
-    name: str = "smartwallet_deposit_ft"
-    description: str = (
-        "Deposit fungible tokens into a smart wallet. "
-        "Requires the token contract principal and amount to deposit."
-    )
-    args_schema: Type[BaseModel] = SmartWalletDepositFTInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(
-        self,
-        smart_wallet_contract: str,
-        ft_contract: str,
-        amount: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to deposit fungible tokens."""
-        if self.wallet_id is None:
-            return {"success": False, "message": "Wallet ID is required", "data": None}
-
-        args = [smart_wallet_contract, ft_contract, str(amount)]
-
-        return BunScriptRunner.bun_run(
-            self.wallet_id,
-            "aibtc-dao/smart-wallet/public",
-            "deposit-ft.ts",
-            *args,
-        )
-
-    def _run(
-        self,
-        smart_wallet_contract: str,
-        ft_contract: str,
-        amount: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to deposit fungible tokens."""
-        return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs)
-
-    async def _arun(
-        self,
-        smart_wallet_contract: str,
-        ft_contract: str,
-        amount: int,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Async version of the tool."""
-        return self._deploy(smart_wallet_contract, ft_contract, amount, **kwargs)
-
-
-class SmartWalletGetBalanceSTXInput(BaseModel):
-    """Input schema for getting STX balance from a smart wallet."""
-
-    smart_wallet_contract: str = Field(
-        ...,
-        description="Contract principal of the smart wallet",
-        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18",
-    )
-
-
-class SmartWalletGetBalanceSTXTool(BaseTool):
-    name: str = "smartwallet_get_balance_stx"
-    description: str = "Get the STX balance from a smart wallet. Returns the current STX balance as a number."
-    args_schema: Type[BaseModel] = SmartWalletGetBalanceSTXInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(
-        self,
-        smart_wallet_contract: str,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to get STX balance."""
-        if self.wallet_id is None:
-            return {"success": False, "message": "Wallet ID is required", "data": None}
-
-        args = [smart_wallet_contract]
-
-        return BunScriptRunner.bun_run(
-            self.wallet_id,
-            "aibtc-dao/smart-wallet/read-only",
-            "get-balance-stx.ts",
-            *args,
-        )
-
-    def _run(
-        self,
-        smart_wallet_contract: str,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to get STX balance."""
-        return self._deploy(smart_wallet_contract, **kwargs)
-
-    async def _arun(
-        self,
-        smart_wallet_contract: str,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Async version of the tool."""
-        return self._deploy(smart_wallet_contract, **kwargs)
-
-
-class SmartWalletIsApprovedAssetInput(BaseModel):
-    """Input schema for checking if an asset is approved in a smart wallet."""
-
-    smart_wallet_contract: str = Field(
-        ...,
-        description="Contract principal of the smart wallet",
-        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-smart-wallet-ST3YT-S5D18",
-    )
-    asset_contract: str = Field(
-        ...,
-        description="Contract principal of the asset to check",
-        example="ST35K818S3K2GSNEBC3M35GA3W8Q7X72KF4RVM3QA.aibtc-faktory",
-    )
diff --git a/tools/stxcity.py b/tools/stxcity.py
deleted file mode 100644
index aea10db9..00000000
--- a/tools/stxcity.py
+++ /dev/null
@@ -1,276 +0,0 @@
-from decimal import Decimal
-from typing import Any, Dict, Optional, Type
-
-from langchain.tools import BaseTool
-from pydantic import BaseModel, Field
-
-from backend.models import UUID
-from tools.bun import BunScriptRunner
-
-
-class StxCityBaseInput(BaseModel):
-    """Base input schema for STXCity tools that don't require parameters."""
-
-    pass
-
-
-class StxCityExecuteBuyInput(BaseModel):
-    """Input schema for STXCity buy order execution."""
-
-    stx_amount: str = Field(..., description="Amount of STX to spend on the purchase")
-    dex_contract_id: str = Field(..., description="Contract ID of the DEX")
-    token_contract_id: str = Field(
-        ..., description="Contract ID of the token to purchase"
-    )
-    token_symbol: str = Field(..., description="Symbol of the token to purchase")
-    slippage: Optional[str] = Field(
-        default="50",
-        description="Slippage tolerance in basis points (default: 50, which is 0.5%)",
-    )
-
-
-class StxCityExecuteBuyTool(BaseTool):
-    name: str = "stxcity_execute_buy"
-    description: str = (
-        "Execute a buy order on STXCity DEX with specified STX amount and token details"
-    )
-    args_schema: Type[BaseModel] = StxCityExecuteBuyInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(
-        self,
-        stx_amount: str,
-        dex_contract_id: str,
-        token_contract_id: str,
-        token_symbol: str,
-        slippage: Optional[str] = "50",
-        **kwargs,
-    ) -> str:
-        """Execute the tool to place a buy order."""
-        if self.wallet_id is None:
-            return {
-                "success": False,
-                "error": "Wallet ID is required",
-                "output": "",
-            }
-        return BunScriptRunner.bun_run(
-            self.wallet_id,
-            "stacks-stxcity",
-            "exec-buy.ts",
-            stx_amount,
-            dex_contract_id,
-            token_contract_id,
-            token_symbol,
-            slippage,
-        )
-
-    def _run(
-        self,
-        stx_amount: str,
-        dex_contract_id: str,
-        token_contract_id: str,
-        token_symbol: str,
-        slippage: Optional[str] = "50",
-        **kwargs,
-    ) -> str:
-        """Execute the tool to place a buy order."""
-        return self._deploy(
-            stx_amount, dex_contract_id, token_contract_id, token_symbol, slippage
-        )
-
-    async def _arun(
-        self,
-        stx_amount: str,
-        dex_contract_id: str,
-        token_contract_id: str,
-        token_symbol: str,
-        slippage: Optional[str] = "50",
-        **kwargs,
-    ) -> str:
-        """Async version of the tool."""
-        return self._deploy(
-            stx_amount, dex_contract_id, token_contract_id, token_symbol, slippage
-        )
-
-
-class StxCityListBondingTokensTool(BaseTool):
-    name: str = "stxcity_list_bonding_tokens"
-    description: str = "Get a list of all available tokens for bonding on STXCity"
-    args_schema: Type[BaseModel] = StxCityBaseInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(self, **kwargs) -> Dict[str, Any]:
-        """Execute the tool to list available bonding tokens."""
-        if self.wallet_id is None:
-            return {
-                "success": False,
-                "error": "Wallet ID is required",
-                "output": "",
-            }
-        return BunScriptRunner.bun_run(self.wallet_id, "stacks-stxcity", "exec-list.ts")
-
-    def _run(self, **kwargs) -> Dict[str, Any]:
-        """Execute the tool to list available bonding tokens."""
-        return self._deploy()
-
-    async def _arun(self, **kwargs) -> Dict[str, Any]:
-        """Async version of the tool."""
-        return self._deploy()
-
-
-class StxCitySearchInput(BaseModel):
-    """Input schema for searching bonding opportunities."""
-
-    keyword: Optional[str] = Field(
-        default=None, description="Search keyword to filter results"
-    )
-    token_contract: Optional[str] = Field(
-        default=None, description="Token contract to filter results"
-    )
-
-
-class StxCitySearchTool(BaseTool):
-    name: str = "stxcity_search"
-    description: str = (
-        "Search for bonding opportunities on STXCity with optional keyword and token "
-        "contract filters"
-    )
-    args_schema: Type[BaseModel] = StxCitySearchInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(
-        self,
-        keyword: Optional[str] = None,
-        token_contract: Optional[str] = None,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to search for bonding opportunities."""
-        if self.wallet_id is None:
-            return {
-                "success": False,
-                "error": "Wallet ID is required",
-                "output": "",
-            }
-        args = []
-        if keyword:
-            args.append(keyword)
-        if token_contract:
-            args.append(token_contract)
-        return BunScriptRunner.bun_run(
-            self.wallet_id, "stacks-stxcity", "exec-search.ts", *args
-        )
-
-    def _run(
-        self,
-        keyword: Optional[str] = None,
-        token_contract: Optional[str] = None,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Execute the tool to search for bonding opportunities."""
-        return self._deploy(keyword, token_contract)
-
-    async def _arun(
-        self,
-        keyword: Optional[str] = None,
-        token_contract: Optional[str] = None,
-        **kwargs,
-    ) -> Dict[str, Any]:
-        """Async version of the tool."""
-        return self._deploy(keyword, token_contract)
-
-
-class StxCityExecuteSellInput(BaseModel):
-    """Input schema for STXCity sell order execution."""
-
-    token_amount: Decimal = Field(..., description="Amount of tokens to sell")
-    dex_contract_id: str = Field(..., description="Contract ID of the DEX")
-    token_contract_id: str = Field(..., description="Contract ID of the token to sell")
-    token_symbol: str = Field(..., description="Symbol of the token to sell")
-    slippage: Optional[int] = Field(
-        default=50,
-        description="Slippage tolerance in basis points (default: 50, which is 0.5%)",
-    )
-
-
-class StxCityExecuteSellTool(BaseTool):
-    name: str = "stxcity_execute_sell"
-    description: str = (
-        "Execute a sell order on STXCity DEX with specified token amount and details"
-    )
-    args_schema: Type[BaseModel] = StxCityExecuteSellInput
-    return_direct: bool = False
-    wallet_id: Optional[UUID] = None
-
-    def __init__(self, wallet_id: Optional[UUID] = None, **kwargs):
-        super().__init__(**kwargs)
-        self.wallet_id = wallet_id
-
-    def _deploy(
-        self,
-        token_amount: Decimal,
-        dex_contract_id: str,
-        token_contract_id: str,
-        token_symbol: str,
-        slippage: Optional[int] = 50,
-        **kwargs,
-    ) -> str:
-        """Execute the tool to place a sell order."""
-        if self.wallet_id is None:
-            return {
-                "success": False,
-                "error": "Wallet ID is required",
-                "output": "",
-            }
-        return BunScriptRunner.bun_run(
-            self.wallet_id,
-            "stacks-stxcity",
-            "exec-sell.ts",
-            token_amount,
-            dex_contract_id,
-            token_contract_id,
-            token_symbol,
-            slippage,
-        )
-
-    def _run(
-        self,
-        token_amount: Decimal,
-        dex_contract_id: str,
-        token_contract_id: str,
-        token_symbol: str,
-        slippage: Optional[int] = 50,
-        **kwargs,
-    ) -> str:
-        """Execute the tool to place a sell order."""
-        return self._deploy(
-            token_amount, dex_contract_id, token_contract_id, token_symbol, slippage
-        )
-
-    async def _arun(
-        self,
-        token_amount: Decimal,
-        dex_contract_id: str,
-        token_contract_id: str,
-        token_symbol: str,
-        slippage: Optional[int] = 50,
-        **kwargs,
-    ) -> str:
-        """Async version of the tool."""
-        return self._deploy(
-            token_amount, dex_contract_id, token_contract_id, token_symbol, slippage
-        )
diff --git a/tools/velar.py b/tools/velar.py
deleted file mode 100644
index 583f3582..00000000
--- a/tools/velar.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from typing import Type
-
-from langchain.tools import BaseTool
-from pydantic import BaseModel, Field
-
-from lib.velar import VelarApi
-
-
-class VelarPriceHistoryInput(BaseModel):
-    """Input schema for retrieving token price history from Velar."""
-
-    token_symbol: str = Field(
-        ..., description="The symbol of the token to get price history for."
-    )
-
-
-class VelarGetPriceHistory(BaseTool):
-    name: str = "velar_token_price_history"
-    description: str = (
-        "Retrieve historical price data for a specific cryptocurrency token from Velar. "
-        "Returns monthly price data points for the token's STX trading pair."
-    )
-    args_schema: Type[BaseModel] = VelarPriceHistoryInput
-    return_direct: bool = False
-
-    def _deploy(self, token_symbol: str, **kwargs) -> str:
-        """Execute the tool to get token price history."""
-        obj = VelarApi()
-        token_stx_pools = obj.get_token_stx_pools(token_symbol.upper())
-        return obj.get_token_price_history(token_stx_pools[0]["id"], "month")
-
-    def _run(self, token_symbol: str, **kwargs) -> str:
-        """Execute the tool to get token price history."""
-        return self._deploy(token_symbol, **kwargs)
-
-    async def _arun(self, token_symbol: str, **kwargs) -> str:
-        """Async version of the tool."""
-        return self._deploy(token_symbol, **kwargs)
-
-
-class VelarGetTokensInput(BaseModel):
-    """Input schema for retrieving available tokens from Velar.
-    This tool doesn't require any input parameters but we define the schema for consistency.
-    """
-
-    pass
-
-
-class VelarGetTokens(BaseTool):
-    name: str = "velar_list_tokens"
-    description: str = (
-        "Retrieve a list of all available tokens from the Velar API with their details "
-        "including symbols, names, and contract information."
-    )
-    args_schema: Type[BaseModel] = VelarGetTokensInput
-    return_direct: bool = False
-
-    def _deploy(self, **kwargs) -> str:
-        """Execute the tool to get available tokens."""
-        obj = VelarApi()
-        return obj.get_tokens()
-
-    def _run(self, **kwargs) -> str:
-        """Execute the tool to get available tokens."""
-        return self._deploy(**kwargs)
-
-    async def _arun(self, **kwargs) -> str:
-        """Async version of the tool."""
-        return self._deploy(**kwargs)
diff --git a/worker.py b/worker.py
new file mode 100644
index 00000000..c1f75444
--- /dev/null
+++ b/worker.py
@@ -0,0 +1,41 @@
+"""Worker mode entrypoint for running background services without the web server."""
+
+import asyncio
+import sys
+
+from config import config
+from lib.logger import configure_logger
+from services import startup
+
+# Configure module logger
+logger = configure_logger(__name__)
+
+# Load configuration
+_ = config
+
+
+async def main():
+    """Main worker function that runs all background services."""
+    logger.info("Starting AI BTC Dev Backend in worker mode...")
+    logger.info("Worker mode - Web server disabled, running background services only")
+
+    try:
+        # Run the startup service in standalone mode
+        # This includes:
+        # - Enhanced job system with auto-discovery
+        # - Telegram bot (if enabled)
+        # - WebSocket cleanup tasks
+        # - System metrics monitoring
+        await startup.run_standalone()
+
+    except KeyboardInterrupt:
+        logger.info("Worker mode interrupted by user")
+    except Exception as e:
+        logger.error(f"Critical error in worker mode: {e}", exc_info=True)
+        sys.exit(1)
+    finally:
+        logger.info("Worker mode shutdown complete")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
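
Note on the new worker entrypoint: worker.py above handles Ctrl+C (KeyboardInterrupt) but not SIGTERM, which is what most process managers and container runtimes send on shutdown. The following is a minimal, illustrative sketch of how a deployment could wrap the same pattern with signal handling; run_background_services is a hypothetical stand-in for the startup.run_standalone() call in this diff, and the stop_event wiring is an assumption, not part of the repository.

# Hypothetical sketch only -- not part of this diff.
import asyncio
import signal


async def run_background_services(stop_event: asyncio.Event) -> None:
    """Stand-in for startup.run_standalone(); runs until a stop is requested."""
    while not stop_event.is_set():
        await asyncio.sleep(1)  # real services would schedule jobs, bots, etc.


async def main() -> None:
    stop_event = asyncio.Event()
    loop = asyncio.get_running_loop()
    # Translate SIGTERM/SIGINT from a process manager into a clean shutdown
    # (add_signal_handler is only available on Unix event loops).
    for sig in (signal.SIGTERM, signal.SIGINT):
        loop.add_signal_handler(sig, stop_event.set)
    await run_background_services(stop_event)


if __name__ == "__main__":
    asyncio.run(main())

In practice this would presumably be launched as "python worker.py" alongside the web process so either can be restarted independently; how this repository actually deploys it is not specified in the diff.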